/World%20of%20Warships%20replays%20parser-3.3.3.tar.gz/World of Warships replays parser-3.3.3/replay_unpack/clients/wot/player.py
import logging
import struct

from replay_unpack.core import (
    Entity
)
from replay_unpack.core.network.player import ControlledPlayerBase
from .helper import get_definitions, get_controller
from .network.packets import (
    Map,
    BasePlayerCreate,
    CellPlayerCreate,
    EntityCreate,
    Position,
    EntityMethod,
    EntityProperty,
    NestedProperty,
    EntityEnter,
    EntityLeave,
    PACKETS_MAPPING
)


class ReplayPlayer(ControlledPlayerBase):

    def _get_definitions(self, version):
        return get_definitions(version)

    def _get_controller(self, version):
        return get_controller(version)

    def _get_packets_mapping(self):
        return PACKETS_MAPPING

    def _process_packet(self, packet):
        if isinstance(packet, Map):
            logging.debug('Welcome to map %s: %s', packet.name, packet.arenaId)
            self._battle_controller.map = packet.name

        elif isinstance(packet, BasePlayerCreate):
            # I'm not sure what is the order of cell/base/client player creation
            if packet.entityId in self._battle_controller.entities:
                base_player = self._battle_controller.entities[packet.entityId]
            else:
                base_player = Entity(id_=packet.entityId,
                                     spec=self._definitions.get_entity_def_by_name('Avatar'))

            # base is internal, so props are stored in order of xml file
            # io = BytesIO(packet.value.value)
            # for index, prop in enumerate(base_player.base_properties):
            #     base_player.set_base_property(index, io)

            self._battle_controller.create_entity(base_player)
            self._battle_controller.on_player_enter_world(packet.entityId)

        elif isinstance(packet, CellPlayerCreate):
            # I'm not sure what is the order of cell/base/client player creation
            if packet.entityId in self._battle_controller.entities:
                cell_player = self._battle_controller.entities[packet.entityId]
            else:
                cell_player = Entity(id_=packet.entityId,
                                     spec=self._definitions.get_entity_def_by_name('Avatar'))

            # cell is internal, so props are stored in order of xml file
            io = packet.value.io()
            for index, prop in enumerate(cell_player.client_properties_internal):
                cell_player.set_client_property_internal(index, io)
            # TODO: why this assert fails?
            # assert io.read() == b''

            self._battle_controller.create_entity(cell_player)

        elif isinstance(packet, EntityEnter):
            self._battle_controller.entities[packet.entityId].is_in_aoi = True

        elif isinstance(packet, EntityLeave):
            self._battle_controller.entities[packet.entityId].is_in_aoi = False

        elif isinstance(packet, EntityCreate):
            entity = Entity(
                id_=packet.entityID,
                spec=self._definitions.get_entity_def_by_index(packet.type))

            values = packet.state.io()
            values_count, = struct.unpack('B', values.read(1))
            for i in range(values_count):
                k = values.read(1)
                idx, = struct.unpack('B', k)
                entity.set_client_property(idx, values)
            assert values.read() == b''
            self._battle_controller.create_entity(entity)

        elif isinstance(packet, Position):
            self._battle_controller.entities[packet.entityId].position = packet.position
            self._battle_controller.entities[packet.entityId].yaw = packet.yaw
            self._battle_controller.entities[packet.entityId].pitch = packet.pitch
            self._battle_controller.entities[packet.entityId].roll = packet.roll

        elif isinstance(packet, EntityMethod):
            entity = self._battle_controller.entities[packet.entityId]
            entity.call_client_method(packet.messageId, packet.data.io())

        elif isinstance(packet, EntityProperty):
            entity = self._battle_controller.entities[packet.objectID]
            entity.set_client_property(packet.messageId, packet.data.io())

        elif isinstance(packet, NestedProperty):
            e = self._battle_controller.entities[packet.entity_id]
            logging.debug('')
            logging.debug('nested property request for id=%s isSlice=%s packet=%s',
                          e.id, packet.is_slice, packet.payload.hex())
            packet.read_and_apply(e)
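The EntityCreate branch above reads a one-byte property count followed by (index, value) pairs from the packet state. Here is a minimal stand-alone sketch of that layout; the blob contents and the one-byte payloads are made up for illustration, since in a real replay the length of each value is dictated by the entity spec:

import struct
from io import BytesIO

# Hypothetical state blob: 2 properties, property 0 -> b'\x01', property 3 -> b'\xff'
blob = BytesIO(struct.pack('B', 2) +
               struct.pack('B', 0) + b'\x01' +
               struct.pack('B', 3) + b'\xff')

values_count, = struct.unpack('B', blob.read(1))
for _ in range(values_count):
    idx, = struct.unpack('B', blob.read(1))
    value = blob.read(1)   # assumed one-byte payload; the real size comes from the spec
    print(idx, value)

assert blob.read() == b''  # mirrors the assert in _process_packet above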
/trixie-0.1.2.tar.gz/trixie-0.1.2/homeassistant/components/sensor/uptime.py
import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity import homeassistant.util.dt as dt_util _LOGGER = logging.getLogger(__name__) DEFAULT_NAME = 'Uptime' ICON = 'mdi:clock' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_UNIT_OF_MEASUREMENT, default='days'): vol.All(cv.string, vol.In(['minutes', 'hours', 'days'])) }) async def async_setup_platform( hass, config, async_add_devices, discovery_info=None): """Set up the uptime sensor platform.""" name = config.get(CONF_NAME) units = config.get(CONF_UNIT_OF_MEASUREMENT) async_add_devices([UptimeSensor(name, units)], True) class UptimeSensor(Entity): """Representation of an uptime sensor.""" def __init__(self, name, unit): """Initialize the uptime sensor.""" self._name = name self._unit = unit self.initial = dt_util.now() self._state = None @property def name(self): """Return the name of the sensor.""" return self._name @property def icon(self): """Icon to display in the front end.""" return ICON @property def unit_of_measurement(self): """Return the unit of measurement the value is expressed in.""" return self._unit @property def state(self): """Return the state of the sensor.""" return self._state async def async_update(self): """Update the state of the sensor.""" delta = dt_util.now() - self.initial div_factor = 3600 if self.unit_of_measurement == 'days': div_factor *= 24 elif self.unit_of_measurement == 'minutes': div_factor /= 60 delta = delta.total_seconds() / div_factor self._state = round(delta, 2) _LOGGER.debug("New value: %s", delta)
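For a quick sanity check of the unit conversion in async_update, here is a plain-Python sketch of the same arithmetic; the helper name and the test values are illustrative and not part of the component:

from datetime import timedelta

def uptime_value(delta: timedelta, unit: str = 'days') -> float:
    """Mirror of the div_factor logic above: seconds -> minutes/hours/days."""
    div_factor = 3600          # hours by default
    if unit == 'days':
        div_factor *= 24
    elif unit == 'minutes':
        div_factor /= 60
    return round(delta.total_seconds() / div_factor, 2)

assert uptime_value(timedelta(hours=36), 'days') == 1.5
assert uptime_value(timedelta(minutes=90), 'hours') == 1.5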
/lib_shopware6_api_base-2.1.3.tar.gz/lib_shopware6_api_base-2.1.3/.docs/installation_via_makefile.rst
- via makefile:
  makefiles are a very convenient way to install. Here we can do much more, like
  installing virtual environments, cleaning caches and so on.

.. code-block:: shell

    # from your shell's home directory:
    $ git clone https://github.com/bitranox/lib_shopware6_api_base.git
    $ cd lib_shopware6_api_base

    # to run the tests:
    $ make test

    # to install the package
    $ make install

    # to clean the package
    $ make clean

    # uninstall the package
    $ make uninstall
/enteletaor-1.1.1.tar.gz/enteletaor-1.1.1/enteletaor_lib/modules/tasks/utils.py
import six

from kombu.simple import Empty
from six.moves.cPickle import loads
from kombu.exceptions import SerializationError


# ----------------------------------------------------------------------
def get_param_type(value):
    """
    Try to identify the parameter type by its value

    :return: string with type. Valid values: str, int, float, dict, list, bytes, object
    :rtype: str
    """
    try:
        # Distinguish between int and float
        if int(value) == value:
            return "int"
        else:
            return "float"
    # TypeError covers dict/list/bytes-like inputs that int() rejects outright
    except (ValueError, TypeError):
        # If int() raises, the type must be a string or complex data

        if type(value) == dict:
            return "dict"
        elif type(value) == list:
            return "list"
        elif type(value) == bytes:
            try:
                six.u(value)
                return "bytes"
            except Exception:
                return "str"
        elif type(value) in ((str, unicode) if six.PY2 else (str,)):
            return "str"
        else:
            return "object"


# ----------------------------------------------------------------------
# Import/export process information
# ----------------------------------------------------------------------
def export_process(process_info, config):
    """
    Export process info to json file

    :return: return a dict JSON compatible
    :rtype: dict
    """
    export_data = []

    for p, v in six.iteritems(process_info):

        # Function name restriction?
        if config.function_name is not None and config.function_name != p:
            continue

        # Extract function params
        params = []
        for i, l_p in enumerate(v):
            l_params = {
                'param_position': i,
                'param_type': get_param_type(l_p),
                'param_value': None
            }

            params.append(l_params)

        # Add to function information
        l_process = {
            'function': p,
            'parameters': params
        }

        # Add to all data
        export_data.append(l_process)

    return export_data


# ----------------------------------------------------------------------
def get_remote_messages(config, queue, fill=True, block=False):
    """
    Get all messages from queue without removing from it

    :return: yield raw deserialized messages
    :rtype: json
    """
    to_inject = []

    try:
        while 1:
            message = queue.get(block=False, timeout=1)

            # --------------------------------------------------------------------------
            # Try to deserialize
            # --------------------------------------------------------------------------
            # Is Pickle info?
            try:
                deserialized = loads(message.body)
            except SerializationError:
                # Not pickled data -> skip this message
                continue

            yield deserialized

            to_inject.append(deserialized)

    except Empty:
        # When Queue is Empty -> reinject all removed messages
        if fill is True:
            for x in to_inject:
                queue.put(x, serializer="pickle")


# ----------------------------------------------------------------------
def list_remote_process(config, queue):
    """
    Get all messages from queue without removing from it

    :return: yield two values: remote_process name, remote args
    :rtype: str, set
    """
    already_processed = set()

    for deserialized in get_remote_messages(config, queue):

        msg_id = deserialized['id']

        # Read info
        if msg_id not in already_processed:
            remote_process = deserialized['task']
            remote_args = deserialized['args']

            # Store as processed
            already_processed.add(msg_id)

            yield remote_process, remote_args, msg_id
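A few illustrative classifications of get_param_type; the inputs are made up, and the dict/list cases rely on TypeError being caught as noted in the except clause above:

assert get_param_type(3) == "int"
assert get_param_type(2.5) == "float"
assert get_param_type("hello") == "str"
assert get_param_type([1, 2]) == "list"
assert get_param_type({"a": 1}) == "dict"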
/paddle2coreml-6.2-cp38-none-macosx_11_0_arm64.whl/coremltools/proto/VisionFeaturePrint_pb2.py
import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pb2 from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='VisionFeaturePrint.proto', package='CoreML.Specification.CoreMLModels', syntax='proto3', serialized_pb=_b('\n\x18VisionFeaturePrint.proto\x12!CoreML.Specification.CoreMLModels\"\xe0\x04\n\x12VisionFeaturePrint\x12L\n\x05scene\x18\x14 \x01(\x0b\x32;.CoreML.Specification.CoreMLModels.VisionFeaturePrint.SceneH\x00\x12P\n\x07objects\x18\x15 \x01(\x0b\x32=.CoreML.Specification.CoreMLModels.VisionFeaturePrint.ObjectsH\x00\x1a\xb7\x01\n\x05Scene\x12Y\n\x07version\x18\x01 \x01(\x0e\x32H.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.SceneVersion\"S\n\x0cSceneVersion\x12\x19\n\x15SCENE_VERSION_INVALID\x10\x00\x12\x13\n\x0fSCENE_VERSION_1\x10\x01\x12\x13\n\x0fSCENE_VERSION_2\x10\x02\x1a\xd5\x01\n\x07Objects\x12]\n\x07version\x18\x01 \x01(\x0e\x32L.CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.ObjectsVersion\x12\x0e\n\x06output\x18\x64 \x03(\t\"[\n\x0eObjectsVersion\x12\x1b\n\x17OBJECTS_VERSION_INVALID\x10\x00\x12\x15\n\x11OBJECTS_VERSION_1\x10\x01\x12\x15\n\x11OBJECTS_VERSION_2\x10\x02\x42\x18\n\x16VisionFeaturePrintTypeB\x02H\x03\x62\x06proto3') ) _VISIONFEATUREPRINT_SCENE_SCENEVERSION = _descriptor.EnumDescriptor( name='SceneVersion', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.SceneVersion', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='SCENE_VERSION_INVALID', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='SCENE_VERSION_1', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='SCENE_VERSION_2', index=2, number=2, options=None, type=None), ], containing_type=None, options=None, serialized_start=347, serialized_end=430, ) _sym_db.RegisterEnumDescriptor(_VISIONFEATUREPRINT_SCENE_SCENEVERSION) _VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION = _descriptor.EnumDescriptor( name='ObjectsVersion', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.ObjectsVersion', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='OBJECTS_VERSION_INVALID', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='OBJECTS_VERSION_1', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='OBJECTS_VERSION_2', index=2, number=2, options=None, type=None), ], containing_type=None, options=None, serialized_start=555, serialized_end=646, ) _sym_db.RegisterEnumDescriptor(_VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION) _VISIONFEATUREPRINT_SCENE = _descriptor.Descriptor( name='Scene', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='version', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene.version', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _VISIONFEATUREPRINT_SCENE_SCENEVERSION, 
], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=247, serialized_end=430, ) _VISIONFEATUREPRINT_OBJECTS = _descriptor.Descriptor( name='Objects', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='version', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.version', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='output', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects.output', index=1, number=100, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=433, serialized_end=646, ) _VISIONFEATUREPRINT = _descriptor.Descriptor( name='VisionFeaturePrint', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='scene', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.scene', index=0, number=20, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='objects', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.objects', index=1, number=21, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_VISIONFEATUREPRINT_SCENE, _VISIONFEATUREPRINT_OBJECTS, ], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='VisionFeaturePrintType', full_name='CoreML.Specification.CoreMLModels.VisionFeaturePrint.VisionFeaturePrintType', index=0, containing_type=None, fields=[]), ], serialized_start=64, serialized_end=672, ) _VISIONFEATUREPRINT_SCENE.fields_by_name['version'].enum_type = _VISIONFEATUREPRINT_SCENE_SCENEVERSION _VISIONFEATUREPRINT_SCENE.containing_type = _VISIONFEATUREPRINT _VISIONFEATUREPRINT_SCENE_SCENEVERSION.containing_type = _VISIONFEATUREPRINT_SCENE _VISIONFEATUREPRINT_OBJECTS.fields_by_name['version'].enum_type = _VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION _VISIONFEATUREPRINT_OBJECTS.containing_type = _VISIONFEATUREPRINT _VISIONFEATUREPRINT_OBJECTS_OBJECTSVERSION.containing_type = _VISIONFEATUREPRINT_OBJECTS _VISIONFEATUREPRINT.fields_by_name['scene'].message_type = _VISIONFEATUREPRINT_SCENE _VISIONFEATUREPRINT.fields_by_name['objects'].message_type = _VISIONFEATUREPRINT_OBJECTS _VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'].fields.append( _VISIONFEATUREPRINT.fields_by_name['scene']) _VISIONFEATUREPRINT.fields_by_name['scene'].containing_oneof = _VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'] _VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'].fields.append( 
_VISIONFEATUREPRINT.fields_by_name['objects']) _VISIONFEATUREPRINT.fields_by_name['objects'].containing_oneof = _VISIONFEATUREPRINT.oneofs_by_name['VisionFeaturePrintType'] DESCRIPTOR.message_types_by_name['VisionFeaturePrint'] = _VISIONFEATUREPRINT _sym_db.RegisterFileDescriptor(DESCRIPTOR) VisionFeaturePrint = _reflection.GeneratedProtocolMessageType('VisionFeaturePrint', (_message.Message,), dict( Scene = _reflection.GeneratedProtocolMessageType('Scene', (_message.Message,), dict( DESCRIPTOR = _VISIONFEATUREPRINT_SCENE, __module__ = 'VisionFeaturePrint_pb2' # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.VisionFeaturePrint.Scene) )) , Objects = _reflection.GeneratedProtocolMessageType('Objects', (_message.Message,), dict( DESCRIPTOR = _VISIONFEATUREPRINT_OBJECTS, __module__ = 'VisionFeaturePrint_pb2' # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.VisionFeaturePrint.Objects) )) , DESCRIPTOR = _VISIONFEATUREPRINT, __module__ = 'VisionFeaturePrint_pb2' # @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.VisionFeaturePrint) )) _sym_db.RegisterMessage(VisionFeaturePrint) _sym_db.RegisterMessage(VisionFeaturePrint.Scene) _sym_db.RegisterMessage(VisionFeaturePrint.Objects) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) # @@protoc_insertion_point(module_scope)
/torch_points3d-1.3.0-py3-none-any.whl/torch_points3d/models/segmentation/kpconv.py
from typing import Any import logging from omegaconf.dictconfig import DictConfig from omegaconf.listconfig import ListConfig from torch.nn import Sequential, Dropout, Linear import torch.nn.functional as F from torch import nn from .base import Segmentation_MP from torch_points3d.core.common_modules import FastBatchNorm1d from torch_points3d.modules.KPConv import * from torch_points3d.core.base_conv.partial_dense import * from torch_points3d.core.common_modules import MultiHeadClassifier from torch_points3d.models.base_model import BaseModel from torch_points3d.models.base_architectures.unet import UnwrappedUnetBasedModel from torch_points3d.datasets.multiscale_data import MultiScaleBatch from torch_points3d.datasets.segmentation import IGNORE_LABEL log = logging.getLogger(__name__) class KPConvPaper(UnwrappedUnetBasedModel): def __init__(self, option, model_type, dataset, modules): # Extract parameters from the dataset self._num_classes = dataset.num_classes self._weight_classes = dataset.weight_classes self._use_category = getattr(option, "use_category", False) if self._use_category: if not dataset.class_to_segments: raise ValueError( "The dataset needs to specify a class_to_segments property when using category information for segmentation" ) self._class_to_seg = dataset.class_to_segments self._num_categories = len(self._class_to_seg) log.info("Using category information for the predictions with %i categories", self._num_categories) else: self._num_categories = 0 # Assemble encoder / decoder UnwrappedUnetBasedModel.__init__(self, option, model_type, dataset, modules) # Build final MLP last_mlp_opt = option.mlp_cls if self._use_category: self.FC_layer = MultiHeadClassifier( last_mlp_opt.nn[0], self._class_to_seg, dropout_proba=last_mlp_opt.dropout, bn_momentum=last_mlp_opt.bn_momentum, ) else: in_feat = last_mlp_opt.nn[0] + self._num_categories self.FC_layer = Sequential() for i in range(1, len(last_mlp_opt.nn)): self.FC_layer.add_module( str(i), Sequential( *[ Linear(in_feat, last_mlp_opt.nn[i], bias=False), FastBatchNorm1d(last_mlp_opt.nn[i], momentum=last_mlp_opt.bn_momentum), LeakyReLU(0.2), ] ), ) in_feat = last_mlp_opt.nn[i] if last_mlp_opt.dropout: self.FC_layer.add_module("Dropout", Dropout(p=last_mlp_opt.dropout)) self.FC_layer.add_module("Class", Lin(in_feat, self._num_classes, bias=False)) self.FC_layer.add_module("Softmax", nn.LogSoftmax(-1)) self.loss_names = ["loss_seg"] self.lambda_reg = self.get_from_opt(option, ["loss_weights", "lambda_reg"]) if self.lambda_reg: self.loss_names += ["loss_reg"] self.lambda_internal_losses = self.get_from_opt(option, ["loss_weights", "lambda_internal_losses"]) self.visual_names = ["data_visual"] def set_input(self, data, device): """Unpack input data from the dataloader and perform necessary pre-processing steps. Parameters: input: a dictionary that contains the data itself and its metadata information. """ data = data.to(device) data.x = add_ones(data.pos, data.x, True) if isinstance(data, MultiScaleBatch): self.pre_computed = data.multiscale self.upsample = data.upsample del data.upsample del data.multiscale else: self.upsample = None self.pre_computed = None self.input = data self.labels = data.y self.batch_idx = data.batch if self._use_category: self.category = data.category def forward(self, *args, **kwargs) -> Any: """Run forward pass. 
This will be called by both functions <optimize_parameters> and <test>.""" stack_down = [] data = self.input for i in range(len(self.down_modules) - 1): data = self.down_modules[i](data, precomputed=self.pre_computed) stack_down.append(data) data = self.down_modules[-1](data, precomputed=self.pre_computed) innermost = False if not isinstance(self.inner_modules[0], Identity): stack_down.append(data) data = self.inner_modules[0](data) innermost = True for i in range(len(self.up_modules)): if i == 0 and innermost: data = self.up_modules[i]((data, stack_down.pop())) else: data = self.up_modules[i]((data, stack_down.pop()), precomputed=self.upsample) last_feature = data.x if self._use_category: self.output = self.FC_layer(last_feature, self.category) else: self.output = self.FC_layer(last_feature) if self.labels is not None: self.compute_loss() self.data_visual = self.input self.data_visual.pred = torch.max(self.output, -1)[1] return self.output def compute_loss(self): if self._weight_classes is not None: self._weight_classes = self._weight_classes.to(self.output.device) self.loss = 0 # Get regularization on weights if self.lambda_reg: self.loss_reg = self.get_regularization_loss(regularizer_type="l2", lambda_reg=self.lambda_reg) self.loss += self.loss_reg # Collect internal losses and set them with self and them to self for later tracking if self.lambda_internal_losses: self.loss += self.collect_internal_losses(lambda_weight=self.lambda_internal_losses) # Final cross entrop loss self.loss_seg = F.nll_loss(self.output, self.labels, weight=self._weight_classes, ignore_index=IGNORE_LABEL) self.loss += self.loss_seg def backward(self): """Calculate losses, gradients, and update network weights; called in every training iteration""" # caculate the intermediate results if necessary; here self.output has been computed during function <forward> # calculate loss given the input and intermediate results self.loss.backward() # calculate gradients of network G w.r.t. loss_G
/dp-celery-5.0.5.tar.gz/dp-celery-5.0.5/docs/reference/index.rst
.. _apiref: =============== API Reference =============== :Release: |version| :Date: |today| .. toctree:: :maxdepth: 1 cli celery celery.app celery.app.task celery.app.amqp celery.app.defaults celery.app.control celery.app.registry celery.app.backends celery.app.builtins celery.app.events celery.app.log celery.app.utils celery.app.autoretry celery.bootsteps celery.result celery.schedules celery.signals celery.security celery.utils.debug celery.exceptions celery.loaders celery.loaders.app celery.loaders.default celery.loaders.base celery.states celery.contrib.abortable celery.contrib.migrate celery.contrib.pytest celery.contrib.sphinx celery.contrib.testing.worker celery.contrib.testing.app celery.contrib.testing.manager celery.contrib.testing.mocks celery.contrib.rdb celery.events celery.events.receiver celery.events.dispatcher celery.events.event celery.events.state celery.beat celery.apps.worker celery.apps.beat celery.apps.multi celery.worker celery.worker.request celery.worker.state celery.worker.strategy celery.worker.consumer celery.worker.consumer.agent celery.worker.consumer.connection celery.worker.consumer.consumer celery.worker.consumer.control celery.worker.consumer.events celery.worker.consumer.gossip celery.worker.consumer.heart celery.worker.consumer.mingle celery.worker.consumer.tasks celery.worker.worker celery.bin.base celery.bin.celery celery.bin.worker celery.bin.beat celery.bin.events celery.bin.logtool celery.bin.amqp celery.bin.graph celery.bin.multi celery.bin.call celery.bin.control celery.bin.list celery.bin.migrate celery.bin.purge celery.bin.result celery.bin.shell celery.bin.upgrade
/collective.hostout-1.0a3.tar.gz/collective.hostout-1.0a3/collective/hostout/hostout.py
import logging, os, shutil, tempfile, urllib2, urlparse import setuptools.archive_util import datetime import zc.buildout import fabric import tarfile import ConfigParser import sys from hashlib import md5 import os from zc.buildout import buildout from os.path import join, exists from itertools import chain import re from zc.buildout.buildout import Buildout from paramiko import DSSKey, PKey, RSAKey from paramiko import SSHConfig from fabric.main import load_fabfile from fabric import api from fabric.state import output import time, random, md5 from collective.hostout import relpath import pkg_resources from setuptools import package_index from urllib import pathname2url from pkg_resources import resource_string, resource_filename """ 1. ensure we are on trunk and up to date somehow. 1. Find any dependencies that need a new release and increment the version and create a distribution 1. create a hostout.cfg which is a repeatable buildout which is pinned for deployment by listing all of all the eggs. 2. version this + all dev dependencies with a tag so can recover this version. 4. bundle the cfg up + eggs (maybe just dev eggs) 5. send to host 6. setup host it need be 7. overwrite with bundle and build """ def clean(lines): if lines is None: return [] return [l.strip() for l in lines.split('\n') if l.strip() != ''] _isurl = re.compile('([a-zA-Z0-9+.-]+)://').match max_name_len = 18 def get_all_extends(cfgfile): if _isurl(cfgfile): return [] config = ConfigParser.ConfigParser() config.optionxform = str config.read([cfgfile]) files = [cfgfile] if not 'buildout' in config.sections(): return files if not 'extends' in config.options('buildout'): return files extends = chain(*[el.split() for el in clean(config.get('buildout', 'extends'))]) curdir = os.path.dirname(cfgfile) for extend in extends: if not _isurl(extend): extend = os.path.join(curdir, extend) files.extend(get_all_extends(extend)) return files class DistributionGenerationException(Exception): def __init__(self, path, args): self.path = path self.args = args def __str__(self): return "Error releasing egg at %s: No egg found after \n python setup.py %s" % (self.path, self.args) class HostOut: def __init__(self, name, opt, packages, hostouts): self.buildout_dir = packages.buildout_location self.dist_dir = packages.dist_dir self.packages = packages self.hostout_package = None self.options = opt self.hostouts = hostouts self.name = name self.remote_dir = opt['path'] try: self.host, self.port = opt['host'].split(':') self.port = int(self.port) except: self.host = opt['host'] self.port = 22 self.user = opt['user'] self.password = opt['password'] self.identityfile = opt['identity-file'] self.start_cmd = opt.get('post-commands') self.stop_cmd = opt.get('pre-commands') self.extra_config = opt['include'] self.buildout_cfg = [p.strip() for p in opt['buildout'].split() if p.strip()] self.versions_part = opt.get('versions','versions') self.parts = [p.strip() for p in opt['parts'].split() if p.strip()] self.buildout_cache = opt.get('buildout-cache','') opt['download_cache']= "%s/%s" % (self.buildout_cache, 'downloads') if not self.buildout_cache: install_base = os.path.dirname(self.getRemoteBuildoutPath()) self.buildout_cache = os.path.join(install_base,'buildout-cache') self.fabfiles = [p.strip() for p in opt.get('fabfiles','').split() if p.strip()] #self.packages = opt['packages'] #dist_dir = os.path.abspath(os.path.join(self.buildout_location,self.dist_dir)) #if not os.path.exists(dist_dir): # os.makedirs(dist_dir) #self.tar = None self.sets = [] 
def getHostoutFile(self): #make sure package has generated self.getHostoutPackage() return self.config_file[len(self.packages.buildout_location)+1:] def getPreCommands(self): return self._subRemote(clean(self.stop_cmd)) def getPostCommands(self): return self._subRemote(clean(self.start_cmd)) def getBuildoutDependencies(self): abs = lambda p: os.path.abspath(os.path.join(self.getLocalBuildoutPath(),p)) return [abs(p) for p in clean(self.extra_config)] def getLocalBuildoutPath(self): return os.path.abspath(self.packages.buildout_location) def getRemoteBuildoutPath(self): return self.remote_dir def splitPath(self): """return the two parts of the path needed by unified installer, the base install path and the instance sub directory of the install path. It does this by assuming the last part of the path is the instance sub directory""" install_dir=os.path.split(self.remote_dir)[0] instance=os.path.split(self.remote_dir)[1] return (install_Dir, instance) def localEggs(self): self.getHostoutPackage() #ensure eggs are generated return [e for p,v,e in self.packages.local_eggs.values()] def getParts(self): return self.parts def getDownloadCache(self): return "%s/%s" % (self.buildout_cache, 'downloads') def getEggCache(self): return "%s/%s" % (self.buildout_cache, 'eggs') def _subRemote(self, cmds): "replace abs localpaths to the buildout with absluote remate buildout paths" return [c.replace(self.getLocalBuildoutPath(), self.getRemoteBuildoutPath()) for c in cmds] # def getDeployTar(self): # return self.packages.getDeployTar() def getHostoutPackage(self): "determine all the buildout files that make up this configuration and package them" if self.hostout_package is not None: return self.hostout_package folder = self.dist_dir dist_dir = self.packages.dist_dir self.config_file = self.genhostout() config_file = os.path.abspath(os.path.join(self.packages.buildout_location,self.config_file)) base = os.path.dirname(config_file) if not os.path.exists(config_file): raise Exception("Invalid config file") files = get_all_extends(config_file) files += self.getBuildoutDependencies() self.packages.writeVersions(config_file, self.versions_part) dist_dir = self.dist_dir self.releaseid = '%s_%s'%(time.time(),uuid()) self.releaseid = _dir_hash(files) name = '%s/%s_%s.tgz'%(dist_dir,'deploy', self.releaseid) self.hostout_package = name if os.path.exists(name): return name else: self.tar = tarfile.open(name,"w:gz") for file in files: relative = file[len(self.buildout_dir)+1:] #TODO self.tar.add(file,arcname=relative) self.tar.close() return self.hostout_package def getIdentityKey(self): keyfile = os.path.abspath(os.path.join(self.getLocalBuildoutPath(),'hostout_rsa')) keyfile = self.options.get('identity-file', keyfile) if not os.path.exists(keyfile): key = RSAKey.generate(1024) key.write_private_key_file(keyfile) else: key = RSAKey.from_private_key_file(keyfile) return keyfile, "ssh-rsa %s hostout@hostout" % key.get_base64() def readsshconfig(self): config = os.path.expanduser('~/.ssh/config') if not os.path.exists(config): return f = open(config,'r') sshconfig = SSHConfig() sshconfig.parse(f) f.close() host = self.host try: host,port = host.split(':') except: port = None opt = sshconfig.lookup(host) if port is None: port = opt.get('port') host = opt.get('hostname', host) if port: host = "%s:%s" % (host,port) self.host=host if not self.identityfile: self.identityfile = opt.get('identityfile', None) if self.identityfile: self.identityfile = os.path.expanduser(self.identityfile).strip() if not self.user: 
self.user=opt.get('user','root') def allcmds(self): if self.sets: return self._allcmds self.sets.extend( findfabfiles() ) for fabfile in self.fabfiles: #fabric._load_default_settings() commands = load_fabfile(fabfile) self.sets.append((commands,fabfile)) self._allcmds = {} for commands,fabfile in self.sets: self._allcmds.update(commands) return self._allcmds def runfabric(self, cmds=None, cmdargs=[]): "return all commands if none found to run" res = True ran = False #sets = [(fabric.COMMANDS,"<DEFAULT>")] self.allcmds() sets = self.sets self.options['user'] = self.options['user'] or self.user or 'root' self.options['effective-user'] = self.options['effective-user'] or self.user or 'root' self.options['buildout-user'] = self.options['buildout-user'] or self.user or 'root' api.env['hostout'] = self api.env.update( self.options ) #api.env.path = '' #HACK - path == cwd if self.password: api.env['password']=self.password if self.identityfile and os.path.exists(self.identityfile): api.env['key_filename']=self.identityfile api.env.update( dict( user=self.user, hosts=[self.host], port=self.port, )) inits = [(set.get('initcommand'),fabfile) for set,fabfile in sets if 'initcommand' in set] for cmd in cmds: # Let plugins change host or user if they want for func,fabfile in inits: func(cmd) funcs = [(set.get(cmd),fabfile) for set,fabfile in sets if cmd in set] if not funcs: host = api.env.host print >> sys.stderr, "'%(cmd)s' is not a valid command for host '%(host)s'"%locals() break for func,fabfile in funcs: print "Hostout: Running command '%(cmd)s' from '%(fabfile)s'" % locals() api.env['host'] = api.env.hosts[0] api.env['host_string']="%(user)s@%(host)s:%(port)s"%api.env api.env.cwd = '' output.debug = True ran = True if cmd == cmds[-1]: res = func(*cmdargs) else: res = func() if res not in [None,True]: print >> sys.stderr, "Hostout aborted" res = False break else: res = True def __getattr__(self, name): """ call all the methods by this name in fabfiles """ if name not in self.allcmds(): raise AttributeError() def run(*args): return self.runfabric([name], args) return run # def genhostout(self): # """ generate a new buildout file which pins versions and uses our deployment distributions""" # # base = self.buildout_dir # files = [relpath(file, base) for file in self.buildout_cfg] #dist_dir = relpath(self.dist_dir, base) #versions = "" # hostout = HOSTOUT_TEMPLATE % dict(buildoutfile=' '.join(files), #eggdir=dist_dir, # download_cache=self.getDownloadCache(), # egg_cache=self.getEggCache(), # ) # path = os.path.join(base,'%s.cfg'%self.name) # hostoutf = open(path,'w') # hostoutf.write(hostout) # hostoutf.close() # return path def genhostout(self): base = self.buildout_dir path = os.path.join(base,'%s.cfg'%self.name) config = ConfigParser.ConfigParser() config.optionxform = str config.read([path]) if 'buildout' not in config.sections(): config.add_section('buildout') files = [self.options['versionsfile']] + self.buildout_cfg files = [relpath(file, base) for file in files] config.set('buildout', 'extends', ' '.join(files)) config.set('buildout', 'develop', '') config.set('buildout', 'eggs-directory', self.getEggCache()) config.set('buildout', 'download-cache', self.getDownloadCache()) config.set('buildout', 'newest', 'true') if self.getParts(): config.set('buildout', 'parts', ' '.join(self.getParts())) fp = open(path,'w') config.write(fp) fp.close() return path HOSTOUT_TEMPLATE = """ [buildout] extends = %(buildoutfile)s #prevent us looking for them as developer eggs develop= #install-from-cache = true 
#Match to unifiedinstaller eggs-directory = %(egg_cache)s download-cache = %(download_cache)s #non-newest set because we know exact versions we want newest=true """ import zc.buildout.easy_install from zc.buildout.buildout import pkg_resources_loc class Packages: """ responsible for packaging the development eggs ready to be released to each host""" def __init__(self, config): self.packages = packages = [p for p in config.get('buildout','packages').split()] self.buildout_location = config.get('buildout', 'location') self.dist_dir = config.get('buildout','dist_dir') # self.versions = dict(config.items('versions')) self.tar = None dist_dir = os.path.abspath(os.path.join(self.buildout_location,self.dist_dir)) if not os.path.exists(dist_dir): os.makedirs(dist_dir) self.dist_dir = dist_dir self.local_eggs = {} def getDistEggs(self): eggs = pkg_resources.find_distributions(self.dist_dir) return dict([(( egg.project_name,egg.version),egg) for egg in eggs]) #eggs = pkg_resources.Environment(self.dist_dir) #return dict([(( egg.project_name,egg.version),egg) for egg in eggs]) def release_eggs(self): "developer eggs->if changed, increment versions, build and get ready to upload" # first get list of deveelop packages we got from recipe # for each package # if self.local_eggs: return self.local_eggs #python setup.py sdist bdist_egg # tmpdir = tempfile.mkdtemp() localdist_dir = tempfile.mkdtemp() eggs = self.getDistEggs() donepackages = [] ids = {} self.local_eggs = {} released = {} if self.packages: print "Hostout: Preparing eggs for transport" for path in self.packages: # use buildout to run setup for us hash = _dir_hash([path]) ids[hash]=path path = os.path.abspath(path) dist = self.find_distributions(path) if len(dist): dist = dist[0] egg = eggs.get( (dist.project_name, dist.version) ) else: egg = None if egg and hash in dist.version: self.local_eggs[dist.project_name] = (dist.project_name, dist.version, egg.location) elif os.path.isdir(path): print "Hostout: Develop egg %s changed. Releasing with hash %s" % (path,hash) args=[path, 'clean', 'egg_info', '--tag-build','dev_'+hash, 'sdist', '--formats=zip', #fix bizzare gztar truncation on windows # 'bdist_egg', '--dist-dir', '%s'%localdist_dir, ] res = self.setup(args = args) dist = self.find_distributions(path) if not len(dist) or not os.listdir(localdist_dir): raise DistributionGenerationException(path, args) dist = dist[0] pkg = os.listdir(localdist_dir)[0] loc = os.path.join(self.dist_dir, pkg) if os.path.exists(loc): os.remove(loc) shutil.move(os.path.join(localdist_dir, pkg), self.dist_dir) self.local_eggs[dist.project_name] = (dist.project_name, dist.version, loc) #released[dist.project_name] = dist.version else: # shutil.copy(path,localdist_dir) self.local_eggs[path] = (None, None, path) if released: import pdb; pdb.set_trace() env = package_index.PackageIndex('file://'+pathname2url(localdist_dir)) #eggs = self.getDistEggs() for (name,version) in released.items(): req = pkg_resources.Requirement.parse("%(name)s==%(version)s"%locals()) env.prescan() egg = env.find_packages(req) #egg = eggs.get( (name, version) ) if egg: self.local_eggs[name] = (name, version, egg.location) else: raise Exception("%(name)s wasn't generated. 
See errors above" % locals()) if self.local_eggs: specs = ["\t%s = %s"% (p,v) for p,v,e in self.local_eggs.values()] print "Hostout: Eggs to transport:\n%s" % '\n'.join(specs) return self.local_eggs def find_distributions(self, path): #HACK: need to parse setup.py instead assuming src return [d for d in pkg_resources.find_distributions(path, only=True)] + \ [d for d in pkg_resources.find_distributions(os.path.join(path,'src'), only=True)] def getVersion(self, path): "Test to see if we already have a release of this developer egg" dist = [d for d in pkg_resources.find_distributions(path, only=True)] dist = dist[0] return dist.version def writeVersions(self, versions_file, part): self.release_eggs() #ensure we've got self.develop_versions # assert len(specs) == len(self.packages) config = ConfigParser.RawConfigParser() config.optionxform = str config.read([versions_file]) specs = {} # specs.update(self.versions) #have to use lower since eggs are case insensitive specs.update(dict([(p,v) for p,v,e in self.local_eggs.values()])) config.set('buildout', 'versions', part) if part in config.sections(): config.remove_section(part) config.add_section(part) for name, version in sorted(specs.items()): config.set(part,name,version) fp = open(versions_file,'w') config.write(fp) fp.close() print "Hostout: Wrote versions to %s"%versions_file def setup(self, args): setup = args.pop(0) if os.path.isdir(setup): setup = os.path.join(setup, 'setup.py') #self._logger.info("Running setup script %r.", setup) setup = os.path.abspath(setup) fd, tsetup = tempfile.mkstemp() try: os.write(fd, zc.buildout.easy_install.runsetup_template % dict( setuptools=pkg_resources_loc, setupdir=os.path.dirname(setup), setup=setup, __file__ = setup, )) os.spawnl(os.P_WAIT, sys.executable, zc.buildout.easy_install._safe_arg (sys.executable), tsetup, *[zc.buildout.easy_install._safe_arg(a) for a in args]) finally: os.close(fd) os.remove(tsetup) def main(cfgfile, args): "execute the fabfile we generated" config = ConfigParser.ConfigParser() config.optionxform = str config.read([cfgfile]) files = [cfgfile] allhosts = {} # buildout = Buildout(config.get('buildout','buildout'),[]) packages = Packages(config) #eggs = packages.release_eggs() # for section in [s for s in config.sections() if s not in ['buildout', 'versions']]: options = dict(config.items(section)) hostout = HostOut(section, options, packages, allhosts) allhosts[section] = hostout # cmdline is bin/hostout host1 host2 ... cmd1 cmd2 ... arg1 arg2... cmds = [] cmdargs = [] hosts = [] pos = 'hosts' for arg in args + [None]: if pos == 'hosts': if arg in allhosts: hosts += [(arg,allhosts[arg])] continue elif arg == 'all': hosts = allhosts.items() else: pos = 'cmds' # get all cmds allcmds = {'deploy':None} for host,hostout in hosts: hostout.readsshconfig() allcmds.update(hostout.allcmds()) if pos == 'cmds': if arg == 'deploy': cmds += ['predeploy','uploadeggs','uploadbuildout','buildout','postdeploy'] continue elif arg in allcmds: cmds += [arg] continue pos = 'args' if pos == 'args' and arg is not None: cmdargs += [arg] if not hosts or not cmds: print >> sys.stderr, "cmdline is: bin/hostout host1 [host2...] [all] cmd1 [cmd2...] 
[arg1 arg2...]" if not hosts: print >> sys.stderr, "Valid hosts are: %s"% ' '.join(allhosts.keys()) elif not cmds: print >> sys.stderr, "Valid commands are:" max_name_len = reduce(lambda a,b: max(a, len(b)), allcmds.keys(), 0) cmds = allcmds.items() cmds.sort(lambda x,y: cmp(x[0], y[0])) for name, fn in cmds: print >> sys.stderr, ' ', name.ljust(max_name_len), if fn.__doc__: print >> sys.stderr, ':', fn.__doc__.splitlines()[0] elif name == 'deploy': print >> sys.stderr, ':', 'predeploy, uploadeggs, uploadbuildout, buildout and then postdeploy' else: print >> sys.stderr, '' else: try: for host, hostout in hosts: hostout.readsshconfig() hostout.runfabric(cmds, cmdargs) print("Done.") except SystemExit: # a number of internal functions might raise this one. raise except KeyboardInterrupt: print("Stopped.") # except: # sys.excepthook(*sys.exc_info()) # # we might leave stale threads if we don't explicitly exit() # return False # finally: # #disconnect_all() # pass def is_task(tup): """ Takes (name, object) tuple, returns True if it's a non-Fab public callable. """ name, func = tup return ( callable(func) and not name.startswith('_') ) # # Use setuptools entry points to find the fabfiles in this env # def findfabfiles(): from pkg_resources import iter_entry_points fabfiles = [] for ep in iter_entry_points( group='fabric', # Use None to get all entry point names name=None, ): imported = ep.load() funcs = dict(filter(is_task, vars(imported).items())) fabfiles.append( (funcs, ep.module_name) ) # ep.name doesn't matter #print fabfiles return fabfiles # Fabric load_fabfile uses __import__ which doesn't always load from path import imp def load_fabfile(filename, **kwargs): """ Load up the given fabfile. This loads the fabfile specified by the `filename` parameter into fabric and makes its commands and other functions available in the scope of the current fabfile. If the file has already been loaded it will not be loaded again. May take an additional `fail` keyword argument with one of these values: * ignore - do nothing on failure * warn - print warning on failure * abort - terminate fabric on failure Example: load("conf/production-settings.py") """ if not os.path.exists(filename): raise Exception("Load failed:\n" "File not found: " + filename) return #if filename in _LOADED_FABFILES: # return #_LOADED_FABFILES.add(filename) captured = {} commands = {} #execfile(filename, _new_namespace(), captured) imported = imp.load_source(filename.replace('/','.'), filename) return dict(filter(is_task, vars(imported).items())) def uuid( *args ): """ Generates a universally unique ID. Any arguments only create more randomness. 
""" t = long( time.time() * 1000 ) r = long( random.random()*100000000000000000L ) try: a = socket.gethostbyname( socket.gethostname() ) except: # if we can't get a network address, just imagine one a = random.random()*100000000000000000L data = str(t)+' '+str(r)+' '+str(a)+' '+str(args) data = md5.md5(data).hexdigest() return data ignore_directories = '.svn', 'CVS', 'build', '.git' ignore_files = ['PKG-INFO'] def _dir_hash(paths): hash = md5.new() for path in paths: if os.path.isdir(path): walked = os.walk(path) else: walked = [(os.path.dirname(path), [], [os.path.basename(path)])] for (dirpath, dirnames, filenames) in walked: dirnames[:] = [n for n in dirnames if not (n in ignore_directories or n.endswith('.egg-info'))] filenames[:] = [f for f in filenames if not (f in ignore_files or f.endswith('pyc') or f.endswith('pyo'))] hash.update(' '.join(dirnames)) hash.update(' '.join(filenames)) for name in filenames: hash.update(open(os.path.join(dirpath, name)).read()) import base64 hash = base64.urlsafe_b64encode(hash.digest()).strip() hash = hash.replace('_','-').replace('=','') return hash
/kua-0.2.tar.gz/kua-0.2/docs/index.rst
.. kua documentation master file, created by
   sphinx-quickstart on Sat Aug 27 14:46:31 2016.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

kua's docs
==========

User’s Guide
------------

.. toctree::
   :maxdepth: 2

   installation
   usage
   recipes
   limitations

API Reference
-------------

Information on a specific function, class or method.

.. toctree::
   :maxdepth: 2

   api

Additional Notes
----------------

Design notes, legal information and changelog.

.. toctree::
   :maxdepth: 2

   changelog
   license
/artellapipe-libs-usd-0.0.5.tar.gz/artellapipe-libs-usd-0.0.5/artellapipe/libs/usd/externals/python/2/OpenGL/GL/ARB/texture_compression.py
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.texture_compression import *
from OpenGL.raw.GL.ARB.texture_compression import _EXTENSION_NAME


def glInitTextureCompressionARB():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions
    return extensions.hasGLExtension(_EXTENSION_NAME)


# INPUT glCompressedTexImage3DARB.data size not checked against imageSize
glCompressedTexImage3DARB = wrapper.wrapper(glCompressedTexImage3DARB).setInputArraySize(
    'data', None
)
# INPUT glCompressedTexImage2DARB.data size not checked against imageSize
glCompressedTexImage2DARB = wrapper.wrapper(glCompressedTexImage2DARB).setInputArraySize(
    'data', None
)
# INPUT glCompressedTexImage1DARB.data size not checked against imageSize
glCompressedTexImage1DARB = wrapper.wrapper(glCompressedTexImage1DARB).setInputArraySize(
    'data', None
)
# INPUT glCompressedTexSubImage3DARB.data size not checked against imageSize
glCompressedTexSubImage3DARB = wrapper.wrapper(glCompressedTexSubImage3DARB).setInputArraySize(
    'data', None
)
# INPUT glCompressedTexSubImage2DARB.data size not checked against imageSize
glCompressedTexSubImage2DARB = wrapper.wrapper(glCompressedTexSubImage2DARB).setInputArraySize(
    'data', None
)
# INPUT glCompressedTexSubImage1DARB.data size not checked against imageSize
glCompressedTexSubImage1DARB = wrapper.wrapper(glCompressedTexSubImage1DARB).setInputArraySize(
    'data', None
)
# OUTPUT glGetCompressedTexImageARB.img COMPSIZE(target, level)
### END AUTOGENERATED SECTION
from OpenGL.GL import images

for dimensions in (1, 2, 3):
    for function in ('glCompressedTexImage%sDARB', 'glCompressedTexSubImage%sDARB'):
        name = function % (dimensions,)
        globals()[name] = images.compressedImageFunction(
            globals()[name]
        )
try:
    del name, function
except NameError as err:
    pass
try:
    del dimensions
except NameError as err:
    pass

if glGetCompressedTexImageARB:
    def glGetCompressedTexImageARB(target, level, img=None):
        """Retrieve a compressed texture image"""
        if img is None:
            length = glget.glGetTexLevelParameteriv(
                target, 0,
                GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB,
            )
            img = arrays.ArrayDataType.zeros((length,), GL_1_0.GL_UNSIGNED_BYTE)
        return glGetCompressedTexImageARB(target, 0, img)
/seelabletv2-1.0.0.tar.gz/seelabletv2-1.0.0/seelabv2GUI/advanced_logger.py
import sys,time,configparser t = time.time() from . import utils if sys.version_info.major==3: from PyQt5 import QtGui, QtCore, QtWidgets else: from PyQt4 import QtGui, QtCore from PyQt4 import QtGui as QtWidgets import pyqtgraph as pg import math, os.path, struct from collections import OrderedDict from .layouts import ui_advancedLogger from .layouts.oscilloscope_widget import DIOINPUT, colors import functools from functools import partial import numpy as np class Expt(QtWidgets.QMainWindow, ui_advancedLogger.Ui_MainWindow): def __init__(self, device=None): super(Expt, self).__init__() self.setupUi(self) self.splitter.setSizes([100,400]) self.logging = False self.p = device self.I2C = device.I2C #connection to the device hardware self.curve = self.plot.plot(pen=colors[0]) self.X = [] self.Y = [] #Define some keyboard shortcuts for ease of use self.shortcutActions={} self.shortcuts={" ":self.setRecord,'x':self.setX,'y':self.setY} for a in self.shortcuts: shortcut = QtWidgets.QShortcut(QtGui.QKeySequence(a), self) shortcut.activated.connect(self.shortcuts[a]) self.shortcutActions[a] = shortcut self.XInput = DIOINPUT(self,self.p,confirmValues = self.setXParameters,title="X Axis Parameter") self.YInput = DIOINPUT(self,self.p,confirmValues = self.setYParameters,title="Y Axis Parameter") #self.XInput.show() self.startTime = time.time() self.interval = 0.1 #Seconds self.timer = QtCore.QTimer() self.timer.timeout.connect(self.updateEverything) self.timer.start(2) def setConfig(self,text): config = configparser.ConfigParser(allow_no_value=True) config.read_string(text) print('received configuration:',config) if 'logging' in config: if 'interval' in config['logging']: self.delayBox.setValue(int(config['logging']['interval'])) if 'settling' in config['logging']: self.settlingTimeBox.setValue(int(config['logging']['settling'])) if 'datapoints' in config['logging']: self.datapointsBox.setValue(int(config['logging']['datapoints'])) for name,axis in zip(['x-axis','y-axis'],[self.XInput,self.YInput]): if name in config: if 'tool' in config[name]: axis.setWindow(config[name]['tool']) print(name,' set to ',config[name]['tool']) if 'minimum' in config[name]: axis.minValue.setValue(float(config[name]['minimum'])) if 'maximum' in config[name]: axis.maxValue.setValue(float(config[name]['maximum'])) if 'meter' in config[name]: index = axis.subSelection.findText(config[name]['meter'], QtCore.Qt.MatchFixedString) if index >= 0: axis.subSelection.setCurrentIndex(index) if 'channel' in config[name]: index = axis.miniscope.A1Box.findText(config[name]['channel'], QtCore.Qt.MatchFixedString) if index >= 0: axis.miniscope.A1Box.setCurrentIndex(index) if 'parameter' in config[name]: axis.miniscope.activeParameter = int(config[name]['parameter'])-1 axis.miniscope.list.setCurrentRow(axis.miniscope.activeParameter) if 'cross-check-frequency' in config[name]: axis.miniscope.freqCheckBox.setChecked(True) else: axis.miniscope.freqCheckBox.setChecked(False) axis.launch() axis.activateWindow() self.XInput.reposition('bottom-left') self.YInput.reposition('top-left') def setXParameters(self,s): self.xLabel.setText(s) def setYParameters(self,s): self.yLabel.setText(s) def setRecord(self,s): if s: if self.XInput.type == 'output': self.XInput.initSweep(self.datapointsBox.value()) if self.YInput.type == 'output': self.YInput.initSweep(self.datapointsBox.value()) self.plot.setRange(xRange=[self.XInput.minValue.value(),self.XInput.maxValue.value()],yRange=[self.YInput.minValue.value(),self.YInput.maxValue.value()]) self.logging = True 
time.sleep(0.5) #self.XInput.hide() #self.YInput.hide() self.curve.clear() self.X = [] self.Y = [] for a in [self.XInput,self.YInput]: if a.name == 'Time': a.initialize() self.startTime = time.time() self.interval = self.delayBox.value()/1000. else: self.logging = False self.XInput.message.setText("Done") self.YInput.message.setText("Done") def setX(self): self.XInput.launch() self.XInput.activateWindow() def setY(self): self.YInput.launch() self.YInput.activateWindow() def updateEverything(self): if self.logging: if (time.time() - self.startTime) > self.interval: self.startTime = time.time() x = self.XInput.nextValue() time.sleep(self.settlingTimeBox.value()/1000.) y = self.YInput.nextValue(freq = x) if x == None or y == None: self.logging = False #Stop the logging. self.logBox.setChecked(False) if x and y: self.X.append(x) self.Y.append(y) self.curve.setData(self.X,self.Y) else: for a in [self.XInput,self.YInput]: if a.isVisible() and a.type=='input' and a.autoRefresh: v = a.read() if v is not None: a.setValue(v) if __name__ == '__main__': from . eyes17 import eyes dev = eyes17.eyes.open() app = QtWidgets.QApplication(sys.argv) # translation stuff lang=QtCore.QLocale.system().name() t=QtCore.QTranslator() t.load("lang/"+lang, os.path.dirname(__file__)) app.installTranslator(t) t1=QtCore.QTranslator() t1.load("qt_"+lang, QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath)) app.installTranslator(t1) mw = Expt(dev) mw.show() sys.exit(app.exec_())
/CausalPy-0.1.0-py3-none-any.whl/causalpy/pymc_models.py
from typing import Any, Dict, Optional

import arviz as az
import numpy as np
import pandas as pd
import pymc as pm
import pytensor.tensor as pt
from arviz import r2_score


class ModelBuilder(pm.Model):
    """
    This is a wrapper around pm.Model to give scikit-learn like API.
    """

    def __init__(self, sample_kwargs: Optional[Dict[str, Any]] = None):
        """
        :param sample_kwargs: A dictionary of kwargs that get unpacked and passed to the
            :func:`pymc.sample` function. Defaults to an empty dictionary.
        """
        super().__init__()
        self.idata = None
        self.sample_kwargs = sample_kwargs if sample_kwargs is not None else {}

    def build_model(self, X, y, coords) -> None:
        """Build the model.

        Example
        -------
        >>> class CausalPyModel(ModelBuilder):
        >>>    def build_model(self, X, y):
        >>>        with self:
        >>>            X_ = pm.MutableData(name="X", value=X)
        >>>            y_ = pm.MutableData(name="y", value=y)
        >>>            beta = pm.Normal("beta", mu=0, sigma=1, shape=X_.shape[1])
        >>>            sigma = pm.HalfNormal("sigma", sigma=1)
        >>>            mu = pm.Deterministic("mu", pm.math.dot(X_, beta))
        >>>            pm.Normal("y_hat", mu=mu, sigma=sigma, observed=y_)
        """
        raise NotImplementedError("This method must be implemented by a subclass")

    def _data_setter(self, X) -> None:
        with self.model:
            pm.set_data({"X": X})

    def fit(self, X, y, coords: Optional[Dict[str, Any]] = None) -> None:
        """Draw samples from posterior, prior predictive, and posterior predictive
        distributions.
        """
        self.build_model(X, y, coords)
        with self.model:
            self.idata = pm.sample(**self.sample_kwargs)
            self.idata.extend(pm.sample_prior_predictive())
            self.idata.extend(
                pm.sample_posterior_predictive(self.idata, progressbar=False)
            )
        return self.idata

    def predict(self, X):
        """Predict data given input data `X`"""
        self._data_setter(X)
        with self.model:  # sample with new input data
            post_pred = pm.sample_posterior_predictive(
                self.idata, var_names=["y_hat", "mu"], progressbar=False
            )
        return post_pred

    def score(self, X, y) -> pd.Series:
        """Score the Bayesian :math:`R^2` given inputs ``X`` and outputs ``y``.

        .. caution::

            The Bayesian :math:`R^2` is not the same as the traditional coefficient of
            determination, https://en.wikipedia.org/wiki/Coefficient_of_determination.
        """
        yhat = self.predict(X)
        yhat = az.extract(
            yhat, group="posterior_predictive", var_names="y_hat"
        ).T.values
        # Note: First argument must be a 1D array
        return r2_score(y.flatten(), yhat)

    # .stack(sample=("chain", "draw")


class WeightedSumFitter(ModelBuilder):
    """Used for synthetic control experiments"""

    def build_model(self, X, y, coords):
        """Defines the PyMC model"""
        with self:
            self.add_coords(coords)
            n_predictors = X.shape[1]
            X = pm.MutableData("X", X, dims=["obs_ind", "coeffs"])
            y = pm.MutableData("y", y[:, 0], dims="obs_ind")
            # TODO: There we should allow user-specified priors here
            beta = pm.Dirichlet("beta", a=np.ones(n_predictors), dims="coeffs")
            # beta = pm.Dirichlet(
            #     name="beta", a=(1 / n_predictors) * np.ones(n_predictors),
            #     dims="coeffs"
            # )
            sigma = pm.HalfNormal("sigma", 1)
            mu = pm.Deterministic("mu", pm.math.dot(X, beta), dims="obs_ind")
            pm.Normal("y_hat", mu, sigma, observed=y, dims="obs_ind")


class LinearRegression(ModelBuilder):
    """Custom PyMC model for linear regression"""

    def build_model(self, X, y, coords):
        """Defines the PyMC model"""
        with self:
            self.add_coords(coords)
            X = pm.MutableData("X", X, dims=["obs_ind", "coeffs"])
            y = pm.MutableData("y", y[:, 0], dims="obs_ind")
            beta = pm.Normal("beta", 0, 50, dims="coeffs")
            sigma = pm.HalfNormal("sigma", 1)
            mu = pm.Deterministic("mu", pm.math.dot(X, beta), dims="obs_ind")
            pm.Normal("y_hat", mu, sigma, observed=y, dims="obs_ind")


class InstrumentalVariableRegression(ModelBuilder):
    """Custom PyMC model for instrumental linear regression"""

    def build_model(self, X, Z, y, t, coords, priors):
        """Specify model with treatment regression and focal regression data and priors

        :param X: A pandas dataframe used to predict our outcome y
        :param Z: A pandas dataframe used to predict our treatment variable t
        :param y: An array of values representing our focal outcome y
        :param t: An array of values representing the treatment t of which we're
            interested in estimating the causal impact
        :param coords: A dictionary with the coordinate names for our instruments
            and covariates
        :param priors: An optional dictionary of priors for the mus and sigmas of both
            regressions
            :code:`priors = {"mus": [0, 0], "sigmas": [1, 1], "eta": 2, "lkj_sd": 2}`
        """

        # --- Priors ---
        with self:
            self.add_coords(coords)
            beta_t = pm.Normal(
                name="beta_t",
                mu=priors["mus"][0],
                sigma=priors["sigmas"][0],
                dims="instruments",
            )
            beta_z = pm.Normal(
                name="beta_z",
                mu=priors["mus"][1],
                sigma=priors["sigmas"][1],
                dims="covariates",
            )
            sd_dist = pm.HalfCauchy.dist(beta=priors["lkj_sd"], shape=2)
            chol, corr, sigmas = pm.LKJCholeskyCov(
                name="chol_cov",
                eta=priors["eta"],
                n=2,
                sd_dist=sd_dist,
            )
            # compute and store the covariance matrix
            pm.Deterministic(name="cov", var=pt.dot(l=chol, r=chol.T))

            # --- Parameterization ---
            mu_y = pm.Deterministic(name="mu_y", var=pm.math.dot(X, beta_z))  # focal regression
            mu_t = pm.Deterministic(name="mu_t", var=pm.math.dot(Z, beta_t))  # instrumental regression
            mu = pm.Deterministic(name="mu", var=pt.stack(tensors=(mu_y, mu_t), axis=1))

            # --- Likelihood ---
            pm.MvNormal(
                name="likelihood",
                mu=mu,
                chol=chol,
                observed=np.stack(arrays=(y.flatten(), t.flatten()), axis=1),
                shape=(X.shape[0], 2),
            )

    def fit(self, X, Z, y, t, coords, priors):
        """Draw samples from posterior, prior predictive, and posterior predictive
        distributions.
        """
        self.build_model(X, Z, y, t, coords, priors)
        with self.model:
            self.idata = pm.sample(**self.sample_kwargs)
            self.idata.extend(pm.sample_prior_predictive())
            self.idata.extend(
                pm.sample_posterior_predictive(self.idata, progressbar=False)
            )
        return self.idata
PypiClean
/starlink-pywrapper-0.3.tar.gz/starlink-pywrapper-0.3/starlink/kappa_help/thresh.rst
THRESH ====== Purpose ~~~~~~~ Edits an NDF to replace values between or outside given limits with specified constant values Description ~~~~~~~~~~~ This application creates an output NDF by copying values from an input NDF, replacing all values within given data ranges by a user-specified constant or by the bad value. Upper and lower thresholds are supplied using parameters THRLO and THRHI. If THRLO is less than or equal to THRHI, values between and including the two thresholds are copied from the input to output array. Any values in the input array greater than the upper threshold will be set to the value of parameter NEWHI, and anything less than the lower threshold will be set to the value of parameter NEWLO, in the output data array. Thus the output NDF is constrained to lie between the two bounds. If THRLO is greater than THRHI, values greater than or equal to THRLO are copied from the input to output array, together with values less than or equal to THRHI. Any values between THRLO and THRHI will be set to the value of parameter NEWLO in the output NDF. Each replacement value may be the bad-pixel value for masking. Usage ~~~~~ :: thresh in out thrlo thrhi newlo newhi [comp] ADAM parameters ~~~~~~~~~~~~~~~ COMP = LITERAL (Read) ````````````````````` The components whose values are to be constrained between thresholds. The options are limited to the arrays within the supplied NDF. In general the value may be "Data", "Quality", "Error", or "Variance". If "Quality" is specified, then the quality values are treated as numerical values in the range 0 to 255. ["Data"] IN = NDF (Read) ``````````````` Input NDF structure containing the array to have thresholds applied. NEWHI = LITERAL (Read) `````````````````````` This gives the value to which all input array-element values greater than the upper threshold are set. If this is set to "Bad", the bad value is substituted. Numerical values of NEWHI must lie in within the minimum and maximum values of the data type of the array being processed. The suggested default is the upper threshold. This parameter is ignored if THRLO is greater than THRHI. NEWLO = LITERAL (Read) `````````````````````` This gives the value to which all input array-element values less than the lower threshold are set. If this is set to "Bad", the bad value is substituted. Numerical values of NEWLO must lie in within the minimum and maximum values of the data type of the array being processed. The suggested default is the lower threshold. NUMHI = _INTEGER (Write) ```````````````````````` The number of pixels whose values were thresholded as being greater than the THRHI threshold. NUMLO = _INTEGER (Write) ```````````````````````` The number of pixels whose values were thresholded as being less than the THRLO threshold. NUMRANGE = _INTEGER (Write) ``````````````````````````` The number of pixels whose values were thresholded as being between the THRLO and THRHI thresholds, if THRLO is greater than THRHI. NUMSAME = _INTEGER (Write) `````````````````````````` The number of unchanged pixels. OUT = NDF (Write) ````````````````` Output NDF structure containing the thresholded version of the array. THRHI = _DOUBLE (Read) `````````````````````` The upper threshold value within the input array. It must lie in within the minimum and maximum values of the data type of the array being processed. The suggested default is the current value. THRLO = _DOUBLE (Read) `````````````````````` The lower threshold value within the input array. 
It must lie within the minimum and maximum values of the data type of the array being processed. The suggested default is the current value. TITLE = LITERAL (Read) `````````````````````` Title for the output NDF structure. A null value (!) propagates the title from the input NDF to the output NDF. [!] Examples ~~~~~~~~ thresh zzcam zzcam2 100 500 0 0 This copies the data array in the NDF called zzcam to the NDF called zzcam2. Any data value less than 100 or greater than 500 in zzcam is set to 0 in zzcam2. thresh zzcam zzcam2 500 100 0 This copies the data array in the NDF called zzcam to the NDF called zzcam2. Any data value less than 500 and greater than 100 in zzcam is set to 0 in zzcam2. thresh zzcam zzcam2 100 500 0 0 comp=Variance As above except that the data array is copied unchanged and the thresholds apply to the variance array. thresh n253 n253cl thrlo=-0.5 thrhi=10.1 \ This copies the data array in the NDF called n253 to the NDF called n253cl. Any data value less than -0.5 in n253 is set to -0.5 in n253cl, and any value greater than 10.1 in n253 becomes 10.1 in n253cl. thresh pavo pavosky -0.02 0.02 bad bad All data values outside the range -0.02 to 0.02 in the NDF called pavo become bad in the NDF called pavosky. All values within this range are copied from pavo to pavosky. Related Applications ~~~~~~~~~~~~~~~~~~~~ KAPPA: HISTEQ, MATHS; Figaro: CLIP, IDIFF, RESCALE. Copyright ~~~~~~~~~ Copyright (C) 1991, 1994 Science & Engineering Research Council. Copyright (C) 1996, 1998, 2000-2001, 2004 Central Laboratory of the Research Councils. Copyright (C) 2012 Science & Technology Facilities Council. All Rights Reserved. Licence ~~~~~~~ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either Version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Implementation Status ~~~~~~~~~~~~~~~~~~~~~ + This routine correctly processes the AXIS, DATA, QUALITY, VARIANCE, LABEL, TITLE, UNITS, WCS and HISTORY components of an NDF data structure and propagates all extensions. + Processing of bad pixels and automatic quality masking are supported. + All non-complex numeric data types can be handled. + Any number of NDF dimensions is supported.
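The two threshold modes described above can be sketched in NumPy as follows; this is an illustrative approximation only and ignores the NDF component selection, bad-pixel substitution and the NUM* output parameters.

import numpy as np

def thresh_sketch(data, thrlo, thrhi, newlo, newhi):
    # Rough analogue of the THRLO/THRHI behaviour documented above.
    out = data.copy()
    if thrlo <= thrhi:
        # keep values inside [thrlo, thrhi], replace the rest with newlo/newhi
        out[data < thrlo] = newlo
        out[data > thrhi] = newhi
    else:
        # keep values <= thrhi or >= thrlo, replace the band in between with newlo
        out[(data > thrhi) & (data < thrlo)] = newlo
    return out

a = np.array([50.0, 150.0, 450.0, 600.0])
print(thresh_sketch(a, 100, 500, 0, 0))   # [  0. 150. 450.   0.]
print(thresh_sketch(a, 500, 100, 0, 0))   # [ 50.   0.   0. 600.]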
PypiClean
/rabbitmq-alert-1.8.1.tar.gz/rabbitmq-alert-1.8.1/README.rst
About RabbitMQ Alert ==================== Send notifications when predefined conditions are met. Which conditions? ================= - Ready messages - Unacknowledged messages - Total queued messages - Number of connected consumers - Number of open connections - Number of nodes running - Memory used by each node in MBs | My inspiration to create this notification sender is to monitor a set of Celery workers. Sometimes they stop working and monitoring | the queue size seems to be an easy way to know when these situations happen. Additionally, automatically monitoring the queue sizes | is a great way to scale up/down the number of workers. What type of notifications? =========================== Currently the following are supported: - E-mails - Slack messages - Telegram messages Installation ============ Use the ``PIP`` command, which should already exist in your Linux installation: :: sudo pip install rabbitmq-alert Usage ===== Execute with the global configuration file ------------------------------------------ Copy the example configuration file to the default path of the global configuration file: :: sudo cp /etc/rabbitmq-alert/config.ini.example /etc/rabbitmq-alert/config.ini | Edit it with your preferred settings. Then you are ready to execute ``rabbitmq-alert`` | using the global configuration file. Just execute: :: sudo rabbitmq-alert Execute with options -------------------- | You can execute ``rabbitmq-alert`` along with the provided options, but first take a look at ``--help`` to see what's available | and the purpose of each option. Example: :: sudo rabbitmq-alert \ --host=my-server --port=55672 --username=guest --password=guest \ --vhost=%2F --queue=my_queue1,my_queue2 --ready-queue-size=3 --check-rate=300 \ [email protected] [email protected] \ --email-subject="RabbitMQ alert at %s - %s" --email-server=localhost Execute with a custom configuration file ---------------------------------------- | Alternatively, you can use a custom configuration file. For the required format, take a look | at the ``/etc/rabbitmq-alert/config.ini.example`` file. Then execute ``rabbitmq-alert`` with the configuration file option: :: sudo rabbitmq-alert -c my_config.ini Execute as a daemon ------------------- | A ``systemd`` script is created upon installation with ``PIP``. | Use the following commands to reload the ``systemd`` configuration | and start ``rabbitmq-alert`` as a daemon. :: sudo systemctl daemon-reload sudo systemctl start rabbitmq-alert To have ``rabbitmq-alert`` always started on boot: :: sudo systemctl enable rabbitmq-alert In case your system still uses ``init.d``, an ``init.d`` script has been created in ``/etc/init.d`` upon ``PIP`` installation. To start ``rabbitmq-alert`` as a daemon: :: sudo /etc/init.d/rabbitmq-alert start To have ``rabbitmq-alert`` always started on boot: :: sudo update-rc.d rabbitmq-alert defaults Different options per queue --------------------------- | Besides the conditions that apply to all queues, you can also define queue-specific conditions | in the configuration file, in case you want to have fine-tuned options for each queue. | Just create a ``[Conditions]`` section for each queue. Example: :: [Conditions:my-queue] ... [Conditions:my-other-queue] ... Note that queue names also have to be defined in the ``[Server]`` section of the configuration file: :: [Server] ... queues=my-queue,my-other-queue ... Logging ------- | You can find the logs of ``rabbitmq-alert`` in ``/var/log/rabbitmq-alert/``. | Log files are rotated on a daily basis.
Execute in a container ---------------------- | There is a Docker image for the project. First, you have to create a configuration file | for ``rabbitmq-alert``, which will then be copied into the container. Then you can run | ``rabbitmq-alert`` inside a container. :: docker run -d --name rabbitmq-alert -v config.ini:/etc/rabbitmq-alert/config.ini \ mylkoh/rabbitmq-alert:latest For the configuration file, refer to the ``config.ini.example`` that exists in the project's repository. Contribute ========== | The project ``rabbitmq-alert`` is written in ``python2``. | Of course, you can contribute to the project. Take a look at the GitHub “Issues” page and pick an issue to implement or fix. | Fork the project, develop and then create a pull request, in order for your code to be added to the project. Prepare your environment ------------------------ To start, you have to install the dev dependencies, which are some required python packages: :: make deps-dev Run the tests! -------------- After writing your awesomeness, run the test suites to ensure that everything is still fine: :: make test Before running them, ensure that you have removed the rabbitmqalert package from your system. Otherwise you may find yourself running the tests on the installed package instead of the source code. Do add tests yourself for the code you contribute to ensure the quality of the project. Happy coding :-) Build and publish a new container version ----------------------------------------- To build a new image version of the project: :: docker build --no-cache -t mylkoh/rabbitmq-alert:1.2.2 -t mylkoh/rabbitmq-alert:latest . Publish the image: :: docker push mylkoh/rabbitmq-alert Testing the container --------------------- Create a network that all containers will belong to: :: docker network create rabbitmq-alert Run ``rabbitmq`` in a container: :: docker run -d --name some-rabbit --net rabbitmq-alert -p 8080:15672 rabbitmq:3-management | You can then go to http://localhost:8080 in a browser to use the management plugin. | The username and password are both ``guest``. Create a fake SMTP server: :: docker run -d --name fake-smtp --net rabbitmq-alert -p 25:25 munkyboy/fakesmtp Now, run ``rabbitmq-alert`` using the same network: :: docker run -d --name rabbitmq-alert --net rabbitmq-alert \ -v config.ini:/etc/rabbitmq-alert/config.ini mylkoh/rabbitmq-alert:latest
PypiClean
/wellmap-3.4.0.tar.gz/wellmap-3.4.0/docs/basic_usage_r.rst
****************** Basic usage with R ****************** The following steps show how to get started with |wellmapr| in R: .. make-list-from-sections:: 1. Install wellmapr =================== Install |wellmapr| from GitHub. It's good to be aware that |wellmapr| is written in python and made available to R using the `reticulate`_ package. This detail shouldn't affect you in normal usage, but may be relevant if the installation doesn't go smoothly. See :doc:`this page <troubleshooting_r>` for more troubleshooting tips. .. code-block:: r > devtools::install_github("kalekundert/wellmap", subdir="wellmapr") 2. Describe the plate layout ============================ Write a `TOML file <file_format>` describing the layout of an experiment. For example, the following layout might be used for a standard curve: .. literalinclude:: basic_usage/std_curve.toml :language: toml :caption: :download:`std_curve.toml <basic_usage/std_curve.toml>` 3. Confirm the plate layout =========================== Confirm that the layout is correct by using |wellmapr::show()| to produce a visualization of the layout. This is an important step, because it's much easier to spot mistakes in the visualization than in the layout file itself. .. code-block:: r > wellmapr::show("std_curve.toml") This map shows that: - Each row is a different replicate. - Each column is a different dilution. .. figure:: basic_usage/std_curve_map.svg It's also possible to create maps like this from the command line, which may be more convenient in some cases. The best way to do this is to use `reticulate::py_config()`_ to find the path to the python installation used by reticulate_, then to invoke the :prog:`wellmap` command associated with that installation. The alias is optional, but could be saved in your shell configuration to make the command easier to remember: .. code-block:: console $ Rscript -e 'reticulate::py_config()' python: /home/kale/.local/share/r-miniconda/envs/r-reticulate/bin/python libpython: /home/kale/.local/share/r-miniconda/envs/r-reticulate/lib/libpython3.6m.so pythonhome: /home/kale/.local/share/r-miniconda/envs/r-reticulate:/home/kale/.local/share/r-miniconda/envs/r-reticulate version: 3.6.10 | packaged by conda-forge | (default, Apr 24 2020, 16:44:11) [GCC 7.3.0] numpy: /home/kale/.local/share/r-miniconda/envs/r-reticulate/lib/python3.6/site-packages/numpy numpy_version: 1.18.5 $ alias wellmap=/home/kale/.local/share/r-miniconda/envs/r-reticulate/bin/wellmap $ wellmap std_curve.toml 4. Prepare the data =================== Load the data from the experiment in question into a tidy_ data frame. Tidy data are easier to work with in general, and are required by |wellmapr| in particular. If you aren't familiar with the concept of tidy data, `this article`__ is a good introduction. The basic idea is to ensure that: __ https://r4ds.had.co.nz/tidy-data.html - Each variable is represented by a single column. - Each observation is represented by a single row. If possible, it's best to export data from the instrument that collected it directly to a tidy format. When this isn't possible, though, you'll need to tidy the data yourself. For example, consider the following data (which corresponds to the layout from above). This is qPCR data, where a higher :math:`C_q` value indicates that less material is present. The data are shaped like the plate itself, e.g. a row in the data for every row on the plate, and a column in the data for every column on the plate. It's not uncommon for microplate instruments to export data in this format. 
.. csv-table:: :download:`std_curve.csv <basic_usage/std_curve.csv>` :file: basic_usage/std_curve.csv :header-rows: 1 Below is the code to load this data into a tidy tibble_ with the following columns: - *row*: A letter identifying a row on the microplate, e.g. A-H - *col*: A number identifying a column on the microplate, e.g. 1-12 - *Cq*: The :math:`C_q` value measured for the identified well. .. code-block:: r > library(tidyverse) > > load_cq <- function(path) { + read_csv(path) %>% + rename(row = Cq) %>% + pivot_longer( + !row, + names_to = "col", + values_to = "Cq", + ) + } > data <- load_cq("std_curve.csv") > data # A tibble: 18 x 3 row col Cq <chr> <chr> <dbl> 1 A 1 24.2 2 A 2 20.7 3 A 3 17.2 4 A 4 13.8 5 A 5 10.3 6 A 6 6.97 7 B 1 24.2 8 B 2 20.8 9 B 3 17.2 10 B 4 13.8 11 B 5 10.4 12 B 6 6.87 13 C 1 24.2 14 C 2 20.8 15 C 3 17.1 16 C 4 13.8 17 C 5 10.3 18 C 6 6.74 5. Label the data ================= Use |wellmapr::load()| to associate the labels specified in the TOML file (e.g. the dilutions and replicates) with the experimental data (e.g. the :math:`C_q` values). This process has three steps: - Load a data frame containing the data (see above). - Load another data frame containing the labels. - Merge the two data frames. For the sake of clarity and completeness, we will first show how to perform these steps `manually <#manual-merge>`__. Practically, though, it's easier to let |wellmapr| perform them `automatically <#automatic-merge>`__. Manual merge ------------ Use the |wellmapr::load()| function to create a tibble_ containing the information from the TOML file. This data frame will have columns for each label we specified: *replicate*, *dilution*. It will also have six columns identifying the wells in different ways: *well*, *well0*, *row*, *col*, *row_i*, *col_j*. These columns are redundant, but this redundancy makes it easier to merge the labels with the data. For example, if the wells are named "A1,A2,..." in the data, the *well* column can be used for the merge. If the wells are named "A01,A02,...", the *well0* column can be used instead. If the wells are named in some non-standard way, the *row_i* and *col_j* columns can be used to calculate an appropriate merge column. .. code-block:: r > layout <- wellmapr::load("std_curve.toml") > layout well well0 row col row_i col_j replicate dilution 1 A1 A01 A 1 0 0 1 1e+05 2 A2 A02 A 2 0 1 1 1e+04 3 A3 A03 A 3 0 2 1 1e+03 4 A4 A04 A 4 0 3 1 1e+02 5 A5 A05 A 5 0 4 1 1e+01 6 A6 A06 A 6 0 5 1 1e+00 7 B1 B01 B 1 1 0 2 1e+05 8 B2 B02 B 2 1 1 2 1e+04 9 B3 B03 B 3 1 2 2 1e+03 10 B4 B04 B 4 1 3 2 1e+02 11 B5 B05 B 5 1 4 2 1e+01 12 B6 B06 B 6 1 5 2 1e+00 13 C1 C01 C 1 2 0 3 1e+05 14 C2 C02 C 2 2 1 3 1e+04 15 C3 C03 C 3 2 2 3 1e+03 16 C4 C04 C 4 2 3 3 1e+02 17 C5 C05 C 5 2 4 3 1e+01 18 C6 C06 C 6 2 5 3 1e+00 Use the `dplyr::inner_join()`_ function to associate the labels with the data. In this case, both data frames have columns named *row* and *col*, so those columns are automatically used for the merge (as indicated). It is also easy to merge using columns with different names; see the documentation on `dplyr::inner_join()`_ for more information. .. 
code-block:: pycon > inner_join(layout, data) Joining, by = c("row", "col") well well0 row col row_i col_j replicate dilution Cq 1 A1 A01 A 1 0 0 1 1e+05 24.180859 2 A2 A02 A 2 0 1 1 1e+04 20.740120 3 A3 A03 A 3 0 2 1 1e+03 17.183802 4 A4 A04 A 4 0 3 1 1e+02 13.774300 5 A5 A05 A 5 0 4 1 1e+01 10.294983 6 A6 A06 A 6 0 5 1 1e+00 6.967062 7 B1 B01 B 1 1 0 2 1e+05 24.157118 8 B2 B02 B 2 1 1 2 1e+04 20.779703 9 B3 B03 B 3 1 2 2 1e+03 17.171795 10 B4 B04 B 4 1 3 2 1e+02 13.768831 11 B5 B05 B 5 1 4 2 1e+01 10.362967 12 B6 B06 B 6 1 5 2 1e+00 6.870273 13 C1 C01 C 1 2 0 3 1e+05 24.238230 14 C2 C02 C 2 2 1 3 1e+04 20.787008 15 C3 C03 C 3 2 2 3 1e+03 17.147598 16 C4 C04 C 4 2 3 3 1e+02 13.779314 17 C5 C05 C 5 2 4 3 1e+01 10.292967 18 C6 C06 C 6 2 5 3 1e+00 6.735704 Automatic merge --------------- While it's good to understand how the labels are merged with the data, it's better to let |wellmapr| perform the merge for you. Not only is this more succinct, it also handles some tricky corner cases behind the scenes, e.g. layouts with multiple data files. To load *and* merge the data using |wellmapr::load()|, you need to provide the following arguments: - **data_loader**: A function that accepts a path to a file and returns a tibble_ containing the data from that file. Note that the function we wrote in the previous section fulfills these requirements. If the raw data are tidy to begin with, it is often possible to directly use `readr::read_csv()`_ or similar for this argument. - **merge_cols**: An indication of which columns to merge. In the snippet below, ``TRUE`` means to use any columns that are shared between the two data frames (e.g. that have the same name). You can also use a dictionary to be more explicit about which columns to merge on. Here we also provide the **path_guess** argument, which specifies that the experimental data can be found in a CSV file with the same base name as the layout. Note that this argument uses the syntax for string formatting in python, as described in the :doc:`API documentation <api_python>`. It also would've been possible to specify the path to the CSV directly from the TOML file (see `meta.path`), in which case this argument would've been unnecessary. .. 
code-block:: r > wellmapr::load( + "std_curve.toml", + data_loader = load_cq, + merge_cols = TRUE, + path_guess = "{0.stem}.csv", + ) well well0 row col row_i col_j path replicate dilution Cq 0 A1 A01 A 1 0 0 <environment: 0x56501964bc60> 1 1e+05 24.180859 1 A2 A02 A 2 0 1 <environment: 0x565019653a68> 1 1e+04 20.740120 2 A3 A03 A 3 0 2 <environment: 0x56501965d790> 1 1e+03 17.183802 3 A4 A04 A 4 0 3 <environment: 0x565019665598> 1 1e+02 13.774300 4 A5 A05 A 5 0 4 <environment: 0x56501966f2c0> 1 1e+01 10.294983 5 A6 A06 A 6 0 5 <environment: 0x565019673298> 1 1e+00 6.967062 6 B1 B01 B 1 1 0 <environment: 0x56501967b0a0> 2 1e+05 24.157118 7 B2 B02 B 2 1 1 <environment: 0x565019684dc8> 2 1e+04 20.779703 8 B3 B03 B 3 1 2 <environment: 0x56501968cbd0> 2 1e+03 17.171795 9 B4 B04 B 4 1 3 <environment: 0x5650196968f8> 2 1e+02 13.768831 10 B5 B05 B 5 1 4 <environment: 0x56501969e700> 2 1e+01 10.362967 11 B6 B06 B 6 1 5 <environment: 0x5650196a8428> 2 1e+00 6.870273 12 C1 C01 C 1 2 0 <environment: 0x5650196b0230> 3 1e+05 24.238230 13 C2 C02 C 2 2 1 <environment: 0x5650196b9f58> 3 1e+04 20.787008 14 C3 C03 C 3 2 2 <environment: 0x5650196c3c80> 3 1e+03 17.147598 15 C4 C04 C 4 2 3 <environment: 0x5650196cba88> 3 1e+02 13.779314 16 C5 C05 C 5 2 4 <environment: 0x5650196d57b0> 3 1e+01 10.292967 17 C6 C06 C 6 2 5 <environment: 0x5650196dd5b8> 3 1e+00 6.735704 6. Analyze the data =================== Analyze the data given the connection between the labels and the data. This step doesn't involve :mod:`wellmap`, but is included here for completeness. The example below makes a linear regression of the data in log-space: .. literalinclude:: basic_usage/std_curve.R :language: r :caption: :download:`std_curve.R <basic_usage/std_curve.R>` .. figure:: basic_usage/std_curve_r.svg .. _tidy: https://www.jstatsoft.org/article/view/v059i10 .. _reticulate: https://rstudio.github.io/reticulate/ .. _tibble: https://tibble.tidyverse.org/ .. _`reticulate::py_config()`: https://rstudio.github.io/reticulate/articles/versions.html .. _`dplyr::inner_join()`: https://dplyr.tidyverse.org/reference/join.html .. _`readr::read_csv()`: https://readr.tidyverse.org/reference/read_delim.html .. |wellmapr| replace:: :mod:`wellmapr <wellmap>` .. |wellmapr::show()| replace:: :func:`wellmapr::show() <wellmap.show>` .. |wellmapr::load()| replace:: :func:`wellmapr::load() <wellmap.load>`
PypiClean
/invenio-db-1.1.4.tar.gz/invenio-db-1.1.4/invenio_db/shared.py
from flask_sqlalchemy import SQLAlchemy as FlaskSQLAlchemy from sqlalchemy import MetaData, event, util from sqlalchemy.engine import Engine from sqlalchemy.sql import text from werkzeug.local import LocalProxy NAMING_CONVENTION = util.immutabledict( { "ix": "ix_%(column_0_label)s", "uq": "uq_%(table_name)s_%(column_0_name)s", "ck": "ck_%(table_name)s_%(constraint_name)s", "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", "pk": "pk_%(table_name)s", } ) """Configuration for constraint naming conventions.""" metadata = MetaData(naming_convention=NAMING_CONVENTION) """Default database metadata object holding associated schema constructs.""" class SQLAlchemy(FlaskSQLAlchemy): """Implement or overide extension methods.""" def apply_driver_hacks(self, app, sa_url, options): """Call before engine creation.""" # Don't forget to apply hacks defined on parent object. super(SQLAlchemy, self).apply_driver_hacks(app, sa_url, options) if sa_url.drivername == "sqlite": connect_args = options.setdefault("connect_args", {}) if "isolation_level" not in connect_args: # disable pysqlite's emitting of the BEGIN statement entirely. # also stops it from emitting COMMIT before any DDL. connect_args["isolation_level"] = None if not event.contains(Engine, "connect", do_sqlite_connect): event.listen(Engine, "connect", do_sqlite_connect) if not event.contains(Engine, "begin", do_sqlite_begin): event.listen(Engine, "begin", do_sqlite_begin) from sqlite3 import register_adapter def adapt_proxy(proxy): """Get current object and try to adapt it again.""" return proxy._get_current_object() register_adapter(LocalProxy, adapt_proxy) elif sa_url.drivername == "postgresql+psycopg2": # pragma: no cover from psycopg2.extensions import adapt, register_adapter def adapt_proxy(proxy): """Get current object and try to adapt it again.""" return adapt(proxy._get_current_object()) register_adapter(LocalProxy, adapt_proxy) elif sa_url.drivername == "mysql+pymysql": # pragma: no cover from pymysql import converters def escape_local_proxy(val, mapping): """Get current object and try to adapt it again.""" return converters.escape_item( val._get_current_object(), self.engine.dialect.encoding, mapping=mapping, ) converters.conversions[LocalProxy] = escape_local_proxy converters.encoders[LocalProxy] = escape_local_proxy return sa_url, options def do_sqlite_connect(dbapi_connection, connection_record): """Ensure SQLite checks foreign key constraints. For further details see "Foreign key support" sections on https://docs.sqlalchemy.org/en/latest/dialects/sqlite.html#foreign-key-support """ # Enable foreign key constraint checking cursor = dbapi_connection.cursor() cursor.execute(text("PRAGMA foreign_keys=ON")) cursor.close() def do_sqlite_begin(dbapi_connection): """Ensure SQLite transaction are started properly. For further details see "Foreign key support" sections on https://docs.sqlalchemy.org/en/rel_1_0/dialects/sqlite.html#pysqlite-serializable # noqa """ # emit our own BEGIN dbapi_connection.execute(text("BEGIN")) db = SQLAlchemy(metadata=metadata) """Shared database instance using Flask-SQLAlchemy extension. This object is initialized during initialization of ``InvenioDB`` extenstion that takes care about loading all entrypoints from key ``invenio_db.models``. """
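A minimal sketch of how the shared ``db`` object above is typically used to declare a model so that its constraints pick up ``NAMING_CONVENTION``; the model, table and column names below are invented for illustration and are not part of this module.

from invenio_db import db

class ExampleRecord(db.Model):
    # Hypothetical table, shown only to illustrate the naming convention:
    # the primary key becomes pk_example_record, the unique constraint
    # uq_example_record_slug, and the foreign key
    # fk_example_record_parent_id_example_record.
    __tablename__ = "example_record"

    id = db.Column(db.Integer, primary_key=True)
    slug = db.Column(db.String(80), unique=True)
    parent_id = db.Column(db.Integer, db.ForeignKey("example_record.id"))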
PypiClean
/moosir_feature-1.4.35.tar.gz/moosir_feature-1.4.35/src/moosir_feature/transformers/feature_selections/feature_selector.py
import pandas as pd from sklearn.feature_selection import * from sklearn.ensemble import RandomForestRegressor RANDOM_FOREST_ESTIMATORS_N = 5 RANDOM_FOREST_DEPTH = 3 def remove_low_variance_features(features: pd.DataFrame): # todo: it is fixed, sensitive to scaling, ... # todo: how it works per feature? threshold = 0.01 sel = VarianceThreshold(threshold=threshold) _ = sel.fit_transform(features) return features[features.columns[sel.get_support()]] def remove_weak_linear(features: pd.DataFrame, targets: pd.DataFrame, percentile_to_keep=80): sp = SelectPercentile(f_regression, percentile=percentile_to_keep) sp.fit_transform(features, targets) return features[features.columns[sp.get_support()]] def remove_features_recursively(features: pd.DataFrame, targets: pd.DataFrame, n_features_to_select: int): estimator = RandomForestRegressor(n_estimators=RANDOM_FOREST_ESTIMATORS_N, max_depth=RANDOM_FOREST_DEPTH) rfe = RFE(estimator=estimator, n_features_to_select=n_features_to_select, step=1) rfe.fit(features.values, targets.values.ravel()) return features[features.columns[rfe.get_support()]] def remove_features_using_model(features: pd.DataFrame, targets: pd.DataFrame): estimator = RandomForestRegressor(n_estimators=RANDOM_FOREST_ESTIMATORS_N, max_depth=RANDOM_FOREST_DEPTH) sfm = SelectFromModel(estimator=estimator, threshold=0) sfm.fit(features.values, targets.values.ravel()) # importance return features[features.columns[sfm.get_support()]] def remove_features_sequentially(features: pd.DataFrame, targets: pd.DataFrame, n_features_to_select: int): estimator = RandomForestRegressor(n_estimators=RANDOM_FOREST_ESTIMATORS_N, max_depth=RANDOM_FOREST_DEPTH) seqfm = SequentialFeatureSelector(estimator=estimator, n_features_to_select=n_features_to_select, direction="forward") seqfm.fit(features.values, targets.values.ravel()) return features[features.columns[seqfm.get_support()]]
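An illustrative sketch of chaining the selectors above on a toy dataset; the feature names, sample size and number of features to keep are arbitrary choices.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
features = pd.DataFrame(rng.normal(size=(200, 5)), columns=[f"f{i}" for i in range(5)])
features["constant"] = 1.0   # near-zero variance, dropped by the first step
targets = pd.DataFrame({"y": features["f0"] * 2 + rng.normal(scale=0.1, size=200)})

selected = remove_low_variance_features(features)
selected = remove_weak_linear(selected, targets, percentile_to_keep=80)
selected = remove_features_recursively(selected, targets, n_features_to_select=2)
print(list(selected.columns))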
PypiClean
/PyRATA-0.4.1.tar.gz/PyRATA-0.4.1/CHANGES.md
Revision History ================ v0.4.1 --------------------- * demo implementation of phrase-extraction.py (Justeson and Katz 1995) and (Handel et al 2016) * demo implementation of sentiment-analysis.py: illustration of some constraints present in (Hutto et al. 2014), evaluation with the nltk movie_reviews corpus * api/engine implementation of match and fullmatch methods + some tests * api/engine implementation of pos and endpos arguments for all matching methods + some tests * grammar implementation of backslash in constraint value * enhancement of pyrata_re.py with a data path parameter, match and fullmatch methods, nlp with lc (lowercase) feature, lexicons parameter * fix+change the behavior of re methods when pos was lower than 0 or the endpos parameter was greater than len(data): realign on 0 or len(data) instead of returning None * fix re finditer method when data was empty; a variable was not correctly initialized * fix nfa last_state_id computation for a case such as '(a~"A|B"+) (b="B")' "[{'c':'C'}, {'a':'A'}, {'a':'A', 'b':'B'}, {'d':'D'}]" where the id was a matching state but not necessarily a final state #M * fix re and nfa compile to pass the lexicons argument in the CompiledPattern.compile() method * fix the `ImportError: No module named 'graph_tool'` issue by specifying the graph_tool installation procedure * improvement of the separation of graph_tool from the business code (in pyrata_re.py and nfa.py) * doc user-guide grammar clarification about the change in 0.3.3 (negative constraints are for now only allowed in class elements) * enhancement of processing time by a factor of 4: implementation of a dedicated deepcopy method instead of the one from the copy module * fix bug due to sympy behavior when processing a pattern token with colon characters in the value field * code refactoring by separating compiled_pattern from nfa * user-guide enhancements v0.4 (October 12, 2017) --------------------- * api/engine pattern parser and search engine replaced by the Thompson NFA implementation of Guan Gui * api/engine extension of the finding methods with the possibility to set a greedy or a reluctant matching mode * grammar extension of the language to consider '.' as wildcard * ihm creation of a command line script pyrata_re.py with a PDF drawing facility to export NFA * api/engine DFA extraction facility corresponding to matched parts of NFA on data * quality revise do_tests.py code by using the unittest library + tests extension * quality do_benchmark.py on simple noun phrases + includes comparison with spaCy * fix data immutability in nfa annotate (extend...) which works on a data copy.
Replacing 'data_copy = list(data)' with 'data_copy = copy.deepcopy(data)' * fix re extend action name from extend to 'extend' with quotes * doc user-guide revised: grammar modification with wildcard, matching mode exploration (global, greedy, reluctant), pyrata_re, DFA generation, pdf export, logging, time performance * logging facilities partially maintained v0.3.3/v0.3.4 (July 25, 2017); the latter one without logging instructions to run faster --------------------- * implement annotation methods (annotate, sub, update, extend) for working with a compiled pattern (see compiled_pattern_re) * code refactoring to increase time performance: removing the semantic_step_parser and replacing it by compiling the step tokens into symbolic expressions (using the sympy module) evaluated on the fly for each data token (changes in syntactic_pattern_parser and in evaluate of semantic_pattern_parser) * grammar no longer accepts a negative pattern step: '!pos="NNS"+' should be rewritten '[!pos="NNS"]+' * code refactoring to increase time performance: releasing a pip version without I/O logging and a verbose git version with scripts to generate the optimized pip version * code refactoring to increase time performance: substituting string concatenation with format * added code to benchmark * revised user-guide (logging, time performance and grammar modification) * removed old logging mechanism (verbosity argument) from the main code * fixed logging issues (output syntactic parsing problem and removing old verbosity facility) * fixed minor bug when falling into the b+b case (plus quantifier) of semantic_pattern_parser when finding all occurrences of '(pos="DT"? pos="JJ"*)+ pos="NN"' in the brown corpus v0.3.2 (April 22, 2017) --------------------- * added the chunk operator (as a re rewriting rule) * revised user-guide (group, alternative, chunk, ^$, compiled sections) * added more tests v0.3.1 (April 17, 2017) --------------------- * first upload to the pypi server * adoption of a compiled pattern tree structure to represent sequences of quantified steps, quantified groups and alternatives * syntactic_pattern_parser supplies a compiled pattern tree structure * semantic_analysis consumes a compiled pattern tree structure * using the logging module for syntactic_pattern_parser and semantic_analysis * renamed semantic_analysis into semantic_pattern_parser * renamed syntactic_analysis.py into compiled_pattern_re * added the chunk operator (as a re rewriting rule) * deprecated verbosity argument v0.3.0 (April 14, 2017) --------------------- * File creation of CHANGES.md * File creation of doc/user-guide.rst * Management of ^ and $ symbols in grammar parsing and pattern engine * Management of | (alternative sequence of steps) with quantifiers in grammar parsing and pattern engine * api/engine syntactic_pattern_parser code refactoring to define two methods, one for getting the position in lexdata and the other for getting the referred form of the pattern step (previously done via p.lexer.patternStep and setPatternStep)
PypiClean
/mindspore_ascend-1.10.0-cp39-none-any.whl/mindspore/_akg/akg/topi/x86/tensor_intrin.py
"""Core kernel of dot product of 4 Int8 operations""" #pylint: disable=invalid-name import tvm def dot_16x1x16_uint8_int8_int32(): """Dispatch the most optimized intrin depending on the target""" mcpu = tvm.target.current_target().mcpu assert mcpu in ("skylake-avx512", "cascadelake"), \ "An old Intel machine that does not have fast Int8 support." if mcpu == "skylake-avx512": return dot_16x1x16_uint8_int8_int32_skylake() # cascadelake return dot_16x1x16_uint8_int8_int32_cascadelake() def dot_16x1x16_uint8_int8_int32_skylake(): """ Int8 dot product by every 4 elements using AVX512 Skylake instructions. This function takes two arrays of uint8 and int8 datatype -- data[4] and kernel[16][4] -- and computes a dot product of data[4] with every 4 elements of kernels, resulting in output[16] of int32 datatype. The pseudo code is as follows. .. code-block:: c void dot_16x1x16_uint8_int8_int32(uint8 data[4], int8 kernel[16][4], int32 output[16]){ for (int i = 0; i < 16; i++){ output[i] = 0; for (int k = 0; k < 4; k++){ output[i] += data[k] * kernel[i][k] } } } Physically, the kernel array sits in an AVX512 vector register and the data[4] is broadcasted to another AVX512 vector register. This function returns a TensorIntrin that can be used to tensorize a schedule. Returns ------- intrin : TensorIntrin The Skylake int8 TensorIntrin that can be used in tensorizing schedule """ int32_lanes = 16 # 16 int32 lanes in AVX512 num_int8_elements = 4 # 4 int8 elements in int32 data = tvm.placeholder((num_int8_elements,), dtype='uint8', name='data') kernel = tvm.placeholder((int32_lanes, num_int8_elements), dtype='int8', name='kernel') k = tvm.reduce_axis((0, num_int8_elements), name='k') C = tvm.compute((int32_lanes,), lambda i: tvm.sum(data[k].astype('int32') * kernel[i, k].astype('int32'), axis=k), name="C") a_buffer = tvm.decl_buffer(data.shape, dtype='uint8', name="a_buffer", offset_factor=1, strides=[1]) b_buffer = tvm.decl_buffer(kernel.shape, dtype='int8', name="b_buffer", offset_factor=1, strides=[tvm.var('ldw'), 1]) def _intrin_func(ins, outs): def _instr(index): ib = tvm.ir_builder.create() if index == 1: ib.emit(outs[0].vstore(0, tvm.const(0, 'int32x16'))) return ib.get() a_int8 = ins[0].vload([0], "uint8x4") re_int32 = tvm.call_pure_intrin('int32', 'reinterpret', a_int8) vec_ai32 = re_int32.astype('int32x16') vec_a = tvm.call_pure_intrin('int8x64', 'reinterpret', vec_ai32) vec_b = ins[1].vload([0, 0], "int8x64") vec_one = tvm.const(1, "int16x32") pair_reduction = tvm.call_llvm_intrin('int16x32', 'llvm.x86.avx512.pmaddubs.w.512', tvm.const(0, 'uint32'), vec_a, vec_b) quad_reduction = tvm.call_llvm_intrin('int32x16', 'llvm.x86.avx512.pmaddw.d.512', tvm.const(0, 'uint32'), pair_reduction, vec_one) if index == 0: ib.emit(outs[0].vstore(0, quad_reduction)) else: ib.emit(outs[0].vstore(0, quad_reduction + outs[0].vload([0], 'int32x16'))) return ib.get() # body, reset, update return _instr(0), _instr(1), _instr(2) with tvm.build_config(offset_factor=1, partition_const_loop=True): return tvm.decl_tensor_intrin(C.op, _intrin_func, binds={data:a_buffer, kernel:b_buffer}) def dot_16x1x16_uint8_int8_int16(): """ Int8 dot product by every 2 elements using AVX512 Skylake instructions. This function takes two arrays of uint8 and int8 datatype -- data[2] and kernel[4][32][2] -- and computes a dot product of data[2] with every 2 elements of kernels, resulting in output[4][32] of int16 datatype. The pseudo code is as follows. .. 
code-block:: c void dot_16x1x16_uint8_int8_int16(uint8 data[2], int8 kernel[32*4][2], int16 output[32*4]){ for (int i = 0; i< 4; i++){ for (int j = 0; j < 32; j++){ output[i][i] = 0; for (int k = 0; k < 2; k++){ output[i][j][k] += data[k] * kernel[i][j][k] } } } } Physically, the kernel array sits in four AVX512 vector registers and the data[2] is broadcasted to another AVX512 vector register. This function returns a TensorIntrin that can be used to tensorize a schedule. Returns ------- intrin : TensorIntrin The Skylake int8 TensorIntrin that can be used in tensorizing schedule """ int16_lanes = 4*32 # 4*32 int32 lanes in 4 AVX512 vector registers num_int8_elements = 2 # 2 int8 elements in int16 data = tvm.placeholder((num_int8_elements,), dtype='uint8', name='data') kernel = tvm.placeholder((int16_lanes, num_int8_elements), dtype='int8', name='kernel') k = tvm.reduce_axis((0, num_int8_elements), name='k') C = tvm.compute((int16_lanes, ), lambda i: tvm.sum(data[k].astype('int16') * kernel[i, k].astype('int16'), axis=k), name="C") a_buffer = tvm.decl_buffer(data.shape, dtype='uint8', name="a_buffer", offset_factor=1, strides=[1]) b_buffer = tvm.decl_buffer(kernel.shape, dtype='int8', name="b_buffer", offset_factor=1) # strides=[tvm.var('ldw'), 1, 1]) def _intrin_func(ins, outs): def _instr(index): ib = tvm.ir_builder.create() if index == 1: for i in range(4): ib.emit(outs[0].vstore([i*32], tvm.const(0, 'int16x32'))) return ib.get() a_int8 = ins[0].vload([0], "uint8x2") re_int16 = tvm.call_pure_intrin('int16', 'reinterpret', a_int8) vec_ai16 = re_int16.astype('int16x32') vec_a = tvm.call_pure_intrin('int8x64', 'reinterpret', vec_ai16) for i in range(4): vec_b = ins[1].vload([i*32, 0], "int8x64") pair_reduction = tvm.call_llvm_intrin('int16x32', 'llvm.x86.avx512.pmaddubs.w.512', tvm.const(0, 'uint32'), vec_a, vec_b) if index == 0: ib.emit(outs[0].vstore([i*32], pair_reduction)) else: ib.emit(outs[0].vstore([i*32], pair_reduction + outs[0].vload([i*32], 'int16x32'))) return ib.get() # body, reset, update return _instr(0), _instr(1), _instr(2) with tvm.build_config(offset_factor=1, partition_const_loop=True): return tvm.decl_tensor_intrin(C.op, _intrin_func, binds={data:a_buffer, kernel:b_buffer}) def dot_16x1x16_uint8_int8_int32_cascadelake(): """ Int8 dot product by every 4 elements using AVX512VNNI Cascade Lake instructions. This function takes two arrays of uint8 and int8 datatype -- data[4] and kernel[16][4] -- and computes a dot product of data[4] with every 4 elements of kernels, resulting in output[16] of int32 datatype. The pseudo code is as follows. .. code-block:: c void dot_16x1x16_uint8_int8_int32_cascadelake(uint8 data[4], int8 kernel[16][4], int32 output[16]){ for (int i = 0; i < 16; i++){ output[i] = 0; for (int k = 0; k < 4; k++){ output[i] += data[k] * kernel[i][k] } } } Physically, the kernel array sits in an AVX512 vector register and the data[4] is broadcasted to another AVX512 vector register. This function returns a TensorIntrin that can be used to tensorize a schedule. 
Returns ------- intrin : TensorIntrin The Cascade Lake int8 TensorIntrin that can be used in tensorizing schedule """ int32_lanes = 16 # 16 int32 lanes in AVX512 num_int8_elements = 4 # 4 int8 elements in int32 data = tvm.placeholder((num_int8_elements,), dtype='uint8', name='data') kernel = tvm.placeholder((int32_lanes, num_int8_elements), dtype='int8', name='kernel') k = tvm.reduce_axis((0, num_int8_elements), name='k') C = tvm.compute((int32_lanes,), lambda i: tvm.sum(data[k].astype('int32') * kernel[i, k].astype('int32'), axis=k), name="C") a_buffer = tvm.decl_buffer(data.shape, dtype='uint8', name="a_buffer", offset_factor=1, strides=[1]) b_buffer = tvm.decl_buffer(kernel.shape, dtype='int8', name="b_buffer", offset_factor=1, strides=[tvm.var('ldw'), 1]) def _intrin_func(ins, outs): def _instr(index): ib = tvm.ir_builder.create() if index == 1: ib.emit(outs[0].vstore(0, tvm.const(0, 'int32x16'))) return ib.get() a_int8 = ins[0].vload([0], "uint8x4") re_int32 = tvm.call_pure_intrin('int32', 'reinterpret', a_int8) vec_ai32 = re_int32.astype('int32x16') vec_b = ins[1].vload([0, 0], "int8x64") vnni_inst_name = 'llvm.x86.avx512.vpdpbusd.512' llvm_id = tvm.codegen.llvm_lookup_intrinsic_id(vnni_inst_name) if llvm_id != 0: # VNNI is available for current LLVM version vec_bi32 = tvm.call_pure_intrin('int32x16', 'reinterpret', vec_b) vec_zero = tvm.const(0, "int32x16") quad_reduction = tvm.call_llvm_intrin('int32x16', 'llvm.x86.avx512.vpdpbusd.512', tvm.const(0, 'uint32'), vec_zero, vec_ai32, vec_bi32) else: # Fall back to the normal AVX512 vec_a = tvm.call_pure_intrin('int8x64', 'reinterpret', vec_ai32) vec_one = tvm.const(1, "int16x32") pair_reduction = tvm.call_llvm_intrin('int16x32', 'llvm.x86.avx512.pmaddubs.w.512', tvm.const(0, 'uint32'), vec_a, vec_b) quad_reduction = tvm.call_llvm_intrin('int32x16', 'llvm.x86.avx512.pmaddw.d.512', tvm.const(0, 'uint32'), pair_reduction, vec_one) if index == 0: ib.emit(outs[0].vstore(0, quad_reduction)) else: ib.emit(outs[0].vstore(0, quad_reduction + outs[0].vload([0], 'int32x16'))) return ib.get() # body, reset, update return _instr(0), _instr(1), _instr(2) with tvm.build_config(offset_factor=1, partition_const_loop=True): return tvm.decl_tensor_intrin(C.op, _intrin_func, binds={data:a_buffer, kernel:b_buffer})
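For reference, the reduction computed by the 16x1x16 intrinsics above can be checked against a plain NumPy sketch (illustration only, independent of TVM):

import numpy as np

# output[i] = sum_k data[k] * kernel[i][k], with uint8 data and int8 kernel
# accumulated into int32, matching the pseudo code in the docstrings above.
rng = np.random.default_rng(0)
data = rng.integers(0, 256, size=4, dtype=np.uint8)             # data[4]
kernel = rng.integers(-128, 128, size=(16, 4), dtype=np.int8)   # kernel[16][4]
output = kernel.astype(np.int32) @ data.astype(np.int32)        # output[16]
assert output.shape == (16,) and output.dtype == np.int32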
PypiClean
/comk_django_plugin-1.4.4.tar.gz/comk_django_plugin-1.4.4/comk_django_plugin/utils/GeneralMethods.py
import json from django.http import JsonResponse def general_resolve_request_data(request): ''' Parse the request's payload (body, GET and POST) into a dict. :param request: :return: ''' request_data = dict() try: request_data.update(json.loads(request.body)) except: pass request_data.update(request.GET.dict()) request_data.update(request.POST.dict()) return request_data def general_resolve_request(request, data_len_filter=False, data_max_len=0): ''' Resolve all request parameters into a single str. :param request: :return: ''' return_L = [] return_L.append(request.META.get('REMOTE_ADDR')) return_L.append(request.scheme) return_L.append(request.get_host()) return_L.append(request.path) return_L.append(request.method) if hasattr(request, 'user') and request.user.is_authenticated(): user_key = str(request.user.username) else: user_key = 'AnonymousUser' return_L.append(user_key) # collect the request data req_data = str(general_resolve_request_data(request)) # skip overly long request data if req_data and data_len_filter and len(req_data) >= data_max_len: req_data = 'Request data too long, omitted here' return_L.append(req_data) return ' -- '.join(return_L) def general_resolve_response_data(response): ''' Parse the response's payload into a dict. :param response: :return: ''' data = {} status_code = str(response.status_code) if status_code.startswith('2'): if isinstance(response, JsonResponse): data = json.loads(response.content) # elif isinstance(response, HttpResponse): # data = response.content.decode('utf-8') return data def general_resolve_response(response, data_len_filter=False, data_max_len=0): ''' Resolve all response parameters into a single str. :param response: :return: ''' return_L = [] status_code = str(response.status_code) return_L.append(status_code) # collect the response data resp_data = str(general_resolve_response_data(response)) # skip overly long response data if resp_data and data_len_filter and len(resp_data) >= data_max_len: resp_data = 'Response data too long, omitted here' return_L.append(resp_data) return ' -- '.join(return_L) def merge_dicts(dict_one: dict, dict_two: dict): ''' Deep-merge two dicts. :param dict_one: :param dict_two: :return: ''' one_keys = dict_one.keys() tow_keys = dict_two.keys() for tow_key in tow_keys: if tow_key in one_keys: one_value = dict_one.get(tow_key) two_value = dict_two.get(tow_key) if isinstance(one_value, dict) and isinstance(two_value, dict): merge_dicts(one_value, two_value) else: dict_one[tow_key] = dict_two.get(tow_key) return dict_one
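A sketch of how these helpers might be wired into a Django middleware for request/response logging; the middleware class below is hypothetical and not defined in this package.

import logging

logger = logging.getLogger(__name__)

class RequestLogMiddleware:
    # Hypothetical middleware, shown only to illustrate the helpers above.
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        logger.info(
            '%s || %s',
            general_resolve_request(request, data_len_filter=True, data_max_len=2000),
            general_resolve_response(response, data_len_filter=True, data_max_len=2000),
        )
        return response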
PypiClean
/h2o_pysparkling_3.4-3.42.0.2.post1.tar.gz/h2o_pysparkling_3.4-3.42.0.2.post1/h2o/model/models/anomaly_detection.py
from h2o.model import ModelBase from h2o.utils.shared_utils import can_use_pandas class H2OAnomalyDetectionModel(ModelBase): def varsplits(self, use_pandas=False): """ Retrieve per-variable split information for a given Isolation Forest model. Output will include: - count The number of times a variable was used to make a split. - aggregated_split_ratios The split ratio is defined as ``abs(#left_observations - #right_observations) / #before_split``. Even splits (``#left_observations`` approx the same as ``#right_observations``) contribute less to the total aggregated split ratio value for the given feature; highly imbalanced splits (eg. ``#left_observations >> #right_observations``) contribute more. - aggregated_split_depths The sum of all depths of a variable used to make a split. (If a variable is used on level N of a tree, then it contributes with N to the total aggregate.) :param use_pandas: If ``True``, then the variable splits will be returned as a Pandas data frame. :returns: A list or Pandas DataFrame. :examples: >>> from h2o.estimators import H2OIsolationForestEstimator >>> h2o_df = h2o.import_file("https://raw.github.com/h2oai/h2o/master/smalldata/logreg/prostate.csv") >>> train,test = h2o_df.split_frame(ratios=[0.75]) >>> model = H2OIsolationForestEstimator(sample_rate = 0.1, ... max_depth = 20, ... ntrees = 50) >>> model.train(training_frame=train) >>> model.varsplits() """ model = self._model_json["output"] if "variable_splits" in list(model.keys()) and model["variable_splits"]: vals = model["variable_splits"].cell_values header = model["variable_splits"].col_header if use_pandas and can_use_pandas(): import pandas return pandas.DataFrame(vals, columns=header) else: return vals else: print("Warning: This model doesn't provide variable split information")
PypiClean
/Skailar-framework-5.0.tar.gz/Skailar-framework-5.0/skailar/urls/resolvers.py
import functools import inspect import re import string from importlib import import_module from pickle import PicklingError from urllib.parse import quote from asgiref.local import Local from skailar.conf import settings from skailar.core.checks import Error, Warning from skailar.core.checks.urls import check_resolver from skailar.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from skailar.utils.datastructures import MultiValueDict from skailar.utils.functional import cached_property from skailar.utils.http import RFC3986_SUBDELIMS, escape_leading_slashes from skailar.utils.regex_helper import _lazy_re_compile, normalize from skailar.utils.translation import get_language from .converters import get_converter from .exceptions import NoReverseMatch, Resolver404 from .utils import get_callable class ResolverMatch: def __init__( self, func, args, kwargs, url_name=None, app_names=None, namespaces=None, route=None, tried=None, captured_kwargs=None, extra_kwargs=None, ): self.func = func self.args = args self.kwargs = kwargs self.url_name = url_name self.route = route self.tried = tried self.captured_kwargs = captured_kwargs self.extra_kwargs = extra_kwargs # If a URLRegexResolver doesn't have a namespace or app_name, it passes # in an empty value. self.app_names = [x for x in app_names if x] if app_names else [] self.app_name = ":".join(self.app_names) self.namespaces = [x for x in namespaces if x] if namespaces else [] self.namespace = ":".join(self.namespaces) if hasattr(func, "view_class"): func = func.view_class if not hasattr(func, "__name__"): # A class-based view self._func_path = func.__class__.__module__ + "." + func.__class__.__name__ else: # A function-based view self._func_path = func.__module__ + "." + func.__name__ view_path = url_name or self._func_path self.view_name = ":".join(self.namespaces + [view_path]) def __getitem__(self, index): return (self.func, self.args, self.kwargs)[index] def __repr__(self): if isinstance(self.func, functools.partial): func = repr(self.func) else: func = self._func_path return ( "ResolverMatch(func=%s, args=%r, kwargs=%r, url_name=%r, " "app_names=%r, namespaces=%r, route=%r%s%s)" % ( func, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces, self.route, f", captured_kwargs={self.captured_kwargs!r}" if self.captured_kwargs else "", f", extra_kwargs={self.extra_kwargs!r}" if self.extra_kwargs else "", ) ) def __reduce_ex__(self, protocol): raise PicklingError(f"Cannot pickle {self.__class__.__qualname__}.") def get_resolver(urlconf=None): if urlconf is None: urlconf = settings.ROOT_URLCONF return _get_cached_resolver(urlconf) @functools.cache def _get_cached_resolver(urlconf=None): return URLResolver(RegexPattern(r"^/"), urlconf) @functools.cache def get_ns_resolver(ns_pattern, resolver, converters): # Build a namespaced resolver for the given parent URLconf pattern. # This makes it possible to have captured parameters in the parent # URLconf pattern. pattern = RegexPattern(ns_pattern) pattern.converters = dict(converters) ns_resolver = URLResolver(pattern, resolver.url_patterns) return URLResolver(RegexPattern(r"^/"), [ns_resolver]) class LocaleRegexDescriptor: def __init__(self, attr): self.attr = attr def __get__(self, instance, cls=None): """ Return a compiled regular expression based on the active language. 
""" if instance is None: return self # As a performance optimization, if the given regex string is a regular # string (not a lazily-translated string proxy), compile it once and # avoid per-language compilation. pattern = getattr(instance, self.attr) if isinstance(pattern, str): instance.__dict__["regex"] = instance._compile(pattern) return instance.__dict__["regex"] language_code = get_language() if language_code not in instance._regex_dict: instance._regex_dict[language_code] = instance._compile(str(pattern)) return instance._regex_dict[language_code] class CheckURLMixin: def describe(self): """ Format the URL pattern for display in warning messages. """ description = "'{}'".format(self) if self.name: description += " [name='{}']".format(self.name) return description def _check_pattern_startswith_slash(self): """ Check that the pattern does not begin with a forward slash. """ regex_pattern = self.regex.pattern if not settings.APPEND_SLASH: # Skip check as it can be useful to start a URL pattern with a slash # when APPEND_SLASH=False. return [] if regex_pattern.startswith(("/", "^/", "^\\/")) and not regex_pattern.endswith( "/" ): warning = Warning( "Your URL pattern {} has a route beginning with a '/'. Remove this " "slash as it is unnecessary. If this pattern is targeted in an " "include(), ensure the include() pattern has a trailing '/'.".format( self.describe() ), id="urls.W002", ) return [warning] else: return [] class RegexPattern(CheckURLMixin): regex = LocaleRegexDescriptor("_regex") def __init__(self, regex, name=None, is_endpoint=False): self._regex = regex self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = {} def match(self, path): match = ( self.regex.fullmatch(path) if self._is_endpoint and self.regex.pattern.endswith("$") else self.regex.search(path) ) if match: # If there are any named groups, use those as kwargs, ignoring # non-named groups. Otherwise, pass all non-named arguments as # positional arguments. kwargs = match.groupdict() args = () if kwargs else match.groups() kwargs = {k: v for k, v in kwargs.items() if v is not None} return path[match.end() :], args, kwargs return None def check(self): warnings = [] warnings.extend(self._check_pattern_startswith_slash()) if not self._is_endpoint: warnings.extend(self._check_include_trailing_dollar()) return warnings def _check_include_trailing_dollar(self): regex_pattern = self.regex.pattern if regex_pattern.endswith("$") and not regex_pattern.endswith(r"\$"): return [ Warning( "Your URL pattern {} uses include with a route ending with a '$'. " "Remove the dollar from the route to avoid problems including " "URLs.".format(self.describe()), id="urls.W001", ) ] else: return [] def _compile(self, regex): """Compile and return the given regular expression.""" try: return re.compile(regex) except re.error as e: raise ImproperlyConfigured( '"%s" is not a valid regular expression: %s' % (regex, e) ) from e def __str__(self): return str(self._regex) _PATH_PARAMETER_COMPONENT_RE = _lazy_re_compile( r"<(?:(?P<converter>[^>:]+):)?(?P<parameter>[^>]+)>" ) def _route_to_regex(route, is_endpoint=False): """ Convert a path pattern into a regular expression. Return the regular expression and a dictionary mapping the capture names to the converters. For example, 'foo/<int:pk>' returns '^foo\\/(?P<pk>[0-9]+)' and {'pk': <skailar.urls.converters.IntConverter>}. 
""" original_route = route parts = ["^"] converters = {} while True: match = _PATH_PARAMETER_COMPONENT_RE.search(route) if not match: parts.append(re.escape(route)) break elif not set(match.group()).isdisjoint(string.whitespace): raise ImproperlyConfigured( "URL route '%s' cannot contain whitespace in angle brackets " "<…>." % original_route ) parts.append(re.escape(route[: match.start()])) route = route[match.end() :] parameter = match["parameter"] if not parameter.isidentifier(): raise ImproperlyConfigured( "URL route '%s' uses parameter name %r which isn't a valid " "Python identifier." % (original_route, parameter) ) raw_converter = match["converter"] if raw_converter is None: # If a converter isn't specified, the default is `str`. raw_converter = "str" try: converter = get_converter(raw_converter) except KeyError as e: raise ImproperlyConfigured( "URL route %r uses invalid converter %r." % (original_route, raw_converter) ) from e converters[parameter] = converter parts.append("(?P<" + parameter + ">" + converter.regex + ")") if is_endpoint: parts.append(r"\Z") return "".join(parts), converters class RoutePattern(CheckURLMixin): regex = LocaleRegexDescriptor("_route") def __init__(self, route, name=None, is_endpoint=False): self._route = route self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = _route_to_regex(str(route), is_endpoint)[1] def match(self, path): match = self.regex.search(path) if match: # RoutePattern doesn't allow non-named groups so args are ignored. kwargs = match.groupdict() for key, value in kwargs.items(): converter = self.converters[key] try: kwargs[key] = converter.to_python(value) except ValueError: return None return path[match.end() :], (), kwargs return None def check(self): warnings = self._check_pattern_startswith_slash() route = self._route if "(?P<" in route or route.startswith("^") or route.endswith("$"): warnings.append( Warning( "Your URL pattern {} has a route that contains '(?P<', begins " "with a '^', or ends with a '$'. This was likely an oversight " "when migrating to skailar.urls.path().".format(self.describe()), id="2_0.W001", ) ) return warnings def _compile(self, route): return re.compile(_route_to_regex(route, self._is_endpoint)[0]) def __str__(self): return str(self._route) class LocalePrefixPattern: def __init__(self, prefix_default_language=True): self.prefix_default_language = prefix_default_language self.converters = {} @property def regex(self): # This is only used by reverse() and cached in _reverse_dict. 
return re.compile(re.escape(self.language_prefix)) @property def language_prefix(self): language_code = get_language() or settings.LANGUAGE_CODE if language_code == settings.LANGUAGE_CODE and not self.prefix_default_language: return "" else: return "%s/" % language_code def match(self, path): language_prefix = self.language_prefix if path.startswith(language_prefix): return path.removeprefix(language_prefix), (), {} return None def check(self): return [] def describe(self): return "'{}'".format(self) def __str__(self): return self.language_prefix class URLPattern: def __init__(self, pattern, callback, default_args=None, name=None): self.pattern = pattern self.callback = callback # the view self.default_args = default_args or {} self.name = name def __repr__(self): return "<%s %s>" % (self.__class__.__name__, self.pattern.describe()) def check(self): warnings = self._check_pattern_name() warnings.extend(self.pattern.check()) warnings.extend(self._check_callback()) return warnings def _check_pattern_name(self): """ Check that the pattern name does not contain a colon. """ if self.pattern.name is not None and ":" in self.pattern.name: warning = Warning( "Your URL pattern {} has a name including a ':'. Remove the colon, to " "avoid ambiguous namespace references.".format(self.pattern.describe()), id="urls.W003", ) return [warning] else: return [] def _check_callback(self): from skailar.views import View view = self.callback if inspect.isclass(view) and issubclass(view, View): return [ Error( "Your URL pattern %s has an invalid view, pass %s.as_view() " "instead of %s." % ( self.pattern.describe(), view.__name__, view.__name__, ), id="urls.E009", ) ] return [] def resolve(self, path): match = self.pattern.match(path) if match: new_path, args, captured_kwargs = match # Pass any default args as **kwargs. kwargs = {**captured_kwargs, **self.default_args} return ResolverMatch( self.callback, args, kwargs, self.pattern.name, route=str(self.pattern), captured_kwargs=captured_kwargs, extra_kwargs=self.default_args, ) @cached_property def lookup_str(self): """ A string that identifies the view (e.g. 'path.to.view_function' or 'path.to.ClassBasedView'). """ callback = self.callback if isinstance(callback, functools.partial): callback = callback.func if hasattr(callback, "view_class"): callback = callback.view_class elif not hasattr(callback, "__name__"): return callback.__module__ + "." + callback.__class__.__name__ return callback.__module__ + "." + callback.__qualname__ class URLResolver: def __init__( self, pattern, urlconf_name, default_kwargs=None, app_name=None, namespace=None ): self.pattern = pattern # urlconf_name is the dotted Python path to the module defining # urlpatterns. It may also be an object with an urlpatterns attribute # or urlpatterns itself. 
self.urlconf_name = urlconf_name self.callback = None self.default_kwargs = default_kwargs or {} self.namespace = namespace self.app_name = app_name self._reverse_dict = {} self._namespace_dict = {} self._app_dict = {} # set of dotted paths to all functions and classes that are used in # urlpatterns self._callback_strs = set() self._populated = False self._local = Local() def __repr__(self): if isinstance(self.urlconf_name, list) and self.urlconf_name: # Don't bother to output the whole list, it can be huge urlconf_repr = "<%s list>" % self.urlconf_name[0].__class__.__name__ else: urlconf_repr = repr(self.urlconf_name) return "<%s %s (%s:%s) %s>" % ( self.__class__.__name__, urlconf_repr, self.app_name, self.namespace, self.pattern.describe(), ) def check(self): messages = [] for pattern in self.url_patterns: messages.extend(check_resolver(pattern)) messages.extend(self._check_custom_error_handlers()) return messages or self.pattern.check() def _check_custom_error_handlers(self): messages = [] # All handlers take (request, exception) arguments except handler500 # which takes (request). for status_code, num_parameters in [(400, 2), (403, 2), (404, 2), (500, 1)]: try: handler = self.resolve_error_handler(status_code) except (ImportError, ViewDoesNotExist) as e: path = getattr(self.urlconf_module, "handler%s" % status_code) msg = ( "The custom handler{status_code} view '{path}' could not be " "imported." ).format(status_code=status_code, path=path) messages.append(Error(msg, hint=str(e), id="urls.E008")) continue signature = inspect.signature(handler) args = [None] * num_parameters try: signature.bind(*args) except TypeError: msg = ( "The custom handler{status_code} view '{path}' does not " "take the correct number of arguments ({args})." ).format( status_code=status_code, path=handler.__module__ + "." + handler.__qualname__, args="request, exception" if num_parameters == 2 else "request", ) messages.append(Error(msg, id="urls.E007")) return messages def _populate(self): # Short-circuit if called recursively in this thread to prevent # infinite recursion. Concurrent threads may call this at the same # time and will need to continue, so set 'populating' on a # thread-local variable. if getattr(self._local, "populating", False): return try: self._local.populating = True lookups = MultiValueDict() namespaces = {} apps = {} language_code = get_language() for url_pattern in reversed(self.url_patterns): p_pattern = url_pattern.pattern.regex.pattern p_pattern = p_pattern.removeprefix("^") if isinstance(url_pattern, URLPattern): self._callback_strs.add(url_pattern.lookup_str) bits = normalize(url_pattern.pattern.regex.pattern) lookups.appendlist( url_pattern.callback, ( bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters, ), ) if url_pattern.name is not None: lookups.appendlist( url_pattern.name, ( bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters, ), ) else: # url_pattern is a URLResolver. 
url_pattern._populate() if url_pattern.app_name: apps.setdefault(url_pattern.app_name, []).append( url_pattern.namespace ) namespaces[url_pattern.namespace] = (p_pattern, url_pattern) else: for name in url_pattern.reverse_dict: for ( matches, pat, defaults, converters, ) in url_pattern.reverse_dict.getlist(name): new_matches = normalize(p_pattern + pat) lookups.appendlist( name, ( new_matches, p_pattern + pat, {**defaults, **url_pattern.default_kwargs}, { **self.pattern.converters, **url_pattern.pattern.converters, **converters, }, ), ) for namespace, ( prefix, sub_pattern, ) in url_pattern.namespace_dict.items(): current_converters = url_pattern.pattern.converters sub_pattern.pattern.converters.update(current_converters) namespaces[namespace] = (p_pattern + prefix, sub_pattern) for app_name, namespace_list in url_pattern.app_dict.items(): apps.setdefault(app_name, []).extend(namespace_list) self._callback_strs.update(url_pattern._callback_strs) self._namespace_dict[language_code] = namespaces self._app_dict[language_code] = apps self._reverse_dict[language_code] = lookups self._populated = True finally: self._local.populating = False @property def reverse_dict(self): language_code = get_language() if language_code not in self._reverse_dict: self._populate() return self._reverse_dict[language_code] @property def namespace_dict(self): language_code = get_language() if language_code not in self._namespace_dict: self._populate() return self._namespace_dict[language_code] @property def app_dict(self): language_code = get_language() if language_code not in self._app_dict: self._populate() return self._app_dict[language_code] @staticmethod def _extend_tried(tried, pattern, sub_tried=None): if sub_tried is None: tried.append([pattern]) else: tried.extend([pattern, *t] for t in sub_tried) @staticmethod def _join_route(route1, route2): """Join two routes, without the starting ^ in the second route.""" if not route1: return route2 route2 = route2.removeprefix("^") return route1 + route2 def _is_callback(self, name): if not self._populated: self._populate() return name in self._callback_strs def resolve(self, path): path = str(path) # path may be a reverse_lazy object tried = [] match = self.pattern.match(path) if match: new_path, args, kwargs = match for pattern in self.url_patterns: try: sub_match = pattern.resolve(new_path) except Resolver404 as e: self._extend_tried(tried, pattern, e.args[0].get("tried")) else: if sub_match: # Merge captured arguments in match with submatch sub_match_dict = {**kwargs, **self.default_kwargs} # Update the sub_match_dict with the kwargs from the sub_match. sub_match_dict.update(sub_match.kwargs) # If there are *any* named groups, ignore all non-named groups. # Otherwise, pass all non-named arguments as positional # arguments. 
sub_match_args = sub_match.args if not sub_match_dict: sub_match_args = args + sub_match.args current_route = ( "" if isinstance(pattern, URLPattern) else str(pattern.pattern) ) self._extend_tried(tried, pattern, sub_match.tried) return ResolverMatch( sub_match.func, sub_match_args, sub_match_dict, sub_match.url_name, [self.app_name] + sub_match.app_names, [self.namespace] + sub_match.namespaces, self._join_route(current_route, sub_match.route), tried, captured_kwargs=sub_match.captured_kwargs, extra_kwargs={ **self.default_kwargs, **sub_match.extra_kwargs, }, ) tried.append([pattern]) raise Resolver404({"tried": tried, "path": new_path}) raise Resolver404({"path": path}) @cached_property def urlconf_module(self): if isinstance(self.urlconf_name, str): return import_module(self.urlconf_name) else: return self.urlconf_name @cached_property def url_patterns(self): # urlconf_module might be a valid set of patterns, so we default to it patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module) try: iter(patterns) except TypeError as e: msg = ( "The included URLconf '{name}' does not appear to have " "any patterns in it. If you see the 'urlpatterns' variable " "with valid patterns in the file then the issue is probably " "caused by a circular import." ) raise ImproperlyConfigured(msg.format(name=self.urlconf_name)) from e return patterns def resolve_error_handler(self, view_type): callback = getattr(self.urlconf_module, "handler%s" % view_type, None) if not callback: # No handler specified in file; use lazy import, since # skailar.conf.urls imports this file. from skailar.conf import urls callback = getattr(urls, "handler%s" % view_type) return get_callable(callback) def reverse(self, lookup_view, *args, **kwargs): return self._reverse_with_prefix(lookup_view, "", *args, **kwargs) def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs): if args and kwargs: raise ValueError("Don't mix *args and **kwargs in call to reverse()!") if not self._populated: self._populate() possibilities = self.reverse_dict.getlist(lookup_view) for possibility, pattern, defaults, converters in possibilities: for result, params in possibility: if args: if len(args) != len(params): continue candidate_subs = dict(zip(params, args)) else: if set(kwargs).symmetric_difference(params).difference(defaults): continue matches = True for k, v in defaults.items(): if k in params: continue if kwargs.get(k, v) != v: matches = False break if not matches: continue candidate_subs = kwargs # Convert the candidate subs to text using Converter.to_url(). text_candidate_subs = {} match = True for k, v in candidate_subs.items(): if k in converters: try: text_candidate_subs[k] = converters[k].to_url(v) except ValueError: match = False break else: text_candidate_subs[k] = str(v) if not match: continue # WSGI provides decoded URLs, without %xx escapes, and the URL # resolver operates on such URLs. First substitute arguments # without quoting to build a decoded URL and look for a match. # Then, if we have a match, redo the substitution with quoted # arguments in order to return a properly encoded URL. candidate_pat = _prefix.replace("%", "%%") + result if re.search( "^%s%s" % (re.escape(_prefix), pattern), candidate_pat % text_candidate_subs, ): # safe characters from `pchar` definition of RFC 3986 url = quote( candidate_pat % text_candidate_subs, safe=RFC3986_SUBDELIMS + "/~:@", ) # Don't allow construction of scheme relative urls. 
return escape_leading_slashes(url) # lookup_view can be URL name or callable, but callables are not # friendly in error messages. m = getattr(lookup_view, "__module__", None) n = getattr(lookup_view, "__name__", None) if m is not None and n is not None: lookup_view_s = "%s.%s" % (m, n) else: lookup_view_s = lookup_view patterns = [pattern for (_, pattern, _, _) in possibilities] if patterns: if args: arg_msg = "arguments '%s'" % (args,) elif kwargs: arg_msg = "keyword arguments '%s'" % kwargs else: arg_msg = "no arguments" msg = "Reverse for '%s' with %s not found. %d pattern(s) tried: %s" % ( lookup_view_s, arg_msg, len(patterns), patterns, ) else: msg = ( "Reverse for '%(view)s' not found. '%(view)s' is not " "a valid view function or pattern name." % {"view": lookup_view_s} ) raise NoReverseMatch(msg)
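# A minimal usage sketch of the machinery above, assuming the default path
# converters ('str', 'int', ...) are registered via get_converter() as in the
# upstream project; the route and pattern name below are made-up examples and
# the exact escaping produced by re.escape() may vary between Python versions.
if __name__ == "__main__":
    regex, converters = _route_to_regex("articles/<int:year>/", is_endpoint=True)
    print(regex)       # -> ^articles/(?P<year>[0-9]+)/\Z (escaping may vary)
    print(converters)  # -> {'year': <IntConverter instance>}

    pattern = RoutePattern("articles/<int:year>/", name="year-archive", is_endpoint=True)
    print(pattern.match("articles/2024/"))  # -> ('', (), {'year': 2024})
    print(pattern.match("articles/abc/"))   # -> None: '[0-9]+' does not match 'abc'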
PypiClean
/askbot_selimgul-0.11.0-py3-none-any.whl/askbot/media/js/utils/file_upload_dialog.js
var FileUploadDialog = function () { ModalDialog.call(this); this._className = 'file-upload-dialog'; this._post_upload_handler = undefined; this._fileType = 'image'; this._headerEnabled = false; }; inherits(FileUploadDialog, ModalDialog); /** * allowed values: 'image', 'attachment' */ FileUploadDialog.prototype.setFileType = function (fileType) { this._fileType = fileType; }; FileUploadDialog.prototype.getFileType = function () { return this._fileType; }; FileUploadDialog.prototype.setButtonText = function (text) { this._fakeInput.val(text); }; FileUploadDialog.prototype.setPostUploadHandler = function (handler) { this._post_upload_handler = handler; }; FileUploadDialog.prototype.runPostUploadHandler = function (url, descr) { this._post_upload_handler(url, descr); }; FileUploadDialog.prototype.setInputId = function (id) { this._input_id = id; }; FileUploadDialog.prototype.getInputId = function () { return this._input_id; }; FileUploadDialog.prototype.setErrorText = function (text) { this.setLabelText(text); this._label.addClass('error'); }; FileUploadDialog.prototype.setLabelText = function (text) { this._label.html(text); this._label.removeClass('error'); }; FileUploadDialog.prototype.setUrlInputTooltip = function (text) { this._url_input_tooltip = text; }; FileUploadDialog.prototype.getUrl = function () { var url_input = this._url_input; if (url_input.isBlank() === false) { return url_input.getVal(); } return ''; }; //disable description for now //FileUploadDialog.prototype.getDescription = function () { // return this._description_input.getVal(); //}; FileUploadDialog.prototype.resetInputs = function () { this._url_input.reset(); //this._description_input.reset(); this._upload_input.val(''); }; FileUploadDialog.prototype.getInputElement = function () { return $('#' + this.getInputId()); }; FileUploadDialog.prototype.installFileUploadHandler = function (handler) { var upload_input = this.getInputElement(); upload_input.unbind('change'); //todo: fix this - make event handler reinstall work upload_input.change(handler); }; FileUploadDialog.prototype.show = function () { //hack around the ajaxFileUpload plugin FileUploadDialog.superClass_.show.call(this); var handler = this.getStartUploadHandler(); this.installFileUploadHandler(handler); }; FileUploadDialog.prototype.getUrlInputElement = function () { return this._url_input.getElement(); }; /* * argument startUploadHandler is very special it must * be a function calling this one!!! Todo: see if there * is a more civilized way to do this. */ FileUploadDialog.prototype.startFileUpload = function (startUploadHandler) { var spinner = this._spinner; var label = this._label; spinner.ajaxStart(function () { spinner.show(); label.hide(); }); spinner.ajaxComplete(function () { spinner.hide(); label.show(); }); /* important!!! 
upload input must be loaded by id * because ajaxFileUpload monkey-patches the upload form */ var uploadInput = this.getInputElement(); uploadInput.ajaxStart(function () { uploadInput.hide(); }); uploadInput.ajaxComplete(function () { uploadInput.show(); }); //var localFilePath = upload_input.val(); var me = this; $.ajaxFileUpload({ url: askbot.urls.upload, secureuri: false,//todo: check on https fileElementId: this.getInputId(), dataType: 'xml', success: function (data, status) { var fileURL = $(data).find('file_url').text(); var origFileName = $(data).find('orig_file_name').text(); var newStatus = interpolate( gettext('Uploaded file: %s'), [origFileName] ); /* * hopefully a fix for the "fakepath" issue * https://www.mediawiki.org/wiki/Special:Code/MediaWiki/83225 */ fileURL = fileURL.replace(/\w:.*\\(.*)$/, '$1'); var error = $(data).find('error').text(); if (error !== '') { me.setErrorText(error); } else { me.getUrlInputElement().attr('value', fileURL); me.setLabelText(newStatus); var buttonText = gettext('Choose a different file'); if (me.getFileType() === 'image') { buttonText = gettext('Choose a different image'); } me.setButtonText(buttonText); } /* re-install this as the upload extension * will remove the handler to prevent double uploading * this hack is a manipulation around the * ajaxFileUpload jQuery plugin. */ me.installFileUploadHandler(startUploadHandler); }, error: function (data, status, e) { /* re-install this as the upload extension * will remove the handler to prevent double uploading */ me.setErrorText(gettext('Oops, looks like we had an error. Sorry.')); me.installFileUploadHandler(startUploadHandler); } }); return false; }; FileUploadDialog.prototype.getStartUploadHandler = function () { var me = this; var handler = function () { /* the trick is that we need inside the function call * to have a reference to itself * in order to reinstall the handler later * because ajaxFileUpload jquery extension might be destroying it */ return me.startFileUpload(handler); }; return handler; }; FileUploadDialog.prototype.createDom = function () { var superClass = FileUploadDialog.superClass_; var me = this; superClass.setAcceptHandler.call(this, function () { var url = $.trim(me.getUrl()); //var description = me.getDescription(); //@todo: have url cleaning code here if (url.length > 0) { me.runPostUploadHandler(url);//, description); me.resetInputs(); } me.hide(); }); superClass.setRejectHandler.call(this, function () { me.resetInputs(); me.hide(); }); superClass.createDom.call(this); var form = this.makeElement('form'); form.addClass('ajax-file-upload'); form.css('margin-bottom', 0); this.prependContent(form); // Browser native file upload field var upload_input = this.makeElement('input'); upload_input.attr({ id: this._input_id, type: 'file', name: 'file-upload' //size: 26??? 
}); form.append(upload_input); this._upload_input = upload_input; var fakeInput = this.makeElement('input'); fakeInput.attr('type', 'button'); fakeInput.addClass('submit'); fakeInput.addClass('fake-file-input'); var buttonText = gettext('Choose a file to insert'); if (this._fileType === 'image') { buttonText = gettext('Choose an image to insert'); } fakeInput.val(buttonText); this._fakeInput = fakeInput; form.append(fakeInput); setupButtonEventHandlers(fakeInput, function () { upload_input.click(); }); // Label which will also serve as status display var label = this.makeElement('label'); label.attr('for', this._input_id); var types = askbot.settings.allowedUploadFileTypes; types = types.join(', '); label.html(gettext('Allowed file types are:') + ' ' + types + '.'); form.append(label); this._label = label; // The url input text box, probably unused in fact var url_input = new TippedInput(); url_input.setInstruction(this._url_input_tooltip || gettext('Or paste file url here')); var url_input_element = url_input.getElement(); url_input_element.css({ 'width': '200px', 'display': 'none' }); form.append(url_input_element); //form.append($('<br/>')); this._url_input = url_input; /* //Description input box var descr_input = new TippedInput(); descr_input.setInstruction(gettext('Describe the image here')); this.makeElement('input'); form.append(descr_input.getElement()); form.append($('<br/>')); this._description_input = descr_input; */ var spinner = this.makeElement('img'); spinner.attr('src', mediaUrl('media/images/ajax-loader.gif')); spinner.css('display', 'none'); spinner.addClass('spinner'); form.append(spinner); this._spinner = spinner; upload_input.change(this.getStartUploadHandler()); };
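/* A rough usage sketch, not taken from askbot itself: the input id, the
 * post-upload handler and the way the dialog element is attached to the page
 * are placeholders; the exact wiring follows askbot's ModalDialog/WrappedElement
 * conventions in utils.js. */
var uploadDialog = new FileUploadDialog();
uploadDialog.setFileType('image');
uploadDialog.setInputId('file-upload-input');
uploadDialog.setPostUploadHandler(function (url) {
    // e.g. insert the uploaded file url into the post editor
    console.log('uploaded: ' + url);
});
uploadDialog.createDom();
$('body').append(uploadDialog.getElement());
uploadDialog.show();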
PypiClean
/intel_tensorflow_avx512-2.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/tensorflow/python/distribute/cluster_resolver/cluster_resolver.py
"""Cluster Resolvers are used for dynamic cluster IP/hostname resolution.""" import abc import collections import six from tensorflow.python.client import session from tensorflow.python.eager import context from tensorflow.python.framework import config from tensorflow.python.framework import ops from tensorflow.python.training.server_lib import ClusterSpec from tensorflow.python.util.tf_export import tf_export def format_master_url(master, rpc_layer=None): if rpc_layer: return '%s://%s' % (rpc_layer, master) else: return master def get_accelerator_devices(master, config_proto): """Returns accelerator devices given a master and a configuration.""" if context.executing_eagerly(): logical_devices = config.list_logical_devices() devices = [] for d in logical_devices: if d.device_type == 'CPU' or d.device_type == 'XLA_CPU': # Filter CPUs continue devices.append(session._DeviceAttributes(d.name, d.device_type, 0, 0)) # pylint: disable=protected-access return devices else: with ops.Graph().as_default(): with session.Session(master, config=config_proto) as s: devices = s.list_devices() return devices @tf_export('distribute.cluster_resolver.ClusterResolver') @six.add_metaclass(abc.ABCMeta) class ClusterResolver(object): """Abstract class for all implementations of ClusterResolvers. This defines the skeleton for all implementations of ClusterResolvers. ClusterResolvers are a way for TensorFlow to communicate with various cluster management systems (e.g. GCE, AWS, etc...) and gives TensorFlow necessary information to set up distributed training. By letting TensorFlow communicate with these systems, we will be able to automatically discover and resolve IP addresses for various TensorFlow workers. This will eventually allow us to automatically recover from underlying machine failures and scale TensorFlow worker clusters up and down. Note to Implementors of `tf.distribute.cluster_resolver.ClusterResolver` subclass: In addition to these abstract methods, when task_type, task_id, and rpc_layer attributes are applicable, you should also implement them either as properties with getters or setters, or directly set the attributes `self._task_type`, `self._task_id`, or `self._rpc_layer` so the base class' getters and setters are used. See `tf.distribute.cluster_resolver.SimpleClusterResolver.__init__` for an example. In general, multi-client tf.distribute strategies such as `tf.distribute.experimental.MultiWorkerMirroredStrategy` require task_type and task_id properties to be available in the `ClusterResolver` they are using. On the other hand, these concepts are not applicable in single-client strategies, such as `tf.distribute.experimental.TPUStrategy`, because the program is only expected to be run on one task, so there should not be a need to have code branches according to task type and task id. - task_type is the name of the server's current named job (e.g. 'worker', 'ps' in a distributed parameterized training job). - task_id is the ordinal index of the server within the task type. - rpc_layer is the protocol used by TensorFlow to communicate with other TensorFlow servers in a distributed environment. """ @abc.abstractmethod def cluster_spec(self): """Retrieve the current state of the cluster and return a `tf.train.ClusterSpec`. Returns: A `tf.train.ClusterSpec` representing the state of the cluster at the moment this function is called. Implementors of this function must take care in ensuring that the ClusterSpec returned is up-to-date at the time of calling this function. 
This usually means retrieving the information from the underlying cluster management system every time this function is invoked and reconstructing a cluster_spec, rather than attempting to cache anything. """ raise NotImplementedError() @abc.abstractmethod def master(self, task_type=None, task_id=None, rpc_layer=None): """Retrieves the name or URL of the session master. Note: this is only useful for TensorFlow 1.x. Args: task_type: (Optional) The type of the TensorFlow task of the master. task_id: (Optional) The index of the TensorFlow task of the master. rpc_layer: (Optional) The RPC protocol for the given cluster. Returns: The name or URL of the session master. Implementors of this function must take care in ensuring that the master returned is up-to-date at the time to calling this function. This usually means retrieving the master every time this function is invoked. """ raise NotImplementedError() def num_accelerators(self, task_type=None, task_id=None, config_proto=None): """Returns the number of accelerator cores per worker. This returns the number of accelerator cores (such as GPUs and TPUs) available per worker. Optionally, we allow callers to specify the task_type, and task_id, for if they want to target a specific TensorFlow task to query the number of accelerators. This is to support heterogenous environments, where the number of accelerators cores per host is different. Args: task_type: (Optional) The type of the TensorFlow task of the machine we want to query. task_id: (Optional) The index of the TensorFlow task of the machine we want to query. config_proto: (Optional) Configuration for starting a new session to query how many accelerator cores it has. Returns: A map of accelerator types to number of cores. """ master = self.master(task_type, task_id) # TODO(b/126786766): in eager mode, we should check whether # `tf.config.experimental_connect_to_cluster` is called or not. devices = get_accelerator_devices(master, config_proto) mapping = collections.defaultdict(int) for device in devices: if task_type is not None and task_id is not None: job_path = '/job:%s' % task_type task_path = '/task:%s' % task_id if job_path not in device.name or task_path not in device.name: continue mapping[device.device_type] += 1 return mapping @property def environment(self): """Returns the current environment which TensorFlow is running in. There are two possible return values, "google" (when TensorFlow is running in a Google-internal environment) or an empty string (when TensorFlow is running elsewhere). If you are implementing a ClusterResolver that works in both the Google environment and the open-source world (for instance, a TPU ClusterResolver or similar), you will have to return the appropriate string depending on the environment, which you will have to detect. Otherwise, if you are implementing a ClusterResolver that will only work in open-source TensorFlow, you do not need to implement this property. """ return '' @property def task_type(self): """Returns the task type this `ClusterResolver` indicates. In TensorFlow distributed environment, each job may have an applicable task type. Valid task types in TensorFlow include 'chief': a worker that is designated with more responsibility, 'worker': a regular worker for training/evaluation, 'ps': a parameter server, or 'evaluator': an evaluator that evaluates the checkpoints for metrics. 
See [Multi-worker configuration]( https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras#multi-worker_configuration) for more information about 'chief' and 'worker' task type, which are most commonly used. Having access to such information is useful when user needs to run specific code according to task types. For example, ```python cluster_spec = tf.train.ClusterSpec({ "ps": ["localhost:2222", "localhost:2223"], "worker": ["localhost:2224", "localhost:2225", "localhost:2226"] }) # SimpleClusterResolver is used here for illustration; other cluster # resolvers may be used for other source of task type/id. simple_resolver = SimpleClusterResolver(cluster_spec, task_type="worker", task_id=1) ... if cluster_resolver.task_type == 'worker': # Perform something that's only applicable on workers. This block # will run on this particular instance since we've specified this task to # be a worker in above cluster resolver. elif cluster_resolver.task_type == 'ps': # Perform something that's only applicable on parameter servers. This # block will not run on this particular instance. ``` Returns `None` if such information is not available or is not applicable in the current distributed environment, such as training with `tf.distribute.experimental.TPUStrategy`. For more information, please see `tf.distribute.cluster_resolver.ClusterResolver`'s class doc. """ return getattr(self, '_task_type', None) @property def task_id(self): """Returns the task id this `ClusterResolver` indicates. In TensorFlow distributed environment, each job may have an applicable task id, which is the index of the instance within its task type. This is useful when user needs to run specific code according to task index. For example, ```python cluster_spec = tf.train.ClusterSpec({ "ps": ["localhost:2222", "localhost:2223"], "worker": ["localhost:2224", "localhost:2225", "localhost:2226"] }) # SimpleClusterResolver is used here for illustration; other cluster # resolvers may be used for other source of task type/id. simple_resolver = SimpleClusterResolver(cluster_spec, task_type="worker", task_id=0) ... if cluster_resolver.task_type == 'worker' and cluster_resolver.task_id == 0: # Perform something that's only applicable on 'worker' type, id 0. This # block will run on this particular instance since we've specified this # task to be a 'worker', id 0 in above cluster resolver. else: # Perform something that's only applicable on other ids. This block will # not run on this particular instance. ``` Returns `None` if such information is not available or is not applicable in the current distributed environment, such as training with `tf.distribute.cluster_resolver.TPUClusterResolver`. For more information, please see `tf.distribute.cluster_resolver.ClusterResolver`'s class docstring. """ return getattr(self, '_task_id', None) @task_type.setter def task_type(self, task_type): """Setter of `task_type` property. See `task_type` property doc.""" self._task_type = task_type @task_id.setter def task_id(self, task_id): """Setter of `task_id` property. See `task_type` property doc.""" self._task_id = task_id @tf_export('distribute.cluster_resolver.SimpleClusterResolver') class SimpleClusterResolver(ClusterResolver): """Simple implementation of ClusterResolver that accepts all attributes. Please see the base class for documentation of arguments of its constructor. It is useful if you want to specify some or all attributes. 
Usage example with `tf.distribute.Strategy`: ```Python cluster = tf.train.ClusterSpec({"worker": ["worker0.example.com:2222", "worker1.example.com:2222"]}) # On worker 0 cluster_resolver = SimpleClusterResolver(cluster, task_type="worker", task_id=0, num_accelerators={"GPU": 8}, rpc_layer="grpc") strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( cluster_resolver=cluster_resolver) # On worker 1 cluster_resolver = SimpleClusterResolver(cluster, task_type="worker", task_id=1, num_accelerators={"GPU": 8}, rpc_layer="grpc") strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy( cluster_resolver=cluster_resolver) ``` """ def __init__(self, cluster_spec, master='', task_type=None, task_id=None, environment='', num_accelerators=None, rpc_layer=None): """Creates a SimpleClusterResolver from a ClusterSpec.""" super(SimpleClusterResolver, self).__init__() self._task_type = task_type self._task_id = task_id self._environment = environment self._num_accelerators = num_accelerators self._rpc_layer = rpc_layer if not isinstance(cluster_spec, ClusterSpec): raise TypeError('cluster_spec must be a `tf.train.ClusterSpec`.') self._cluster_spec = cluster_spec if not isinstance(master, str): raise TypeError('master must be a string.') self._master = master def cluster_spec(self): """Returns the ClusterSpec passed into the constructor.""" return self._cluster_spec def master(self, task_type=None, task_id=None, rpc_layer=None): """Returns the master address to use when creating a session. Note: this is only useful for TensorFlow 1.x. Args: task_type: (Optional) The type of the TensorFlow task of the master. task_id: (Optional) The index of the TensorFlow task of the master. rpc_layer: (Optional) The RPC used by distributed TensorFlow. Returns: The name or URL of the session master. If a task_type and task_id is given, this will override the `master` string passed into the initialization function. """ if task_type is not None and task_id is not None: master = self.cluster_spec().task_address(task_type, task_id) else: master = self._master return format_master_url(master, rpc_layer=rpc_layer or self._rpc_layer) @property def task_type(self): return self._task_type @property def task_id(self): return self._task_id @task_type.setter def task_type(self, task_type): self._task_type = task_type @task_id.setter def task_id(self, task_id): self._task_id = task_id @property def environment(self): return self._environment def num_accelerators(self, task_type=None, task_id=None, config_proto=None): """Returns the number of accelerator cores per worker. The SimpleClusterResolver does not do automatic detection of accelerators, and thus all arguments are unused and we simply return the value provided in the constructor. Args: task_type: Unused. task_id: Unused. config_proto: Unused. """ # Unused del task_type, task_id, config_proto if self._num_accelerators is None: return {} return self._num_accelerators @property def rpc_layer(self): return self._rpc_layer @rpc_layer.setter def rpc_layer(self, rpc_layer): self._rpc_layer = rpc_layer @tf_export('distribute.cluster_resolver.UnionResolver') class UnionClusterResolver(ClusterResolver): """Performs a union on underlying ClusterResolvers. This class performs a union given two or more existing ClusterResolvers. It merges the underlying ClusterResolvers, and returns one unified ClusterSpec when cluster_spec is called. The details of the merge function is documented in the cluster_spec function. 
For additional ClusterResolver properties such as task type, task index, rpc layer, environment, etc..., we will return the value from the first ClusterResolver in the union. An example to combine two cluster resolvers: ```Python cluster_0 = tf.train.ClusterSpec({"worker": ["worker0.example.com:2222", "worker1.example.com:2222"]}) cluster_resolver_0 = SimpleClusterResolver(cluster, task_type="worker", task_id=0, rpc_layer="grpc") cluster_1 = tf.train.ClusterSpec({"ps": ["ps0.example.com:2222", "ps1.example.com:2222"]}) cluster_resolver_1 = SimpleClusterResolver(cluster, task_type="ps", task_id=0, rpc_layer="grpc") # Its task type would be "worker". cluster_resolver = UnionClusterResolver(cluster_resolver_0, cluster_resolver_1) ``` An example to override the number of GPUs in a TFConfigClusterResolver instance: ```Python tf_config = TFConfigClusterResolver() gpu_override = SimpleClusterResolver(tf_config.cluster_spec(), num_accelerators={"GPU": 1}) cluster_resolver = UnionResolver(gpu_override, tf_config) ``` """ def __init__(self, *args, **kwargs): """Initializes a UnionClusterResolver with other ClusterResolvers. Args: *args: `ClusterResolver` objects to be unionized. **kwargs: rpc_layer - (Optional) Override value for the RPC layer used by TensorFlow. task_type - (Optional) Override value for the current task type. task_id - (Optional) Override value for the current task index. Raises: TypeError: If any argument is not a subclass of `ClusterResolvers`. ValueError: If there are no arguments passed. """ super(UnionClusterResolver, self).__init__() self._rpc_layer = kwargs.pop('rpc_layer', None) self._task_type = kwargs.pop('task_type', None) self._task_id = kwargs.pop('task_id', None) if kwargs: raise ValueError('Unexpected kwargs provided {!r}'.format(kwargs)) if not args: raise ValueError('At least one ClusterResolver is required.') for cluster_resolver in args: if not isinstance(cluster_resolver, ClusterResolver): raise TypeError('All arguments must be a sub-class of ' '`ClusterResolver.`') self._cluster_resolvers = args def cluster_spec(self): """Returns a union of all the ClusterSpecs from the ClusterResolvers. Returns: A ClusterSpec containing host information merged from all the underlying ClusterResolvers. Raises: KeyError: If there are conflicting keys detected when merging two or more dictionaries, this exception is raised. Note: If there are multiple ClusterResolvers exposing ClusterSpecs with the same job name, we will merge the list/dict of workers. If *all* underlying ClusterSpecs expose the set of workers as lists, we will concatenate the lists of workers, starting with the list of workers from the first ClusterResolver passed into the constructor. If *any* of the ClusterSpecs expose the set of workers as a dict, we will treat all the sets of workers as dicts (even if they are returned as lists) and will only merge them into a dict if there is no conflicting keys. If there is a conflicting key, we will raise a `KeyError`. """ merged_cluster = {} # We figure out whether it is all lists for a particular job, or whether # there are dicts inside. for cluster_resolver in self._cluster_resolvers: cluster_spec = cluster_resolver.cluster_spec() cluster_dict = cluster_spec.as_dict() for job_name, tasks in cluster_dict.items(): if job_name in merged_cluster: # If we see a dict, then we write a dict out regardless. if isinstance(tasks, dict): merged_cluster[job_name] = {} else: # We take whichever type is present. 
if isinstance(tasks, list): merged_cluster[job_name] = [] else: merged_cluster[job_name] = {} # We then do the merge as appropriate in merged_cluster[job]. for cluster_resolver in self._cluster_resolvers: cluster_spec = cluster_resolver.cluster_spec() cluster_dict = cluster_spec.as_dict() for job_name, tasks in cluster_dict.items(): if isinstance(merged_cluster[job_name], list): # We all have lists, we can just concatenate and be done. merged_cluster[job_name].extend(tasks) else: if isinstance(tasks, list): # We convert to a dictionary if the type is a list. task_dict = dict(zip(range(0, len(tasks)), tasks)) else: # We can simply make a copy (for update) and be done. task_dict = tasks.copy() # We detect if there are duplicates, and raise an error if so. task_keys = set(task_dict) merged_keys = set(merged_cluster[job_name].keys()) intersected_keys = task_keys.intersection(merged_keys) if intersected_keys: raise KeyError('Duplicate keys detected when merging two ' 'ClusterSpecs: %s' % repr(intersected_keys)) # We do the merge after all the processing. merged_cluster[job_name].update(task_dict) return ClusterSpec(merged_cluster) def master(self, task_type=None, task_id=None, rpc_layer=None): """Returns the master address to use when creating a session. This usually returns the master from the first ClusterResolver passed in, but you can override this by specifying the task_type and task_id. Note: this is only useful for TensorFlow 1.x. Args: task_type: (Optional) The type of the TensorFlow task of the master. task_id: (Optional) The index of the TensorFlow task of the master. rpc_layer: (Optional) The RPC protocol for the given cluster. Returns: The name or URL of the session master. """ if task_type is not None and task_id is not None: master = self.cluster_spec().task_address(task_type, task_id) return format_master_url(master, rpc_layer or self._rpc_layer) return self._cluster_resolvers[0].master(rpc_layer=rpc_layer) @property def task_type(self): return self._task_type or self._cluster_resolvers[0].task_type @property def task_id(self): return self._task_id or self._cluster_resolvers[0].task_id @task_type.setter def task_type(self, task_type): self._task_type = task_type @task_id.setter def task_id(self, task_id): self._task_id = task_id @property def environment(self): return self._cluster_resolvers[0].environment def num_accelerators(self, task_type=None, task_id=None, config_proto=None): return self._cluster_resolvers[0].num_accelerators( task_type, task_id, config_proto) @property def rpc_layer(self): return self._rpc_layer or self._cluster_resolvers[0].rpc_layer @rpc_layer.setter def rpc_layer(self, rpc_layer): self._rpc_layer = rpc_layer
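# A minimal sketch of the merge behaviour documented in
# `UnionClusterResolver.cluster_spec`; the host addresses below are made-up
# placeholders rather than real workers.
if __name__ == '__main__':
  workers = ClusterSpec({'worker': ['localhost:2222', 'localhost:2223']})
  ps = ClusterSpec({'ps': ['localhost:2224']})
  union = UnionClusterResolver(
      SimpleClusterResolver(workers, task_type='worker', task_id=0,
                            rpc_layer='grpc'),
      SimpleClusterResolver(ps))
  # Jobs from both resolvers are merged into a single ClusterSpec.
  print(union.cluster_spec().as_dict())
  # task_type / task_id / rpc_layer fall back to the first resolver in the union.
  print(union.task_type, union.task_id, union.rpc_layer)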
PypiClean
/aiida-yambo-wannier90-0.1.0b0.tar.gz/aiida-yambo-wannier90-0.1.0b0/aiida_yambo_wannier90/workflows/__init__.py
"""Base class for Yambo+Wannier90 workflow.""" from email.charset import QP import pathlib import typing as ty import numpy as np from aiida import orm from aiida.common import AttributeDict from aiida.common.lang import type_check from aiida.engine import ExitCode, ProcessBuilder, ToContext, WorkChain, if_ from aiida_quantumespresso.calculations.functions.seekpath_structure_analysis import ( seekpath_structure_analysis, ) from aiida_quantumespresso.common.types import ElectronicType, SpinType from aiida_quantumespresso.utils.mapping import prepare_process_inputs from aiida_quantumespresso.workflows.protocols.utils import ProtocolMixin from aiida_wannier90_workflows.common.types import ( WannierDisentanglementType, WannierFrozenType, WannierProjectionType, ) from aiida_wannier90_workflows.utils.kpoints import ( get_explicit_kpoints, get_mesh_from_kpoints, ) from aiida_wannier90_workflows.utils.workflows.builder import set_kpoints from aiida_wannier90_workflows.workflows import ( Wannier90BandsWorkChain, Wannier90BaseWorkChain, Wannier90OptimizeWorkChain, ) from aiida_yambo.workflows.yamboconvergence import YamboConvergence from aiida_yambo.workflows.yamborestart import YamboRestart from aiida_yambo.workflows.yambowf import YamboWorkflow from aiida_yambo.workflows.ypprestart import YppRestart from aiida_yambo_wannier90.calculations.functions.kmesh import ( find_commensurate_meshes, get_output_explicit_kpoints, is_commensurate, kmapper, ) from aiida_yambo_wannier90.calculations.gw2wannier90 import Gw2wannier90Calculation from aiida_yambo_wannier90.common.types import Gw2wannier90SortMode from aiida_yambo_wannier90.utils.workflows import ( get_yambo_converged_workchain, get_yambo_nscf, ) __all__ = ["validate_inputs", "YamboWannier90WorkChain"] # pylint: disable=too-many-lines # pylint: disable=fixme # TODO remove this todo disable def validate_inputs( # pylint: disable=inconsistent-return-statements,too-many-return-statements,too-many-branches,too-many-locals inputs: dict, ctx=None # pylint: disable=unused-argument ) -> ty.Union[None, str]: """Validate the inputs of the entire input namespace.""" # Must run steps sequentially order = ["yambo", "yambo_qp", "ypp", "wannier90", "gw2wannier90", "wannier90_qp"] non_empty = [_ in inputs for _ in order] first_input = non_empty.index(True) if not all(non_empty[first_input:]): first_no_input = first_input + non_empty[first_input:].index(False) return ( f"WorkChain must be run in order, `{order[first_input]}` is provided " f"but `{order[first_no_input]}` is empty." ) # Check inputs if previous steps are skipped should_run_yambo = "yambo" in inputs should_run_yambo_commensurate = "GW_mesh" in inputs should_run_wannier90 = "wannier90" in inputs should_run_yambo_qp = "yambo_qp" in inputs should_run_ypp = "ypp" in inputs should_run_gw2wannier90 = "gw2wannier90" in inputs should_run_wannier90_qp = "wannier90_qp" in inputs if should_run_yambo_qp: yambo_qp_inputs = inputs["yambo_qp"] if not should_run_yambo: if "parent_folder" not in yambo_qp_inputs and not should_run_yambo_commensurate: return "`yambo_qp.parent_folder` is empty." if should_run_ypp: ypp_inputs = inputs["ypp"] if not should_run_yambo_qp: if "QP_DB" not in ypp_inputs["ypp"]: return "`ypp.ypp.QP_DB` is empty." if "parent_folder" not in ypp_inputs: return "`ypp.parent_folder` is empty." # I need `wannier90` input to run a w90 postproc before `ypp`, # or if there is `nnkp`, I skip the postproc. 
if not should_run_wannier90: if "nnkp_file" not in ypp_inputs["ypp"]: return "`ypp.ypp.nnkp_file` is empty." if should_run_gw2wannier90: gw2wannier90_inputs = inputs["gw2wannier90"] if not should_run_wannier90: for tag in ["nnkp", "parent_folder"]: if tag not in gw2wannier90_inputs: return f"`gw2wannier90.{tag}` is empty." if not should_run_ypp: if "unsorted_eig" not in gw2wannier90_inputs: return "`gw2wannier90.unsorted_eig` is empty." if should_run_wannier90_qp: wannier90_qp_inputs = inputs["wannier90_qp"] if not should_run_gw2wannier90: if "remote_input_folder" not in wannier90_qp_inputs["wannier90"]: return "`wannier90_qp.wannier90.remote_input_folder` is empty." class YamboWannier90WorkChain( ProtocolMixin, WorkChain ): # pylint: disable=too-many-public-methods """Workchain to obtain GW-corrected maximally localised Wannier functions (MLWF).""" @classmethod def define(cls, spec): """Define the process spec.""" from aiida_wannier90_workflows.workflows.base.wannier90 import ( validate_inputs_base as validate_inputs_base_wannier90, ) super().define(spec) spec.input( "structure", valid_type=orm.StructureData, help="The input structure." ) spec.input( "clean_workdir", valid_type=orm.Bool, serializer=orm.to_aiida_type, default=lambda: orm.Bool(False), help=( "If True, work directories of all called calculation will be cleaned " "at the end of execution." ), ) spec.input( "bands_kpoints", valid_type=orm.KpointsData, required=False, help=( "Explicit kpoints to use for the band structure. " "If not specified, the workchain will run seekpath to generate " "a primitive cell and a bands_kpoints. Specify either this or `bands_kpoints_distance`." ), ) spec.input( "bands_kpoints_distance", valid_type=orm.Float, serializer=orm.to_aiida_type, required=False, help="Minimum kpoints distance for seekpath to generate a list of kpoints along the path. " "Specify either this or `bands_kpoints`.", ) spec.input( "kpoints_force_gw", valid_type=orm.Bool, serializer=orm.to_aiida_type, default=lambda: orm.Bool(False), help="If `True` will force W90 to use the GW converged k-point mesh.", ) spec.input( "GW_mesh", valid_type=orm.KpointsData, serializer=orm.to_aiida_type, required=False, help="GW mesh. This allow to start from yambo commensurate, skipping gw convergence", ) spec.expose_inputs( YamboConvergence, namespace="yambo", exclude=( "clean_workdir", "ywfl.scf.pw.structure", "ywfl.nscf.pw.structure", ), namespace_options={ "help": "Inputs for the `YamboConvergence` for yambo calculation.", "required": False, "populate_defaults": False, }, ) spec.expose_inputs( YamboWorkflow, namespace="yambo_qp", exclude=( "clean_workdir", "scf.pw.structure", "nscf.pw.structure", ), namespace_options={ "help": ( "Inputs for the `YamboConvergence` for yambo QP calculation. " "If not provided, it will be generated based on the previous converged inputs." ), "required": False, "populate_defaults": False, }, ) spec.expose_inputs( YppRestart, namespace="ypp", exclude=("clean_workdir",), namespace_options={ "help": "Inputs for the `YppRestart` calculation, to be used for unsorted.eig generation. ", "required": False, "populate_defaults": False, }, ) spec.expose_inputs( YppRestart, namespace="ypp_QP", exclude=("clean_workdir",), namespace_options={ "help": "Inputs for the `YppRestart` calculation, to be used for merging QP dbs. 
", "required": False, "populate_defaults": False, }, ) spec.expose_inputs( Wannier90OptimizeWorkChain, namespace="wannier90", exclude=( "clean_workdir", "structure", "kpoint_path", "bands_kpoints", "bands_kpoints_distance", ), namespace_options={ "help": "Inputs for the `Wannier90OptimizeWorkChain` for wannier90 calculation.", "required": False, "populate_defaults": False, }, ) spec.expose_inputs( Gw2wannier90Calculation, namespace="gw2wannier90", exclude=("clean_workdir",), namespace_options={ "help": "Inputs for the `Gw2wannier90Calculation`. ", "required": False, "populate_defaults": False, }, ) spec.expose_inputs( Wannier90BaseWorkChain, namespace="wannier90_qp", exclude=( "clean_workdir", "wannier90.structure", "wannier90.kpoint_path", "wannier90.bands_kpoints", ), namespace_options={ "help": ( "Inputs for the `Wannier90BaseWorkChain` for wannier90 QP calculation. " "If not provided, it will be generated based on the previous wannier inputs." ), "required": True, }, ) spec.inputs["wannier90_qp"].validator = validate_inputs_base_wannier90 spec.inputs.validator = validate_inputs spec.output( "primitive_structure", valid_type=orm.StructureData, required=False, help="The normalized and primitivized structure for which the calculations are computed.", ) spec.output( "seekpath_parameters", valid_type=orm.Dict, required=False, help="The parameters used in the SeeKpath call to normalize the input or relaxed structure.", ) spec.expose_outputs( YamboConvergence, namespace="yambo", namespace_options={"required": False}, ) spec.expose_outputs( YamboWorkflow, namespace="yambo_commensurate", namespace_options={"required": False}, ) spec.expose_outputs( YamboWorkflow, namespace="yambo_qp", namespace_options={"required": False}, ) spec.expose_outputs( Wannier90BaseWorkChain, namespace="wannier90_pp", namespace_options={"required": False}, ) spec.expose_outputs( YppRestart, namespace="ypp", namespace_options={"required": False}, ) spec.expose_outputs( Wannier90OptimizeWorkChain, namespace="wannier90", namespace_options={"required": False}, ) spec.expose_outputs( Gw2wannier90Calculation, namespace="gw2wannier90", namespace_options={"required": False}, ) spec.expose_outputs( Wannier90BaseWorkChain, namespace="wannier90_qp", ) spec.output( "band_structures.wannier90", valid_type=orm.BandsData, required=False, help="The Wannier interpolated band structure at DFT level.", ) spec.output( "band_structures.wannier90_qp", valid_type=orm.BandsData, help="The Wannier interpolated band structure at G0W0 level.", ) spec.outline( cls.setup, if_(cls.should_run_seekpath)( cls.run_seekpath, ), if_(cls.should_run_yambo_convergence)( cls.run_yambo_convergence, cls.inspect_yambo_convergence, ), if_(cls.should_run_setup_kmesh)( cls.setup_kmesh, ), if_(cls.should_run_yambo_commensurate)( cls.run_yambo_commensurate, cls.inspect_yambo_commensurate, ), # TODO run an additional yambo_qp on shifted grid to check w90_qp bands if_(cls.should_run_yambo_qp)( cls.run_yambo_qp, cls.inspect_yambo_qp, ), # if_(cls.should_run_ypp_qp)( # cls.run_ypp_qp, # cls.inspect_ypp_qp, # ), if_(cls.should_run_wannier90_pp)( cls.run_wannier90_pp, cls.inspect_wannier90_pp, ), if_(cls.should_run_ypp)( cls.run_ypp, cls.inspect_ypp, ), if_(cls.should_run_wannier90)( cls.run_wannier90, cls.inspect_wannier90, ), if_(cls.should_run_gw2wannier90)( cls.run_gw2wannier90, cls.inspect_gw2wannier90, ), cls.run_wannier90_qp, cls.inspect_wannier90_qp, cls.results, ) spec.exit_code( 401, "ERROR_SUB_PROCESS_FAILED_SETUP", message="Unrecoverable error when running 
setup.", ) spec.exit_code( 402, "ERROR_SUB_PROCESS_FAILED_YAMBO_CONV", message="Unrecoverable error when running yambo convergence.", ) spec.exit_code( 403, "ERROR_SUB_PROCESS_FAILED_SETUP_KMESH", message="Unrecoverable error when running setup_kmesh.", ) spec.exit_code( 404, "ERROR_SUB_PROCESS_FAILED_WANNIER90_PP", message="Unrecoverable error when running wannier90 postproc.", ) spec.exit_code( 405, "ERROR_SUB_PROCESS_FAILED_YAMBO_COMMENSURATE", message="Unrecoverable error when running yambo on commensurate kmesh.", ) spec.exit_code( 406, "ERROR_SUB_PROCESS_FAILED_YAMBO_QP", message="Unrecoverable error when running yambo QP correction.", ) spec.exit_code( 407, "ERROR_SUB_PROCESS_FAILED_YPP", message="Unrecoverable error when running yambo ypp.", ) spec.exit_code( 408, "ERROR_SUB_PROCESS_FAILED_WANNIER90", message="Unrecoverable error when running wannier90.", ) spec.exit_code( 409, "ERROR_SUB_PROCESS_FAILED_GW2WANNIER90", message="Unrecoverable error when running gw2wannier90.", ) spec.exit_code( 410, "ERROR_SUB_PROCESS_FAILED_WANNIER90_QP", message="Unrecoverable error when running wannier90 with QP-corrected eig.", ) @classmethod def get_protocol_filepath(cls) -> pathlib.Path: """Return the ``pathlib.Path`` to the ``.yaml`` file that defines the protocols.""" # pylint: disable=import-outside-toplevel from importlib_resources import files from . import protocols return files(protocols) / "yambo_wannier90.yaml" @classmethod def get_builder_from_protocol( # pylint: disable=too-many-statements,too-many-locals cls, codes: ty.Dict[str, ty.Union[orm.Code, str, int]], structure: orm.StructureData, *, protocol: str = None, overrides: dict = None, pseudo_family: str = "PseudoDojo/0.4/PBE/SR/standard/upf", exclude_semicore: bool = False, electronic_type=ElectronicType.METAL, wannier_projection_type: WannierProjectionType = WannierProjectionType.ATOMIC_PROJECTORS_QE, NLCC: bool = True, RIM_v: bool = True, RIM_W: bool = False, ) -> ProcessBuilder: """Return a builder prepopulated with inputs selected according to the chosen protocol. 
:param codes: [description] :type codes: typing.Dict[str, typing.Union[aiida.orm.Code, str, int]] :param bxsf: [description] :type bxsf: aiida.orm.RemoteData :param protocol: [description], defaults to None :type protocol: str, optional :param overrides: [description], defaults to None :type overrides: dict, optional :return: [description] :rtype: aiida.engine.ProcessBuilder """ # pylint: disable=import-outside-toplevel,protected-access # from aiida_quantumespresso.workflows.protocols.utils import recursive_merge from aiida_wannier90_workflows.utils.workflows.builder import ( recursive_merge_builder, ) required_codes = [ "pw", "pw2wannier90", "wannier90", "yambo", "p2y", "ypp", "gw2wannier90", ] if not all(_ in codes for _ in required_codes): raise ValueError(f"`codes` must contain {required_codes}") for key, code in codes.items(): if not isinstance(code, orm.Code): codes[key] = orm.load_code(code) type_check(structure, orm.StructureData) inputs = cls.get_protocol_inputs(protocol, overrides) inputs["structure"] = structure # Prepare yambo yambo_overrides = { "ywfl": { "scf": {"pseudo_family": pseudo_family}, "nscf": {"pseudo_family": pseudo_family}, }, } yambo_builder = YamboConvergence.get_builder_from_protocol( pw_code=codes["pw"], preprocessing_code=codes["p2y"], code=codes["yambo"], protocol="moderate", structure=structure, electronic_type=electronic_type, overrides=yambo_overrides, NLCC=NLCC, RIM_v=RIM_v, RIM_W=RIM_W, ) inputs["yambo"] = yambo_builder._inputs(prune=True) inputs["yambo"]["ywfl"]["scf"]["pw"].pop("structure", None) inputs["yambo"]["ywfl"]["nscf"]["pw"].pop("structure", None) inputs["yambo"].pop("clean_workdir", None) # Prepare wannier # projection_type = WannierProjectionType.ATOMIC_PROJECTORS_QE # disentanglement_type = WannierDisentanglementType.SMV # frozen_type = WannierFrozenType.FIXED_PLUS_PROJECTABILITY # Auto guess from projection_type disentanglement_type = None frozen_type = None wannier_builder = Wannier90OptimizeWorkChain.get_builder_from_protocol( codes, structure, pseudo_family=pseudo_family, exclude_semicore=exclude_semicore, projection_type=wannier_projection_type, disentanglement_type=disentanglement_type, frozen_type=frozen_type, ) # No reference PW bands, so we stop optimization wannier_builder.optimize_disproj = False inputs["wannier90"] = wannier_builder._inputs(prune=True) inputs["wannier90"].pop("structure", None) inputs["wannier90"].pop("clean_workdir", None) # TODO Prepare yambo_qp # yambo_qp_builder = YamboRestart.get_builder_from_protocol( # pw_code=codes["pw"], # preprocessing_code=codes["p2y"], # code=codes["yambo"], # protocol="moderate", # NLCC=NLCC, # RIM_v=RIM_v, # RIM_W=RIM_W, # ) # inputs["yambo_qp"] = yambo_qp_builder._inputs(prune=True) inputs["yambo_qp"] = inputs["yambo"]["ywfl"] inputs["yambo_qp"].pop("clean_workdir", None) # Ypp; without a parent_folder for now. 
We should set it during the input preparation ypp_builder = YppRestart.get_builder_from_protocol( code=codes["ypp"], protocol="Wannier", ) # ypp_builder.ypp.QP_calculations = List( # list=[1948, 1980, 2006, 2064, 2151, 2176, 2215, 2253] # ) # ypp_builder.QP_DB = load_node(2329) inputs["ypp"] = ypp_builder._inputs(prune=True) inputs["ypp"].pop("clean_workdir", None) # ypp_QP ypp_builder = YppRestart.get_builder_from_protocol( code=codes["ypp"], protocol="merge_QP", ) inputs["ypp_QP"] = ypp_builder._inputs(prune=True) inputs["ypp_QP"].pop( "clean_workdir", None ) # but actually I want to clean the wdir # Prepare gw2wannier90 inputs["gw2wannier90"] = { "code": codes["gw2wannier90"], } # Prepare wannier90_qp wannier90_qp_builder = Wannier90BaseWorkChain.get_builder_from_protocol( code=codes["wannier90"], structure=structure, pseudo_family=pseudo_family, overrides={ "meta_parameters": { "exclude_semicore": exclude_semicore, } }, electronic_type=electronic_type, ) params = wannier90_qp_builder.wannier90.parameters.get_dict() params["bands_plot"] = True wannier90_qp_builder.wannier90.parameters = orm.Dict(dict=params) inputs["wannier90_qp"] = wannier90_qp_builder._inputs(prune=True) inputs["wannier90_qp"]["wannier90"].pop("structure", None) inputs["wannier90_qp"].pop("clean_workdir", None) builder = cls.get_builder() builder = recursive_merge_builder(builder, inputs) return builder def setup(self) -> None: # pylint: disable=inconsistent-return-statements """Initialize context variables.""" self.ctx.current_structure = self.inputs.structure if "bands_kpoints" in self.inputs: self.ctx.current_bands_kpoints = self.inputs.bands_kpoints # Converged mesh from YamboConvergence self.ctx.kpoints_gw_conv = None if self.should_run_setup_kmesh() and not self.should_run_yambo_convergence() and not "GW_mesh" in self.inputs: # `setup_kmesh` need `self.ctx.kpoints_gw_conv`, I assume that # the parent of `yambo_qp` is a converged mesh. # Since the workchain runs sequentially, the `yambo_qp` must be # in the workchain inputs. 
if "yambo_qp" in self.inputs: parent_folder = self.inputs.yambo_qp.parent_folder elif "ypp" in self.inputs: parent_folder = self.inputs.ypp.parent_folder # The creator is a YamboCalculation, caller is a YamboRestart wkchain_gw = parent_folder.creator.caller # Its parent_folder is the remote_folder of a pw.x nscf calc_nscf = wkchain_gw.inputs.parent_folder.creator self.ctx.kpoints_gw_conv = calc_nscf.inputs.kpoints # Input Wannier90 mesh self.ctx.kpoints_w90_input = None if self.should_run_wannier90(): self.ctx.kpoints_w90_input = self.inputs.wannier90.nscf.kpoints if ( not self.should_run_yambo_convergence() and not self.inputs.kpoints_force_gw # If starting wannier90+gw2wannier90+wannier90_qp from unsorted.eig, # then I don't know the gw converged mesh and self.ctx.kpoints_gw_conv is not None ): kmesh_gw_conv = get_mesh_from_kpoints(self.ctx.kpoints_gw_conv) kmesh_w90_input = get_mesh_from_kpoints(self.ctx.kpoints_w90_input) if not is_commensurate(kmesh_gw_conv, kmesh_w90_input) and not 'GW_mesh' in self.inputs: self.report( f"Skipping GW convergence, but GW converged mesh {kmesh_gw_conv} " f"is not commensurate with W90 input mesh {kmesh_w90_input}" ) return self.exit_codes.ERROR_SUB_PROCESS_FAILED_SETUP # Commensurate meshes for GW and W90 self.ctx.kpoints_gw = None # Initialize with input mesh self.ctx.kpoints_w90 = self.ctx.kpoints_w90_input def should_run_seekpath(self): """Run seekpath if the `inputs.bands_kpoints` is not provided.""" return "bands_kpoints" not in self.inputs def run_seekpath(self): """Run the structure through SeeKpath to get the primitive and normalized structure.""" args = { "structure": self.inputs.structure, "metadata": {"call_link_label": "seekpath_structure_analysis"}, } if "bands_kpoints_distance" in self.inputs: args["reference_distance"] = self.inputs["bands_kpoints_distance"] result = seekpath_structure_analysis(**args) self.ctx.current_structure = result["primitive_structure"] self.ctx.current_bands_kpoints = result["explicit_kpoints"] structure_formula = self.inputs.structure.get_formula() primitive_structure_formula = result["primitive_structure"].get_formula() self.report( f"launching seekpath: {structure_formula} -> {primitive_structure_formula}" ) self.out("primitive_structure", result["primitive_structure"]) self.out("seekpath_parameters", result["parameters"]) def should_run_yambo_convergence(self) -> bool: """Whether to run yambo convergence.""" if "yambo" in self.inputs: return True return False def prepare_yambo_convergence_inputs(self) -> AttributeDict: """Prepare inputs for ``YamboConvergence``.""" inputs = AttributeDict(self.exposed_inputs(YamboConvergence, namespace="yambo")) inputs.ywfl.scf.pw.structure = self.ctx.current_structure inputs.ywfl.nscf.pw.structure = self.ctx.current_structure return inputs def run_yambo_convergence(self) -> ty.Dict: """Run the ``YamboConvergence``.""" inputs = self.prepare_yambo_convergence_inputs() inputs.metadata.call_link_label = "yambo_convergence" inputs = prepare_process_inputs(YamboConvergence, inputs) running = self.submit(YamboConvergence, **inputs) self.report(f"launching {running.process_label}<{running.pk}>") return ToContext(wkchain_yambo_conv=running) def inspect_yambo_convergence( # pylint: disable=inconsistent-return-statements self, ) -> ty.Union[None, ExitCode]: """Verify that the `Wan2skeafCalculation` successfully finished.""" wkchain = self.ctx.wkchain_yambo_conv if not wkchain.is_finished_ok: self.report( f"{wkchain.process_label} failed with exit status {wkchain.exit_status}" ) return 
self.exit_codes.ERROR_SUB_PROCESS_FAILED_YAMBO_CONV # Find the converged kmesh converged_wkchain = get_yambo_converged_workchain(wkchain) nscf_wkchain = get_yambo_nscf(converged_wkchain) self.ctx.kpoints_gw_conv = nscf_wkchain.inputs.kpoints def should_run_setup_kmesh(self) -> bool: """Whether to run setup_kmesh.""" if "GW_mesh" in self.inputs: self.ctx.kpoints_gw_conv = self.inputs.GW_mesh return self.should_run_yambo_convergence() or self.should_run_wannier90_pp() def setup_kmesh(self) -> None: """Find commensurate kmeshes for both Yambo and Wannier90.""" kpoints_gw_conv = self.ctx.kpoints_gw_conv kpoints_w90_input = self.ctx.kpoints_w90_input kmesh_gw_conv = get_mesh_from_kpoints(kpoints_gw_conv) kmesh_w90_input = get_mesh_from_kpoints(kpoints_w90_input) if self.inputs.kpoints_force_gw: self.ctx.kpoints_gw = kpoints_gw_conv self.ctx.kpoints_w90 = get_explicit_kpoints(kpoints_gw_conv) self.report( f"Converged GW kmesh = {kmesh_gw_conv}, W90 input kmesh = {kmesh_w90_input}. " f"Force W90 using GW kmesh = {kmesh_gw_conv}." ) return result = find_commensurate_meshes( # pylint: disable=unexpected-keyword-arg dense_mesh=kpoints_gw_conv, coarse_mesh=kpoints_w90_input, metadata={"call_link_label": "find_commensurate_meshes"}, ) kpoints_dense = result["dense_mesh"] kpoints_coarse = result["coarse_mesh"] kmesh_dense = get_mesh_from_kpoints(kpoints_dense) kmesh_coarse = get_mesh_from_kpoints(kpoints_coarse) self.report( f"Converged GW kmesh = {kmesh_gw_conv}, W90 input kmesh = {kmesh_w90_input}. " f"Found commensurate meshes GW = {kmesh_dense}, W90 = {kmesh_coarse}." ) # Use theses meshes before submitting the corresponding workflow if np.allclose(kmesh_coarse, kmesh_w90_input): self.ctx.kpoints_w90 = kpoints_w90_input else: self.ctx.kpoints_w90 = get_explicit_kpoints(kpoints_coarse) if np.allclose(kmesh_dense, kmesh_gw_conv): self.ctx.kpoints_gw = kpoints_gw_conv else: self.ctx.kpoints_gw = kpoints_dense def should_run_yambo_commensurate(self) -> bool: """Whether to run again yambo on the commensurate kmesh.""" if "GW_mesh" in self.inputs and not 'parent_folder' in self.inputs["yambo_qp"]: return True if not self.should_run_yambo_convergence(): return False if self.ctx.kpoints_gw_conv != self.ctx.kpoints_gw: return True return False def prepare_yambo_commensurate_inputs(self) -> AttributeDict: """Prepare inputs for yambo commensurate.""" # Get and reuse the converged input from YamboWorkflow # pylint: disable=protected-access inputs = AttributeDict(self.exposed_inputs(YamboWorkflow, namespace="yambo_qp")) if "QP_subset_dict" in inputs: del inputs.QP_subset_dict inputs.scf.pw.structure = self.ctx.current_structure inputs.nscf.pw.structure = self.ctx.current_structure if self.should_run_yambo_convergence(): converged_wkchain = get_yambo_converged_workchain(self.ctx.wkchain_yambo_conv) inputs.yres.yambo.parameters = converged_wkchain.inputs._construct_attribute_dict(True) # Use commensurate mesh inputs.nscf.kpoints = self.ctx.kpoints_gw # Set parallelization, mpi_procs, npool, ... 
# `inputs.yambo_qp` always exists, but `inputs.yambo` might be empty if "scf" in inputs and "scf" in self.inputs.yambo_qp: inputs.scf.pw.metadata = self.inputs.yambo_qp.scf.pw.metadata if "parallelization" in self.inputs.yambo_qp.scf.pw: inputs.scf.pw.parallelization = ( self.inputs.yambo_qp.scf.pw.parallelization ) if "pw" in inputs.nscf and "pw" in self.inputs.yambo_qp.nscf: inputs.nscf.pw.metadata = self.inputs.yambo_qp.nscf.pw.metadata if "parallelization" in self.inputs.yambo_qp.nscf.pw: inputs.nscf.pw.parallelization = ( self.inputs.yambo_qp.nscf.pw.parallelization ) return inputs def run_yambo_commensurate(self) -> ty.Dict: """Run the `YamboWorkflow`.""" inputs = self.prepare_yambo_commensurate_inputs() inputs.metadata.call_link_label = "yambo_commensurate" inputs = prepare_process_inputs(YamboWorkflow, inputs) running = self.submit(YamboWorkflow, **inputs) self.report( f"launching {running.process_label}<{running.pk}> for yambo_commensurate" ) return ToContext(wkchain_yambo_commensurate=running) def inspect_yambo_commensurate( # pylint: disable=inconsistent-return-statements self, ) -> ty.Union[None, ExitCode]: """Verify that the `YamboWorkflow` successfully finished.""" wkchain = self.ctx.wkchain_yambo_commensurate if not wkchain.is_finished_ok: self.report( f"{wkchain.process_label} failed with exit status {wkchain.exit_status}" ) return self.exit_codes.ERROR_SUB_PROCESS_FAILED_YAMBO_COMMENSURATE def should_run_yambo_qp(self) -> bool: """Whether to run yambo_qp.""" if "yambo_qp" in self.inputs: return True return False def prepare_yambo_qp_inputs(self) -> AttributeDict: """Prepare inputs for yambo QP.""" # pylint: disable=too-many-locals # Get the converged input from YamboWorkflow inputs = AttributeDict(self.exposed_inputs(YamboWorkflow, namespace="yambo_qp")) yambo_params = inputs.yres.yambo.parameters.get_dict() # Prepare QPkrange if self.should_run_wannier90(): # w90_calc_inputs = self.ctx.wkchain_wannier90.inputs.wannier90.wannier90 w90_calc_inputs = self.inputs.wannier90.wannier90.wannier90 else: w90_calc_inputs = self.inputs.wannier90_qp.wannier90 w90_params = w90_calc_inputs.parameters.get_dict() num_bands = w90_params["num_bands"] exclude_bands = w90_params.get("exclude_bands", [0]) start_band = max(exclude_bands) + 1 end_band = start_band + num_bands - 1 if self.should_run_yambo_commensurate(): parent_wkchain = self.ctx.wkchain_yambo_commensurate yambo_params = parent_wkchain.inputs.yres.yambo.parameters.get_dict() else: if self.should_run_yambo_convergence(): parent_wkchain = get_yambo_converged_workchain( self.ctx.wkchain_yambo_conv ) yambo_params = parent_wkchain.inputs.yres.yambo.parameters.get_dict() else: # Assume the inputs.parent_folder is generated inside a YamboWorkflow parent_folder = inputs.parent_folder # The creator is a YamboCalculation, caller is a YamboRestart parent_wkchain = parent_folder.creator.caller # Assume its caller is a YamboWorkflow parent_wkchain = parent_wkchain.caller # Reuse converged inputs? 
Better keep the user provided inputs # inputs = parent_wkchain.inputs._construct_attribute_dict(True) nscf_wkchain = get_yambo_nscf(parent_wkchain) gw_kpoints = ( get_output_explicit_kpoints( # pylint: disable=unexpected-keyword-arg retrieved=nscf_wkchain.outputs.retrieved, metadata={"call_link_label": "get_output_explicit_kpoints"}, ) ) qpkrange = kmapper( # pylint: disable=unexpected-keyword-arg dense_mesh=gw_kpoints, coarse_mesh=self.ctx.kpoints_w90, start_band=orm.Int(start_band), end_band=orm.Int(end_band), metadata={"call_link_label": "kmapper"}, ) qpkrange = qpkrange.get_list() # Set QPkrange in GW parameters # yambo_params["variables"]["QPkrange"] = [qpkrange, ""] # To be set from input if not hasattr(inputs, "QP_subset_dict"): inputs.QP_subset_dict = orm.Dict( dict={ "qp_per_subset": 50, "parallel_runs": 4, "explicit": qpkrange, } ) else: QP_subset_dict = inputs.QP_subset_dict.get_dict() QP_subset_dict["explicit"] = qpkrange inputs.QP_subset_dict = orm.Dict(dict=QP_subset_dict) inputs.scf.pw.structure = self.ctx.current_structure inputs.nscf.pw.structure = self.ctx.current_structure inputs.yres.yambo.parameters = orm.Dict(dict=yambo_params) inputs.parent_folder = parent_wkchain.outputs.remote_folder # Use converged output folder settings: dict = inputs.yres.yambo.settings.get_dict() # TODO is this correct? settings.update({"INITIALISE": False, "COPY_SAVE": True, "COPY_DBS": True}) inputs.yres.yambo.settings = orm.Dict(dict=settings) return inputs def run_yambo_qp(self) -> ty.Dict: """Run the `YamboRestart` for QP.""" inputs = self.prepare_yambo_qp_inputs() inputs.metadata.call_link_label = "yambo_qp" inputs = prepare_process_inputs(YamboWorkflow, inputs) running = self.submit(YamboWorkflow, **inputs) self.report(f"launching {running.process_label}<{running.pk}> for yambo_qp") return ToContext(wkchain_yambo_qp=running) def inspect_yambo_qp( # pylint: disable=inconsistent-return-statements self, ) -> ty.Union[None, ExitCode]: """Verify that the `YamboWorkflow` successfully finished.""" wkchain = self.ctx.wkchain_yambo_qp if not wkchain.is_finished_ok: self.report( f"{wkchain.process_label} failed with exit status {wkchain.exit_status}" ) return self.exit_codes.ERROR_SUB_PROCESS_FAILED_YAMBO_QP def should_run_ypp_qp(self) -> bool: """Whether to run ypp_QP.""" if "ypp_QP" in self.inputs: if "parent_folder" in self.inputs.ypp_QP: self.ctx.wkchain_yambo_qp = ( self.inputs.ypp_QP.outputs.remote_folder.creator.caller.caller ) QP_list = ( self.ctx.wkchain_yambo_qp.outputs.splitted_QP_calculations.get_list() ) if len(QP_list) > 1: return True return False def prepare_ypp_inputs_qp(self) -> AttributeDict: """Prepare inputs for ypp.""" inputs = AttributeDict(self.exposed_inputs(YppRestart, namespace="ypp_QP")) inputs.ypp.QP_calculations = ( self.ctx.wkchain_yambo_qp.outputs.splitted_QP_calculations ) inputs.parent_folder = self.ctx.wkchain_yambo_qp.called[0].inputs.parent_folder return inputs def run_ypp_qp(self) -> ty.Dict: """Run the ``YppRestart``.""" inputs = self.prepare_ypp_inputs_qp() inputs.metadata.call_link_label = "ypp_QP" inputs = prepare_process_inputs(YppRestart, inputs) running = self.submit(YppRestart, **inputs) self.report(f"launching {running.process_label}<{running.pk}>") return ToContext(wkchain_ypp_QP=running) def inspect_ypp_qp( # pylint: disable=inconsistent-return-statements self, ) -> ty.Union[None, ExitCode]: """Verify that the ``YppRestart`` successfully finished.""" wkchain = self.ctx.wkchain_ypp_QP if not wkchain.is_finished_ok: self.report( 
f"{wkchain.process_label} failed with exit status {wkchain.exit_status}" ) return self.exit_codes.ERROR_SUB_PROCESS_FAILED_YPP def should_run_wannier90_pp(self) -> bool: """Whether to run wannier.""" if self.should_run_ypp() and "nnkp_file" not in self.inputs.ypp.ypp: return True return False def prepare_wannier90_pp_inputs(self) -> AttributeDict: """Prepare inputs for wannier90_pp, only for generating nnkp file.""" inputs = AttributeDict( self.exposed_inputs(Wannier90OptimizeWorkChain, namespace="wannier90") )["wannier90"] inputs.wannier90.structure = self.ctx.current_structure inputs.wannier90.bands_kpoints = self.ctx.current_bands_kpoints # Use commensurate kmesh if self.ctx.kpoints_w90_input != self.ctx.kpoints_w90: set_kpoints( inputs, self.ctx.kpoints_w90, process_class=Wannier90BaseWorkChain ) # Only for nnkp, no BandsData for shifting windows inputs.shift_energy_windows = False # Add `postproc_setup` if "settings" in inputs.wannier90: settings = inputs.wannier90["settings"].get_dict() else: settings = {} settings["postproc_setup"] = True inputs.wannier90["settings"] = settings return inputs def run_wannier90_pp(self) -> ty.Dict: """Run the `Wannier90BaseWorkChain` for postproc.""" inputs = self.prepare_wannier90_pp_inputs() inputs.metadata.call_link_label = "wannier90_pp" inputs = prepare_process_inputs(Wannier90BaseWorkChain, inputs) running = self.submit(Wannier90BaseWorkChain, **inputs) self.report(f"launching {running.process_label}<{running.pk}> for postproc") return ToContext(wkchain_wannier90_pp=running) def inspect_wannier90_pp( # pylint: disable=inconsistent-return-statements self, ) -> ty.Union[None, ExitCode]: """Verify that the `Wannier90BaseWorkChain` successfully finished.""" wkchain = self.ctx.wkchain_wannier90_pp if not wkchain.is_finished_ok: self.report( f"{wkchain.process_label} failed with exit status {wkchain.exit_status}" ) return self.exit_codes.ERROR_SUB_PROCESS_FAILED_WANNIER90_PP def should_run_ypp(self) -> bool: """Whether to run ypp.""" if "ypp" in self.inputs: return True return False def prepare_ypp_inputs(self) -> AttributeDict: """Prepare inputs for ypp.""" inputs = AttributeDict(self.exposed_inputs(YppRestart, namespace="ypp")) # if self.should_run_ypp_qp(): # ypp_wkchain = self.ctx.wkchain_ypp_QP # # Working if merge is not needed # inputs.ypp.QP_DB = ypp_wkchain.outputs.QP_DB # inputs.parent_folder = ypp_wkchain.outputs.remote_folder if self.should_run_yambo_qp(): yambo_wkchain = self.ctx.wkchain_yambo_qp # Working if merge is not needed if "merged_QP" in yambo_wkchain.outputs: inputs.ypp.QP_DB = yambo_wkchain.outputs.merged_QP else: inputs.ypp.QP_DB = yambo_wkchain.outputs.QP_DB inputs.parent_folder = self.ctx.wkchain_yambo_qp.called[ 0 ].inputs.parent_folder if self.should_run_wannier90_pp(): inputs.ypp.nnkp_file = self.ctx.wkchain_wannier90_pp.outputs.nnkp_file return inputs def run_ypp(self) -> ty.Dict: """Run the ``YppRestart``.""" inputs = self.prepare_ypp_inputs() inputs.metadata.call_link_label = "ypp" inputs = prepare_process_inputs(YppRestart, inputs) running = self.submit(YppRestart, **inputs) self.report(f"launching {running.process_label}<{running.pk}>") return ToContext(wkchain_ypp=running) def inspect_ypp( # pylint: disable=inconsistent-return-statements self, ) -> ty.Union[None, ExitCode]: """Verify that the ``YppRestart`` successfully finished.""" wkchain = self.ctx.wkchain_ypp if not wkchain.is_finished_ok: self.report( f"{wkchain.process_label} failed with exit status {wkchain.exit_status}" ) return 
self.exit_codes.ERROR_SUB_PROCESS_FAILED_YPP def should_run_wannier90(self) -> bool: """Whether to run wannier.""" if "wannier90" in self.inputs: return True return False def prepare_wannier90_inputs(self) -> AttributeDict: """Prepare inputs for wannier90.""" inputs = AttributeDict( self.exposed_inputs(Wannier90OptimizeWorkChain, namespace="wannier90") ) inputs.structure = self.ctx.current_structure inputs.bands_kpoints = self.ctx.current_bands_kpoints # Use commensurate kmesh if self.ctx.kpoints_w90_input != self.ctx.kpoints_w90: set_kpoints( inputs, self.ctx.kpoints_w90, process_class=Wannier90OptimizeWorkChain ) return inputs def run_wannier90(self) -> ty.Dict: """Run the `Wannier90BandsWorkChain`.""" inputs = self.prepare_wannier90_inputs() inputs.metadata.call_link_label = "wannier90" inputs = prepare_process_inputs(Wannier90OptimizeWorkChain, inputs) running = self.submit(Wannier90OptimizeWorkChain, **inputs) self.report(f"launching {running.process_label}<{running.pk}>") return ToContext(wkchain_wannier90=running) def inspect_wannier90( # pylint: disable=inconsistent-return-statements self, ) -> ty.Union[None, ExitCode]: """Verify that the `Wannier90BandsWorkChain` successfully finished.""" wkchain = self.ctx.wkchain_wannier90 if not wkchain.is_finished_ok: self.report( f"{wkchain.process_label} failed with exit status {wkchain.exit_status}" ) return self.exit_codes.ERROR_SUB_PROCESS_FAILED_WANNIER90 def should_run_gw2wannier90(self) -> bool: """Whether to run gw2wannier90.""" if "gw2wannier90" in self.inputs: return True return False def prepare_gw2wannier90_inputs(self) -> AttributeDict: """Prepare inputs for gw2wannier90.""" inputs = AttributeDict( self.exposed_inputs(Gw2wannier90Calculation, namespace="gw2wannier90") ) if self.should_run_wannier90(): w90_wkchain = self.ctx.wkchain_wannier90 inputs.nnkp = w90_wkchain.outputs.wannier90_pp.nnkp_file inputs.parent_folder = w90_wkchain.outputs.wannier90.remote_folder if self.should_run_ypp(): inputs.unsorted_eig = self.ctx.wkchain_ypp.outputs.unsorted_eig_file return inputs def run_gw2wannier90(self) -> ty.Dict: """Run the ``gw2wannier90``.""" inputs = self.prepare_gw2wannier90_inputs() inputs.metadata.call_link_label = "gw2wannier90" inputs = prepare_process_inputs(Gw2wannier90Calculation, inputs) running = self.submit(Gw2wannier90Calculation, **inputs) self.report(f"launching {running.process_label}<{running.pk}>") return ToContext(calc_gw2wannier90=running) def inspect_gw2wannier90( # pylint: disable=inconsistent-return-statements self, ) -> ty.Union[None, ExitCode]: """Verify that the `Gw2wannier90Calculation` successfully finished.""" calc = self.ctx.calc_gw2wannier90 if not calc.is_finished_ok: self.report( f"{calc.process_label} failed with exit status {calc.exit_status}" ) return self.exit_codes.ERROR_SUB_PROCESS_FAILED_GW2WANNIER90 def prepare_wannier90_qp_inputs(self) -> AttributeDict: """Prepare inputs for gw2wannier90.""" inputs = AttributeDict( self.exposed_inputs(Wannier90BaseWorkChain, namespace="wannier90_qp") ) inputs.wannier90.structure = self.ctx.current_structure inputs.wannier90.bands_kpoints = self.ctx.current_bands_kpoints if self.ctx.kpoints_w90_input != self.ctx.kpoints_w90: set_kpoints( inputs, self.ctx.kpoints_w90, process_class=Wannier90BaseWorkChain ) params = inputs.wannier90.parameters.get_dict() params["bands_plot"] = True if self.should_run_wannier90(): w90calc = self.ctx.wkchain_wannier90.outputs.wannier90.remote_folder.creator w90calc_params = w90calc.inputs.parameters.get_dict() fermi_energy = 
w90calc_params["fermi_energy"] params["fermi_energy"] = fermi_energy # TODO I should just restart w/o wannierisation # I reuse parameters from previous calculation, overwriting the user inputs if inputs.shift_energy_windows: keys = ("dis_froz_min", "dis_froz_max", "dis_win_min", "dis_win_max") for key in keys: if key in w90calc_params: params[key] = w90calc_params[key] inputs.shift_energy_windows = False if self.inputs.gw2wannier90.sort_mode in [ Gw2wannier90SortMode.DEFAULT_AND_CHK, Gw2wannier90SortMode.NO_SORT, ]: params["restart"] = "plot" inputs.wannier90.parameters = orm.Dict(dict=params) if self.should_run_gw2wannier90(): inputs.wannier90.remote_input_folder = ( self.ctx.calc_gw2wannier90.outputs.remote_folder ) return inputs def run_wannier90_qp(self) -> ty.Dict: """Run the `wannier90 qp`.""" inputs = self.prepare_wannier90_qp_inputs() inputs.metadata.call_link_label = "wannier90_qp" inputs = prepare_process_inputs(Wannier90BaseWorkChain, inputs) running = self.submit(Wannier90BaseWorkChain, **inputs) self.report(f"launching {running.process_label}<{running.pk}> for wannier90_qp") return ToContext(wkchain_wannier90_qp=running) def inspect_wannier90_qp( # pylint: disable=inconsistent-return-statements self, ) -> ty.Union[None, ExitCode]: """Verify that the `Wannier90BaseWorkChain` successfully finished.""" wkchain = self.ctx.wkchain_wannier90_qp if not wkchain.is_finished_ok: self.report( f"{wkchain.process_label} failed with exit status {wkchain.exit_status}" ) return self.exit_codes.ERROR_SUB_PROCESS_FAILED_WANNIER90_QP def results(self) -> None: """Attach the relevant output nodes.""" if "wkchain_yambo_conv" in self.ctx: self.out_many( self.exposed_outputs( self.ctx.wkchain_yambo_conv, YamboConvergence, namespace="yambo", ) ) if "wkchain_yambo_commensurate" in self.ctx: self.out_many( self.exposed_outputs( self.ctx.wkchain_yambo_commensurate, YamboWorkflow, namespace="yambo_commensurate", ) ) if "wkchain_yambo_qp" in self.ctx: self.out_many( self.exposed_outputs( self.ctx.wkchain_yambo_qp, YamboRestart, namespace="yambo_qp", ) ) if "wkchain_wannier90_pp" in self.ctx: self.out_many( self.exposed_outputs( self.ctx.wkchain_wannier90_pp, Wannier90BaseWorkChain, namespace="wannier90_pp", ) ) if "wkchain_ypp" in self.ctx: self.out_many( self.exposed_outputs( self.ctx.wkchain_ypp, YppRestart, namespace="ypp", ) ) if "wkchain_wannier90" in self.ctx: self.out_many( self.exposed_outputs( self.ctx.wkchain_wannier90, Wannier90OptimizeWorkChain, namespace="wannier90", ) ) if "calc_gw2wannier90" in self.ctx: self.out_many( self.exposed_outputs( self.ctx.calc_gw2wannier90, Gw2wannier90Calculation, namespace="gw2wannier90", ) ) self.out_many( self.exposed_outputs( self.ctx.wkchain_wannier90_qp, Wannier90BaseWorkChain, namespace="wannier90_qp", ) ) if self.should_run_wannier90(): bands_w90 = self.outputs["wannier90"]["band_structure"] self.out("band_structures.wannier90", bands_w90) bands_w90qp = self.outputs["wannier90_qp"]["interpolated_bands"] self.out("band_structures.wannier90_qp", bands_w90qp) self.report(f"{self.get_name()} successfully completed") def on_terminated(self): """Clean the working directories of all child calculations if `clean_workdir=True` in the inputs.""" super().on_terminated() if not self.inputs.clean_workdir: self.report("remote folders will not be cleaned") return cleaned_calcs = [] for called_descendant in self.node.called_descendants: if isinstance(called_descendant, orm.CalcJobNode): try: called_descendant.outputs.remote_folder._clean() # pylint: 
disable=protected-access cleaned_calcs.append(called_descendant.pk) except (OSError, KeyError): pass if cleaned_calcs: self.report( f"cleaned remote folders of calculations: {' '.join(map(str, cleaned_calcs))}" )
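The `get_builder_from_protocol` classmethod above wires the Yambo, ypp, Wannier90 and gw2wannier90 sub-builders into a single builder. A minimal, hypothetical driver script is sketched below; the import path `aiida_yambo_wannier90.workflows.YamboWannier90WorkChain`, the code labels and the structure pk are assumptions, and only the required code keys and the builder/submit pattern follow from the code above.

from aiida import load_profile, orm
from aiida.engine import submit

# Hypothetical import path for the workchain class defined in this file.
from aiida_yambo_wannier90.workflows import YamboWannier90WorkChain

load_profile()

codes = {  # code labels are placeholders for codes configured in your AiiDA profile
    "pw": "pw@cluster",
    "pw2wannier90": "pw2wannier90@cluster",
    "wannier90": "wannier90@cluster",
    "yambo": "yambo@cluster",
    "p2y": "p2y@cluster",
    "ypp": "ypp@cluster",
    "gw2wannier90": "gw2wannier90@cluster",
}
structure = orm.load_node(1234)  # pk of a StructureData node (placeholder)

# Depending on the protocol defaults, extra keyword arguments
# (e.g. pseudo_family) may also be required.
builder = YamboWannier90WorkChain.get_builder_from_protocol(
    codes=codes,
    structure=structure,
)
node = submit(builder)
print(f"Submitted {node.process_label}<{node.pk}>")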
PypiClean
/meta-edc-demo-0.0.3.tar.gz/meta-edc-demo-0.0.3/meta_prn/migrations/0034_auto_20220630_1110.py
from django.core.exceptions import ObjectDoesNotExist from django.db import migrations from django.db.models.signals import post_save, pre_save from edc_constants.constants import HIGH from edc_protocol_incident.constants import PROTOCOL_INCIDENT_ACTION from edc_utils import DisableSignals from meta_prn.constants import PROTOCOL_DEVIATION_VIOLATION_ACTION def update_for_protocol_incident(apps, schema_editor): action_item_model_cls = apps.get_model("edc_action_item.actionitem") action_type_model_cls = apps.get_model("edc_action_item.actiontype") crf_metadata_model_cls = apps.get_model("edc_metadata.crfmetadata") with DisableSignals(disabled_signals=[pre_save, post_save]): # update crf metadata if there is any crf_metadata_model_cls.objects.filter( model="meta_prn.protocoldeviationviolation" ).update(model="meta_prn.protocolincident") # update action type try: action_type = action_type_model_cls.objects.get(name=PROTOCOL_INCIDENT_ACTION) except ObjectDoesNotExist: action_type = action_type_model_cls.objects.create( name=PROTOCOL_INCIDENT_ACTION, display_name=PROTOCOL_INCIDENT_ACTION, reference_model="meta_prn.protocolincident", priority=HIGH, show_on_dashboard=True, show_link_to_changelist=True, ) action_item_model_cls.objects.filter( action_type__name=PROTOCOL_DEVIATION_VIOLATION_ACTION ).update(action_type=action_type) # update crf metadata if there is any action_item_model_cls.objects.filter( reference_model="meta_prn.protocoldeviationviolation" ).update(reference_model="meta_prn.protocolincident") class Migration(migrations.Migration): dependencies = [ ("meta_prn", "0033_remove_historicalegfrnotification_action_item_and_more"), ] operations = [migrations.RunPython(update_for_protocol_incident)]
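The `RunPython` operation above is forward-only, so Django will refuse to unapply this migration. A minimal sketch of a reversible variant, using the standard `RunPython.noop` placeholder as the reverse callable (the forward data fix itself is not undone), would change the operations list as follows.

# Sketch: same forward function, with a no-op reverse so `migrate meta_prn 0033` works.
operations = [
    migrations.RunPython(
        update_for_protocol_incident,
        reverse_code=migrations.RunPython.noop,
    )
]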
PypiClean
/charming-1.0.0a2.tar.gz/charming-1.0.0a2/examples/snake.py
import charming as cm from random import choice poetry = "I have eaten the plums that were in the icebox and which you were probably saving for breakfast Forgive me they were delicious so sweet and so cold" directions = [cm.LEFT, cm.RIGHT, cm.UP, cm.DOWN] snake = [] index = 0 direction = directions[0] food = None game_over = False @cm.setup def setup(): cm.full_screen() cm.no_cursor() cm.color_mode(cm.HSB) init_game() @cm.draw def draw(): cm.background(' ') if cm.get_frame_count() % 4 == 0: update_snake() collision_detection() draw_snake() draw_food() @cm.cursor_moved def cursor_moved(): global direction direction = cm.get_key_code() @cm.key_typed def key_typed(): if game_over: init_game() def init_game(): global snake, index, direction, game_over game_over = False index = 0 direction = choice(directions) x0, y0 = int(cm.get_width() / 2), int(cm.get_height() / 2) snake = [[x0, y0, 0]] generate_food() cm.set_cursor(x0, y0) cm.loop() def draw_snake(): for x, y, i in snake: h = cm.map(i, 0, len(poetry), 0, 360) cm.stroke(poetry[i], bg=(h, 100, 100)) cm.point(x, y) def draw_food(): h = cm.map(food[2], 0, len(poetry), 0, 360) cm.stroke(poetry[food[2]], bg=(h, 100, 100)) cm.point(food[0], food[1]) def update_snake(): global game_over for i in range(len(snake) - 1): p1, p2 = snake[i], snake[i + 1] p1[0], p1[1] = p2[0], p2[1] head = snake[-1] x_move = 0 if direction == cm.UP or direction == cm.DOWN else 1 y_move = 0 if direction == cm.LEFT or direction == cm.RIGHT else 1 x_d = 1 if direction == cm.RIGHT else -1 y_d = 1 if direction == cm.DOWN else -1 next_x = (head[0] + x_move * x_d) % cm.get_width() next_y = (head[1] + y_move * y_d) % cm.get_height() if in_snake(next_x, next_y): cm.no_loop() game_over = True else: head[0] = next_x head[1] = next_y def in_snake(x, y): for p in snake: if p[0] == x and p[1] == y: return True return False def collision_detection(): global snake head = snake[-1] if head[0] == food[0] and head[1] == food[1]: snake.append([food[0], food[1], index]) generate_food() def generate_food(): global food, index, game_over if index == len(poetry) - 1: game_over = True cm.no_loop() else: index = index + 1 while True: x = int(cm.random(cm.get_width())) y = int(cm.random(cm.get_height())) if not in_snake(x, y): break food = [x, y, index] if __name__ == "__main__": cm.run()
PypiClean
/ansutr_transliteration-1.1.3-py3-none-any.whl/ansutr/transliteration/transformer/indic2en.py
import os from collections.abc import Iterable import logging logging.basicConfig(level=logging.WARNING) from .base_engine import BaseEngineTransformer, LANG_LIST_FILE F_DIR = os.path.dirname(os.path.realpath(__file__)) MODEL_DOWNLOAD_URL = 'https://github.com/AI4Bharat/IndicXlit/releases/download/v1.0/indicxlit-indic-en-v1.0.zip' DICTS_DOWNLOAD_URL = 'https://github.com/AI4Bharat/IndicXlit/releases/download/v1.0/word_prob_dicts_en.zip' XLIT_VERSION = "v1.0" # If model/dict is changed on the storage, do not forget to change this variable in-order to force-download new assets def is_folder_writable(folder): try: os.makedirs(folder, exist_ok=True) tmp_file = os.path.join(folder, '.write_test') with open(tmp_file, 'w') as f: f.write('Permission Check') os.remove(tmp_file) return True except: return False def is_directory_writable(path): if os.name == 'nt': return is_folder_writable(path) return os.access(path, os.W_OK | os.X_OK) class XlitEngineTransformer_Indic2En(BaseEngineTransformer): """ For Managing the top level tasks and applications of transliteration TODO: Ability to pass `beam_width` dynamically """ def __init__(self, beam_width=4, rescore=True): if is_directory_writable(F_DIR): models_path = os.path.join(F_DIR, 'models') else: user_home = os.path.expanduser("~") models_path = os.path.join(user_home, '.AI4Bharat_Xlit_Models') models_path = os.path.join(models_path, "indic2en", XLIT_VERSION) os.makedirs(models_path, exist_ok=True) lang_list_file = os.path.join(models_path, LANG_LIST_FILE) _all_supported_langs = open(lang_list_file).read().strip().split('\n') self._all_supported_langs = set(_all_supported_langs) if "en" in self._all_supported_langs: self._all_supported_langs.remove("en") self._tgt_langs = set(["en"]) model_file_path = self.download_models(models_path, MODEL_DOWNLOAD_URL) if rescore: dicts_folder = self.download_dicts(models_path, DICTS_DOWNLOAD_URL) else: dicts_folder = None super().__init__(models_path, beam_width=beam_width, rescore=rescore) @property def all_supported_langs(self): return self._all_supported_langs @property def tgt_langs(self): return self._tgt_langs def translit_word(self, word, lang_code, topk=4): if lang_code not in self.all_supported_langs: raise NotImplementedError(f"Language: `{lang_code}` not yet supported") return self._transliterate_word(word, src_lang=lang_code, tgt_lang='en', topk=topk) def translit_sentence(self, indic_sentence, lang_code): if lang_code not in self.all_supported_langs: raise NotImplementedError(f"Language: `{lang_code}` not yet supported") return self._transliterate_sentence(indic_sentence, src_lang=lang_code, tgt_lang='en')
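A minimal, hypothetical usage sketch for the engine above. The language code "hi" and the sample strings are assumptions (the code must appear in the bundled language list), and the first instantiation downloads the model plus, when `rescore=True`, the word-probability dictionaries.

from ansutr.transliteration.transformer.indic2en import XlitEngineTransformer_Indic2En

engine = XlitEngineTransformer_Indic2En(beam_width=4, rescore=True)

# Top-k candidate romanisations of a single word (language code is an assumption).
print(engine.translit_word("नमस्ते", lang_code="hi", topk=4))

# Transliteration of a whole sentence to Latin script.
print(engine.translit_sentence("नमस्ते दुनिया", lang_code="hi"))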
PypiClean
/music-chart-api-0.1.3.tar.gz/music-chart-api-0.1.3/muse/genie.py
from bs4 import BeautifulSoup as Soup from muse.util import HeadlessChrome from datetime import datetime from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait import time """ Module for genie music chart API Attribute: SITE_URL: Path for genie web site REAL_TIME_CHART: Path for genie real time chart page """ SITE_URL = "https://www.genie.co.kr" REAL_TIME_CHART = "{0}/chart/top200".format(SITE_URL) def get_real_time_chart_songs(pages=2): """ Get top 50 x n songs from genie real time chart Args: pages(int): page counts how many read songs (50 x n songs) Return: list: Top 50 x n songs from genie real time chart """ songs = [] with HeadlessChrome() as chrome: # get current time now = datetime.now() for page in range(pages): # Move into genie real time chart page # we need query string ymd=(year)(month)(date), hh=(hour), pg=(page) chrome.get('{0}?ditc=D&ymd={1}{2:02d}{3:02d}&hh={4:02d}&rtm=Y&pg={5}'.format( REAL_TIME_CHART, now.year, now.month, now.day, now.hour, page + 1 )) wait = WebDriverWait(chrome, 10) wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, 'tr.list'))) time.sleep(0.5) soup = Soup(chrome.page_source, 'html.parser') for row in soup.select('tr.list'): # remove suffix from rank text rank_text = row.select('td.number')[0] rank_text.select('span.rank')[0].extract() song = { 'rank': rank_text.get_text().strip(), 'title': row.select('a.title')[0].get_text().strip(), 'artist': row.select('a.artist')[0].get_text().strip(), 'album': row.select('a.albumtitle')[0].get_text().strip() } songs.append(song) return songs
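A minimal usage sketch for the scraper above; it assumes a working headless Chrome setup (as provided by `HeadlessChrome` in `muse.util`) and network access to genie.co.kr.

from muse.genie import get_real_time_chart_songs

# Two pages of 50 songs each -> top 100 of the real-time chart.
songs = get_real_time_chart_songs(pages=2)
for song in songs[:5]:
    print(f"{song['rank']:>3}  {song['title']} - {song['artist']} ({song['album']})")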
PypiClean
/panddas-0.2.14.tar.gz/panddas-0.2.14/lib-python/bamboo/rdkit_utils/contacts/utils.py
from rdkit import Chem from bamboo.constants import WATER_NAMES from bamboo.rdkit_utils.mol import check_pdb_readable def order_structures_by_minimum_distance_to_reference(refpdb, pdbs, mincutoff=1.5, maxcutoff=6): """Return the molecules have at least one non-H atom within a certain distance of the reference structure""" # Deal with cases where mincutoff, maxcutoff are not specified if not mincutoff: mincutoff = 0 if not maxcutoff: maxcutoff = 9999999 pdbs_to_return = [] refmol = check_pdb_readable(refpdb) for pdbfile in pdbs: pdbmol = check_pdb_readable(pdbfile) min_dist = calculate_minimum_distance_between_mols(refmol, pdbmol) # Reject if there is a clash if min_dist >= mincutoff and min_dist <= maxcutoff: pdbs_to_return.append((min_dist,pdbfile)) # Order by minimum distance sorted_files = sorted(pdbs_to_return, key=lambda tup: tup[0]) return [t[1] for t in sorted_files] def order_structures_by_number_of_contacts_to_reference(refpdb, pdbs, cutoff=3.5): """Calculate the number of contacts between `pdb` and refpdb, and return a list of decreasing contacts""" refmol = check_pdb_readable(refpdb) contacts_list = [] for pdbfile in pdbs: pdbmol = check_pdb_readable(pdbfile) # Add the number of contacts and the pdbfile to the list contacts_list.append((calculate_number_of_pairwise_distances_within_cutoff(refmol, pdbmol, cutoff=cutoff), pdbfile)) return sorted(contacts_list, key=lambda tup: tup[0], reverse=True) def calculate_minimum_distance_between_mols(mol1, mol2): """Takes two molecules and returns the minimum distance between the two structures""" distances = calculate_pairwise_distances_between_mols(mol1, mol2) if not distances: return None else: return min(distances) def calculate_number_of_pairwise_distances_within_cutoff(mol1, mol2, cutoff=3.5): """Count number of times a distance less than `cutoff` is observed between an atom of mol1 and an atom of mol2""" distances = calculate_pairwise_distances_between_mols(mol1, mol2) if not distances: return None within_cutoff = [(d<cutoff) for d in distances] counts = within_cutoff.count(True) return counts def calculate_pairwise_distances_between_mols(mol1, mol2): """Calculates pairwise distances between atoms in mol1 and mol2""" # TODO Change this so that it runs through all of the conformations... conf1 = mol1.GetConformer() conf2 = mol2.GetConformer() # Get the indexes of the atoms that aren't water mol1_idxs = [a.GetIdx() for a in mol1.GetAtoms() if a.GetMonomerInfo().GetResidueName() not in WATER_NAMES] mol2_idxs = [a.GetIdx() for a in mol2.GetAtoms() if a.GetMonomerInfo().GetResidueName() not in WATER_NAMES] # Iterate through all pairs of atoms and calculate pairwise distances distances = [conf1.GetAtomPosition(i1).Distance(conf2.GetAtomPosition(i2)) for i2 in mol2_idxs for i1 in mol1_idxs] return distances
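A minimal usage sketch for the two ordering helpers above; the file names are placeholders, and each PDB must be readable by RDKit (they are parsed via `check_pdb_readable`).

from bamboo.rdkit_utils.contacts.utils import (
    order_structures_by_minimum_distance_to_reference,
    order_structures_by_number_of_contacts_to_reference,
)

ref = "reference.pdb"                     # placeholder paths
candidates = ["frag_01.pdb", "frag_02.pdb", "frag_03.pdb"]

# Filenames whose closest non-water atom lies between 1.5 and 6 Angstrom of the
# reference, sorted by that minimum distance (closest first).
by_distance = order_structures_by_minimum_distance_to_reference(
    ref, candidates, mincutoff=1.5, maxcutoff=6
)

# (n_contacts, filename) tuples sorted by the number of pairwise distances
# below 3.5 Angstrom, most contacts first.
by_contacts = order_structures_by_number_of_contacts_to_reference(
    ref, candidates, cutoff=3.5
)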
PypiClean
/dsin100daysv30-6.0.1.tar.gz/dsin100daysv30-6.0.1/notebook/static/components/MathJax/extensions/MathMenu.js
(function(f,o,q,e,r){var p="2.7.5";var d=MathJax.Callback.Signal("menu");MathJax.Extension.MathMenu={version:p,signal:d};var t=function(u){return MathJax.Localization._.apply(MathJax.Localization,[["MathMenu",u]].concat([].slice.call(arguments,1)))};var i=MathJax.Object.isArray;var a=f.Browser.isPC,l=f.Browser.isMSIE,m=((document.documentMode||0)>8);var j=(a?null:"5px");var s=f.CombineConfig("MathMenu",{delay:150,showRenderer:true,showMathPlayer:true,showFontMenu:false,showContext:false,showDiscoverable:false,showLocale:true,showLocaleURL:false,semanticsAnnotations:{TeX:["TeX","LaTeX","application/x-tex"],StarMath:["StarMath 5.0"],Maple:["Maple"],ContentMathML:["MathML-Content","application/mathml-content+xml"],OpenMath:["OpenMath"]},windowSettings:{status:"no",toolbar:"no",locationbar:"no",menubar:"no",directories:"no",personalbar:"no",resizable:"yes",scrollbars:"yes",width:400,height:300,left:Math.round((screen.width-400)/2),top:Math.round((screen.height-300)/3)},styles:{"#MathJax_About":{position:"fixed",left:"50%",width:"auto","text-align":"center",border:"3px outset",padding:"1em 2em","background-color":"#DDDDDD",color:"black",cursor:"default","font-family":"message-box","font-size":"120%","font-style":"normal","text-indent":0,"text-transform":"none","line-height":"normal","letter-spacing":"normal","word-spacing":"normal","word-wrap":"normal","white-space":"nowrap","float":"none","z-index":201,"border-radius":"15px","-webkit-border-radius":"15px","-moz-border-radius":"15px","-khtml-border-radius":"15px","box-shadow":"0px 10px 20px #808080","-webkit-box-shadow":"0px 10px 20px #808080","-moz-box-shadow":"0px 10px 20px #808080","-khtml-box-shadow":"0px 10px 20px #808080",filter:"progid:DXImageTransform.Microsoft.dropshadow(OffX=2, OffY=2, Color='gray', Positive='true')"},"#MathJax_About.MathJax_MousePost":{outline:"none"},".MathJax_Menu":{position:"absolute","background-color":"white",color:"black",width:"auto",padding:(a?"2px":"5px 0px"),border:"1px solid #CCCCCC",margin:0,cursor:"default",font:"menu","text-align":"left","text-indent":0,"text-transform":"none","line-height":"normal","letter-spacing":"normal","word-spacing":"normal","word-wrap":"normal","white-space":"nowrap","float":"none","z-index":201,"border-radius":j,"-webkit-border-radius":j,"-moz-border-radius":j,"-khtml-border-radius":j,"box-shadow":"0px 10px 20px #808080","-webkit-box-shadow":"0px 10px 20px #808080","-moz-box-shadow":"0px 10px 20px #808080","-khtml-box-shadow":"0px 10px 20px #808080",filter:"progid:DXImageTransform.Microsoft.dropshadow(OffX=2, OffY=2, Color='gray', Positive='true')"},".MathJax_MenuItem":{padding:(a?"2px 2em":"1px 2em"),background:"transparent"},".MathJax_MenuArrow":{position:"absolute",right:".5em","padding-top":".25em",color:"#666666","font-family":(l?"'Arial unicode MS'":null),"font-size":".75em"},".MathJax_MenuActive .MathJax_MenuArrow":{color:"white"},".MathJax_MenuArrow.RTL":{left:".5em",right:"auto"},".MathJax_MenuCheck":{position:"absolute",left:".7em","font-family":(l?"'Arial unicode MS'":null)},".MathJax_MenuCheck.RTL":{right:".7em",left:"auto"},".MathJax_MenuRadioCheck":{position:"absolute",left:(a?"1em":".7em")},".MathJax_MenuRadioCheck.RTL":{right:(a?"1em":".7em"),left:"auto"},".MathJax_MenuLabel":{padding:(a?"2px 2em 4px 1.33em":"1px 2em 3px 1.33em"),"font-style":"italic"},".MathJax_MenuRule":{"border-top":(a?"1px solid #CCCCCC":"1px solid #DDDDDD"),margin:(a?"4px 1px 0px":"4px 
3px")},".MathJax_MenuDisabled":{color:"GrayText"},".MathJax_MenuActive":{"background-color":(a?"Highlight":"#606872"),color:(a?"HighlightText":"white")},".MathJax_MenuDisabled:focus, .MathJax_MenuLabel:focus":{"background-color":"#E8E8E8"},".MathJax_ContextMenu:focus":{outline:"none"},".MathJax_ContextMenu .MathJax_MenuItem:focus":{outline:"none"},"#MathJax_AboutClose":{top:".2em",right:".2em"},".MathJax_Menu .MathJax_MenuClose":{top:"-10px",left:"-10px"},".MathJax_MenuClose":{position:"absolute",cursor:"pointer",display:"inline-block",border:"2px solid #AAA","border-radius":"18px","-webkit-border-radius":"18px","-moz-border-radius":"18px","-khtml-border-radius":"18px","font-family":"'Courier New',Courier","font-size":"24px",color:"#F0F0F0"},".MathJax_MenuClose span":{display:"block","background-color":"#AAA",border:"1.5px solid","border-radius":"18px","-webkit-border-radius":"18px","-moz-border-radius":"18px","-khtml-border-radius":"18px","line-height":0,padding:"8px 0 6px"},".MathJax_MenuClose:hover":{color:"white!important",border:"2px solid #CCC!important"},".MathJax_MenuClose:hover span":{"background-color":"#CCC!important"},".MathJax_MenuClose:hover:focus":{outline:"none"}}});var n,k,b;f.Register.StartupHook("MathEvents Ready",function(){n=MathJax.Extension.MathEvents.Event.False;k=MathJax.Extension.MathEvents.Hover;b=MathJax.Extension.MathEvents.Event.KEY});var h=MathJax.Object.Subclass({Keydown:function(u,v){switch(u.keyCode){case b.ESCAPE:this.Remove(u,v);break;case b.RIGHT:this.Right(u,v);break;case b.LEFT:this.Left(u,v);break;case b.UP:this.Up(u,v);break;case b.DOWN:this.Down(u,v);break;case b.RETURN:case b.SPACE:this.Space(u,v);break;default:return;break}return n(u)},Escape:function(u,v){},Right:function(u,v){},Left:function(u,v){},Up:function(u,v){},Down:function(u,v){},Space:function(u,v){}},{});var g=MathJax.Menu=h.Subclass({version:p,items:[],posted:false,title:null,margin:5,Init:function(u){this.items=[].slice.call(arguments,0)},With:function(u){if(u){f.Insert(this,u)}return this},Post:function(M,E,B){if(!M){M=window.event||{}}var I=document.getElementById("MathJax_MenuFrame");if(!I){I=g.Background(this);delete c.lastItem;delete c.lastMenu;delete g.skipUp;d.Post(["post",g.jax]);g.isRTL=(MathJax.Localization.fontDirection()==="rtl")}var v=o.Element("div",{onmouseup:g.Mouseup,ondblclick:n,ondragstart:n,onselectstart:n,oncontextmenu:n,menuItem:this,className:"MathJax_Menu",onkeydown:g.Keydown,role:"menu"});if(M.type==="contextmenu"||M.type==="mouseover"){v.className+=" MathJax_ContextMenu"}if(!B){MathJax.Localization.setCSS(v)}for(var N=0,K=this.items.length;N<K;N++){this.items[N].Create(v)}if(g.isMobile){o.addElement(v,"span",{className:"MathJax_MenuClose",menu:E,ontouchstart:g.Close,ontouchend:n,onmousedown:g.Close,onmouseup:n},[["span",{},"\u00D7"]])}I.appendChild(v);this.posted=true;if(v.offsetWidth){v.style.width=(v.offsetWidth+2)+"px"}var H=M.pageX,F=M.pageY;var u=document.body.getBoundingClientRect();var C=(window.getComputedStyle?window.getComputedStyle(document.body):{marginLeft:"0px"});var A=u.right-Math.min(0,u.left)+parseFloat(C.marginLeft);if(!H&&!F&&"clientX" in M){H=M.clientX+document.body.scrollLeft+document.documentElement.scrollLeft;F=M.clientY+document.body.scrollTop+document.documentElement.scrollTop}if(!E){var L=g.CurrentNode()||M.target;if((M.type==="keydown"||(!H&&!F))&&L){var P=window.pageXOffset||document.documentElement.scrollLeft;var O=window.pageYOffset||document.documentElement.scrollTop;var 
w=L.getBoundingClientRect();H=(w.right+w.left)/2+P;F=(w.bottom+w.top)/2+O}if(H+v.offsetWidth>A-this.margin){H=A-v.offsetWidth-this.margin}if(g.isMobile){H=Math.max(5,H-Math.floor(v.offsetWidth/2));F-=20}g.skipUp=M.isContextMenu}else{var z="left",J=E.offsetWidth;H=(g.isMobile?30:J-2);F=0;while(E&&E!==I){H+=E.offsetLeft;F+=E.offsetTop;E=E.parentNode}if(!g.isMobile){if((g.isRTL&&H-J-v.offsetWidth>this.margin)||(!g.isRTL&&H+v.offsetWidth>A-this.margin)){z="right";H=Math.max(this.margin,H-J-v.offsetWidth+6)}}if(!a){v.style["borderRadiusTop"+z]=0;v.style["WebkitBorderRadiusTop"+z]=0;v.style["MozBorderRadiusTop"+z]=0;v.style["KhtmlBorderRadiusTop"+z]=0}}v.style.left=H+"px";v.style.top=F+"px";if(document.selection&&document.selection.empty){document.selection.empty()}var G=window.pageXOffset||document.documentElement.scrollLeft;var D=window.pageYOffset||document.documentElement.scrollTop;g.Focus(v);if(M.type==="keydown"){g.skipMouseoverFromKey=true;setTimeout(function(){delete g.skipMouseoverFromKey},s.delay)}window.scrollTo(G,D);return n(M)},Remove:function(u,v){d.Post(["unpost",g.jax]);var w=document.getElementById("MathJax_MenuFrame");if(w){w.parentNode.removeChild(w);if(this.msieFixedPositionBug){detachEvent("onresize",g.Resize)}}if(g.jax.hover){delete g.jax.hover.nofade;k.UnHover(g.jax)}g.Unfocus(v);if(u.type==="mousedown"){g.CurrentNode().blur()}return n(u)},Find:function(u){return this.FindN(1,u,[].slice.call(arguments,1))},FindId:function(u){return this.FindN(0,u,[].slice.call(arguments,1))},FindN:function(y,v,x){for(var w=0,u=this.items.length;w<u;w++){if(this.items[w].name[y]===v){if(x.length){if(!this.items[w].submenu){return null}return this.items[w].submenu.FindN(y,x[0],x.slice(1))}return this.items[w]}}return null},IndexOf:function(u){return this.IndexOfN(1,u)},IndexOfId:function(u){return this.IndexOfN(0,u)},IndexOfN:function(x,v){for(var w=0,u=this.items.length;w<u;w++){if(this.items[w].name[x]===v){return w}}return null},Right:function(u,v){g.Right(u,v)},Left:function(u,v){g.Left(u,v)},Up:function(v,w){var u=w.lastChild;u.menuItem.Activate(v,u)},Down:function(v,w){var u=w.firstChild;u.menuItem.Activate(v,u)},Space:function(u,v){this.Remove(u,v)}},{config:s,Remove:function(u){return g.Event(u,this,"Remove")},Mouseover:function(u){return g.Event(u,this,"Mouseover")},Mouseout:function(u){return g.Event(u,this,"Mouseout")},Mousedown:function(u){return g.Event(u,this,"Mousedown")},Mouseup:function(u){return g.Event(u,this,"Mouseup")},Keydown:function(u){return g.Event(u,this,"Keydown")},Touchstart:function(u){return g.Event(u,this,"Touchstart")},Touchend:function(u){return g.Event(u,this,"Touchend")},Close:function(u){return g.Event(u,this.menu||this.parentNode,(this.menu?"Touchend":"Remove"))},Event:function(w,y,u,x){if(g.skipMouseover&&u==="Mouseover"&&!x){return n(w)}if(g.skipMouseoverFromKey&&u==="Mouseover"){delete g.skipMouseoverFromKey;return n(w)}if(g.skipUp){if(u.match(/Mouseup|Touchend/)){delete g.skipUp;return n(w)}if(u==="Touchstart"||(u==="Mousedown"&&!g.skipMousedown)){delete g.skipUp}}if(!w){w=window.event}var v=y.menuItem;if(v&&v[u]){return v[u](w,y)}return null},BGSTYLE:{position:"absolute",left:0,top:0,"z-index":200,width:"100%",height:"100%",border:0,padding:0,margin:0},Background:function(v){var w=o.addElement(document.body,"div",{style:this.BGSTYLE,id:"MathJax_MenuFrame"},[["div",{style:this.BGSTYLE,menuItem:v,onmousedown:this.Remove}]]);var 
u=w.firstChild;if(g.msieBackgroundBug){u.style.backgroundColor="white";u.style.filter="alpha(opacity=0)"}if(g.msieFixedPositionBug){w.width=w.height=0;this.Resize();attachEvent("onresize",this.Resize)}else{u.style.position="fixed"}return w},Resize:function(){setTimeout(g.SetWH,0)},SetWH:function(){var u=document.getElementById("MathJax_MenuFrame");if(u){u=u.firstChild;u.style.width=u.style.height="1px";u.style.width=document.body.scrollWidth+"px";u.style.height=document.body.scrollHeight+"px"}},posted:false,active:null,GetNode:function(u){var v=document.getElementById(u.inputID+"-Frame");return v.isMathJax?v:v.firstChild},CurrentNode:function(){return g.GetNode(g.jax)},AllNodes:function(){var v=MathJax.Hub.getAllJax();var w=[];for(var x=0,u;u=v[x];x++){w.push(g.GetNode(u))}return w},ActiveNode:function(){return g.active},FocusNode:function(u){g.active=u;u.focus()},Focus:function(u){!g.posted?g.Activate(u):g.ActiveNode().tabIndex=-1;u.tabIndex=0;g.FocusNode(u)},Activate:function(u,v){g.UnsetTabIndex();g.posted=true},Unfocus:function(){g.ActiveNode().tabIndex=-1;g.SetTabIndex();g.FocusNode(g.CurrentNode());g.posted=false},MoveHorizontal:function(y,z,w){if(!y.shiftKey){return}var v=g.AllNodes();var u=v.length;if(u===0){return}var x=v[g.Mod(w(g.IndexOf(v,g.CurrentNode())),u)];if(x===g.CurrentNode()){return}g.menu.Remove(y,z);g.jax=MathJax.Hub.getJaxFor(x);g.FocusNode(x);g.menu.Post(null)},Right:function(u,v){g.MoveHorizontal(u,v,function(w){return w+1})},Left:function(u,v){g.MoveHorizontal(u,v,function(w){return w-1})},UnsetTabIndex:function(){var v=g.AllNodes();for(var w=0,u;u=v[w];w++){if(u.tabIndex>0){u.oldTabIndex=u.tabIndex}u.tabIndex=-1}},SetTabIndex:function(){var v=g.AllNodes();for(var w=0,u;u=v[w];w++){if(u.oldTabIndex!==undefined){u.tabIndex=u.oldTabIndex;delete u.oldTabIndex}else{u.tabIndex=f.getTabOrder(u)}}},Mod:function(u,v){return((u%v)+v)%v},IndexOf:(Array.prototype.indexOf?function(u,v,w){return u.indexOf(v,w)}:function(u,x,y){for(var w=(y||0),v=u.length;w<v;w++){if(x===u[w]){return w}}return -1}),saveCookie:function(){o.Cookie.Set("menu",this.cookie)},getCookie:function(){this.cookie=o.Cookie.Get("menu")}});MathJax.Menu.NAV=h;var c=g.ITEM=h.Subclass({name:"",node:null,menu:null,Attributes:function(u){return f.Insert({onmouseup:g.Mouseup,ondragstart:n,onselectstart:n,onselectend:n,ontouchstart:g.Touchstart,ontouchend:g.Touchend,className:"MathJax_MenuItem",role:this.role,menuItem:this},u)},Create:function(w){if(!this.hidden){var v=this.Attributes();var u=this.Label(v,w);o.addElement(w,"div",v,u)}},Name:function(){return t(this.name[0],this.name[1])},Mouseover:function(u,v){if(v.parentNode===g.ActiveNode().parentNode){this.Deactivate(g.ActiveNode())}this.Activate(u,v)},Mouseout:function(u,v){this.Deactivate(v)},Mouseup:function(u,v){return this.Remove(u,v)},DeactivateSubmenus:function(z){var y=document.getElementById("MathJax_MenuFrame").childNodes,v=c.GetMenuNode(z).childNodes;for(var w=0,u=v.length;w<u;w++){var x=v[w].menuItem;if(x&&x.submenu&&x.submenu.posted&&x!==z.menuItem){x.Deactivate(v[w])}}this.RemoveSubmenus(z,y)},RemoveSubmenus:function(w,v){v=v||document.getElementById("MathJax_MenuFrame").childNodes;var u=v.length-1;while(u>=0&&c.GetMenuNode(w).menuItem!==v[u].menuItem){v[u].menuItem.posted=false;v[u].parentNode.removeChild(v[u]);u--}},Touchstart:function(u,v){return this.TouchEvent(u,v,"Mousedown")},Touchend:function(u,v){return 
this.TouchEvent(u,v,"Mouseup")},TouchEvent:function(v,w,u){if(this!==c.lastItem){if(c.lastMenu){g.Event(v,c.lastMenu,"Mouseout")}g.Event(v,w,"Mouseover",true);c.lastItem=this;c.lastMenu=w}if(this.nativeTouch){return null}g.Event(v,w,u);return false},Remove:function(u,v){v=v.parentNode.menuItem;return v.Remove(u,v)},With:function(u){if(u){f.Insert(this,u)}return this},isRTL:function(){return g.isRTL},rtlClass:function(){return(this.isRTL()?" RTL":"")}},{GetMenuNode:function(u){return u.parentNode}});g.ENTRY=g.ITEM.Subclass({role:"menuitem",Attributes:function(u){u=f.Insert({onmouseover:g.Mouseover,onmouseout:g.Mouseout,onmousedown:g.Mousedown,onkeydown:g.Keydown,"aria-disabled":!!this.disabled},u);u=this.SUPER(arguments).Attributes.call(this,u);if(this.disabled){u.className+=" MathJax_MenuDisabled"}return u},MoveVertical:function(u,E,w){var x=c.GetMenuNode(E);var D=[];for(var z=0,C=x.menuItem.items,y;y=C[z];z++){if(!y.hidden){D.push(y)}}var B=g.IndexOf(D,this);if(B===-1){return}var A=D.length;var v=x.childNodes;do{B=g.Mod(w(B),A)}while(D[B].hidden||!v[B].role||v[B].role==="separator");this.Deactivate(E);D[B].Activate(u,v[B])},Up:function(v,u){this.MoveVertical(v,u,function(w){return w-1})},Down:function(v,u){this.MoveVertical(v,u,function(w){return w+1})},Right:function(v,u){this.MoveHorizontal(v,u,g.Right,!this.isRTL())},Left:function(v,u){this.MoveHorizontal(v,u,g.Left,this.isRTL())},MoveHorizontal:function(A,z,u,B){var x=c.GetMenuNode(z);if(x.menuItem===g.menu&&A.shiftKey){u(A,z)}if(B){return}if(x.menuItem!==g.menu){this.Deactivate(z)}var v=x.previousSibling.childNodes;var y=v.length;while(y--){var w=v[y];if(w.menuItem.submenu&&w.menuItem.submenu===x.menuItem){g.Focus(w);break}}this.RemoveSubmenus(z)},Space:function(u,v){this.Mouseup(u,v)},Activate:function(u,v){this.Deactivate(v);if(!this.disabled){v.className+=" MathJax_MenuActive"}this.DeactivateSubmenus(v);g.Focus(v)},Deactivate:function(u){u.className=u.className.replace(/ MathJax_MenuActive/,"")}});g.ITEM.COMMAND=g.ENTRY.Subclass({action:function(){},Init:function(u,w,v){if(!i(u)){u=[u,u]}this.name=u;this.action=w;this.With(v)},Label:function(u,v){return[this.Name()]},Mouseup:function(u,v){if(!this.disabled){this.Remove(u,v);d.Post(["command",this]);this.action.call(this,u)}return n(u)}});g.ITEM.SUBMENU=g.ENTRY.Subclass({submenu:null,marker:"\u25BA",markerRTL:"\u25C4",Attributes:function(u){u=f.Insert({"aria-haspopup":"true"},u);u=this.SUPER(arguments).Attributes.call(this,u);return u},Init:function(u,w){if(!i(u)){u=[u,u]}this.name=u;var v=1;if(!(w instanceof g.ITEM)){this.With(w),v++}this.submenu=g.apply(g,[].slice.call(arguments,v))},Label:function(u,v){this.submenu.posted=false;return[this.Name()+" ",["span",{className:"MathJax_MenuArrow"+this.rtlClass()},[this.isRTL()?this.markerRTL:this.marker]]]},Timer:function(u,v){this.ClearTimer();u={type:u.type,clientX:u.clientX,clientY:u.clientY};this.timer=setTimeout(e(["Mouseup",this,u,v]),s.delay)},ClearTimer:function(){if(this.timer){clearTimeout(this.timer)}},Touchend:function(v,x){var w=this.submenu.posted;var u=this.SUPER(arguments).Touchend.apply(this,arguments);if(w){this.Deactivate(x);delete c.lastItem;delete c.lastMenu}return u},Mouseout:function(u,v){if(!this.submenu.posted){this.Deactivate(v)}this.ClearTimer()},Mouseover:function(u,v){this.Activate(u,v)},Mouseup:function(u,v){if(!this.disabled){if(!this.submenu.posted){this.ClearTimer();this.submenu.Post(u,v,this.ltr);g.Focus(v)}else{this.DeactivateSubmenus(v)}}return 
n(u)},Activate:function(u,v){if(!this.disabled){this.Deactivate(v);v.className+=" MathJax_MenuActive"}if(!this.submenu.posted){this.DeactivateSubmenus(v);if(!g.isMobile){this.Timer(u,v)}}g.Focus(v)},MoveVertical:function(w,v,u){this.ClearTimer();this.SUPER(arguments).MoveVertical.apply(this,arguments)},MoveHorizontal:function(w,y,v,x){if(!x){this.SUPER(arguments).MoveHorizontal.apply(this,arguments);return}if(this.disabled){return}if(!this.submenu.posted){this.Activate(w,y);return}var u=c.GetMenuNode(y).nextSibling.childNodes;if(u.length>0){this.submenu.items[0].Activate(w,u[0])}}});g.ITEM.RADIO=g.ENTRY.Subclass({variable:null,marker:(a?"\u25CF":"\u2713"),role:"menuitemradio",Attributes:function(v){var u=s.settings[this.variable]===this.value?"true":"false";v=f.Insert({"aria-checked":u},v);v=this.SUPER(arguments).Attributes.call(this,v);return v},Init:function(v,u,w){if(!i(v)){v=[v,v]}this.name=v;this.variable=u;this.With(w);if(this.value==null){this.value=this.name[0]}},Label:function(v,w){var u={className:"MathJax_MenuRadioCheck"+this.rtlClass()};if(s.settings[this.variable]!==this.value){u={style:{display:"none"}}}return[["span",u,[this.marker]]," "+this.Name()]},Mouseup:function(x,y){if(!this.disabled){var z=y.parentNode.childNodes;for(var v=0,u=z.length;v<u;v++){var w=z[v].menuItem;if(w&&w.variable===this.variable){z[v].firstChild.style.display="none"}}y.firstChild.display="";s.settings[this.variable]=this.value;g.cookie[this.variable]=s.settings[this.variable];g.saveCookie();d.Post(["radio button",this])}this.Remove(x,y);if(this.action&&!this.disabled){this.action.call(g,this)}return n(x)}});g.ITEM.CHECKBOX=g.ENTRY.Subclass({variable:null,marker:"\u2713",role:"menuitemcheckbox",Attributes:function(v){var u=s.settings[this.variable]?"true":"false";v=f.Insert({"aria-checked":u},v);v=this.SUPER(arguments).Attributes.call(this,v);return v},Init:function(v,u,w){if(!i(v)){v=[v,v]}this.name=v;this.variable=u;this.With(w)},Label:function(v,w){var u={className:"MathJax_MenuCheck"+this.rtlClass()};if(!s.settings[this.variable]){u={style:{display:"none"}}}return[["span",u,[this.marker]]," "+this.Name()]},Mouseup:function(u,v){if(!this.disabled){v.firstChild.display=(s.settings[this.variable]?"none":"");s.settings[this.variable]=!s.settings[this.variable];g.cookie[this.variable]=s.settings[this.variable];g.saveCookie();d.Post(["checkbox",this])}this.Remove(u,v);if(this.action&&!this.disabled){this.action.call(g,this)}return n(u)}});g.ITEM.LABEL=g.ENTRY.Subclass({role:"menuitem",Init:function(u,v){if(!i(u)){u=[u,u]}this.name=u;this.With(v)},Label:function(u,v){u.className+=" MathJax_MenuLabel";return[this.Name()]},Activate:function(u,v){this.Deactivate(v);g.Focus(v)},Mouseup:function(u,v){}});g.ITEM.RULE=g.ITEM.Subclass({role:"separator",Attributes:function(u){u=f.Insert({"aria-orientation":"vertical"},u);u=this.SUPER(arguments).Attributes.call(this,u);return u},Label:function(u,v){u.className+=" MathJax_MenuRule";return null}});g.About=function(y){var v=g.About.GetFont();var A=g.About.GetFormat();var u=["MathJax.js v"+MathJax.fileversion,["br"]];u.push(["div",{style:{"border-top":"groove 2px",margin:".25em 0"}}]);g.About.GetJax(u,MathJax.InputJax,["InputJax","%1 Input Jax v%2"]);g.About.GetJax(u,MathJax.OutputJax,["OutputJax","%1 Output Jax v%2"]);g.About.GetJax(u,MathJax.ElementJax,["ElementJax","%1 Element Jax v%2"]);u.push(["div",{style:{"border-top":"groove 2px",margin:".25em 0"}}]);g.About.GetJax(u,MathJax.Extension,["Extension","%1 Extension 
v%2"],true);u.push(["div",{style:{"border-top":"groove 2px",margin:".25em 0"}}],["center",{},[f.Browser+" v"+f.Browser.version+(A?" \u2014 "+t(A.replace(/ /g,""),A):"")]]);g.About.div=g.Background(g.About);var x=o.addElement(g.About.div,"div",{id:"MathJax_About",tabIndex:0,onkeydown:g.About.Keydown},[["b",{style:{fontSize:"120%"}},["MathJax"]]," v"+MathJax.version,["br"],t(v.replace(/ /g,""),"using "+v),["br"],["br"],["span",{style:{display:"inline-block","text-align":"left","font-size":"80%","max-height":"20em",overflow:"auto","background-color":"#E4E4E4",padding:".4em .6em",border:"1px inset"},tabIndex:0},u],["br"],["br"],["a",{href:"http://www.mathjax.org/"},["www.mathjax.org"]],["span",{className:"MathJax_MenuClose",id:"MathJax_AboutClose",onclick:g.About.Remove,onkeydown:g.About.Keydown,tabIndex:0,role:"button","aria-label":t("CloseAboutDialog","Close about MathJax dialog")},[["span",{},"\u00D7"]]]]);if(y.type==="mouseup"){x.className+=" MathJax_MousePost"}x.focus();MathJax.Localization.setCSS(x);var z=(document.documentElement||{});var w=window.innerHeight||z.clientHeight||z.scrollHeight||0;if(g.prototype.msieAboutBug){x.style.width="20em";x.style.position="absolute";x.style.left=Math.floor((document.documentElement.scrollWidth-x.offsetWidth)/2)+"px";x.style.top=(Math.floor((w-x.offsetHeight)/3)+document.body.scrollTop)+"px"}else{x.style.marginLeft=Math.floor(-x.offsetWidth/2)+"px";x.style.top=Math.floor((w-x.offsetHeight)/3)+"px"}};g.About.Remove=function(u){if(g.About.div){document.body.removeChild(g.About.div);delete g.About.div}};g.About.Keydown=function(u){if(u.keyCode===b.ESCAPE||(this.id==="MathJax_AboutClose"&&(u.keyCode===b.SPACE||u.keyCode===b.RETURN))){g.About.Remove(u);g.CurrentNode().focus();n(u)}},g.About.GetJax=function(v,A,y,x){var z=[];for(var B in A){if(A.hasOwnProperty(B)&&A[B]){if((x&&A[B].version)||(A[B].isa&&A[B].isa(A))){z.push(t(y[0],y[1],(A[B].id||B),A[B].version))}}}z.sort();for(var w=0,u=z.length;w<u;w++){v.push(z[w],["br"])}return v};g.About.GetFont=function(){var u=MathJax.Hub.outputJax["jax/mml"][0]||{};var v={SVG:"web SVG",CommonHTML:"web TeX","HTML-CSS":(u.imgFonts?"image":(u.webFonts?"web":"local")+" "+u.fontInUse)}[u.id]||"generic";return v+" fonts"};g.About.GetFormat=function(){var u=MathJax.Hub.outputJax["jax/mml"][0]||{};if(u.id!=="HTML-CSS"||!u.webFonts||u.imgFonts){return}return u.allowWebFonts.replace(/otf/,"woff or otf")+" fonts"};g.Help=function(u){q.Require("[MathJax]/extensions/HelpDialog.js",function(){MathJax.Extension.Help.Dialog({type:u.type})})};g.ShowSource=function(y){if(!y){y=window.event}var x={screenX:y.screenX,screenY:y.screenY};if(!g.jax){return}if(this.format==="MathML"){var v=MathJax.ElementJax.mml;if(v&&typeof(v.mbase.prototype.toMathML)!=="undefined"){try{g.ShowSource.Text(g.jax.root.toMathML("",g.jax),y)}catch(w){if(!w.restart){throw w}e.After([this,g.ShowSource,x],w.restart)}}else{if(!q.loadingToMathML){q.loadingToMathML=true;g.ShowSource.Window(y);e.Queue(q.Require("[MathJax]/extensions/toMathML.js"),function(){delete q.loadingToMathML;if(!v.mbase.prototype.toMathML){v.mbase.prototype.toMathML=function(){}}},[this,g.ShowSource,x]);return}}}else{if(this.format==="Error"){g.ShowSource.Text(g.jax.errorText,y)}else{if(s.semanticsAnnotations[this.format]){var u=g.jax.root.getAnnotation(this.format);if(u.data[0]){g.ShowSource.Text(u.data[0].toString())}}else{if(g.jax.originalText==null){alert(t("NoOriginalForm","No original form 
available"));return}g.ShowSource.Text(g.jax.originalText,y)}}}};g.ShowSource.Window=function(v){if(!g.ShowSource.w){var w=[],u=s.windowSettings;for(var x in u){if(u.hasOwnProperty(x)){w.push(x+"="+u[x])}}g.ShowSource.w=window.open("","_blank",w.join(","))}return g.ShowSource.w};g.ShowSource.Text=function(z,x){var u=g.ShowSource.Window(x);delete g.ShowSource.w;z=z.replace(/^\s*/,"").replace(/\s*$/,"");z=z.replace(/&/g,"&amp;").replace(/</g,"&lt;").replace(/>/g,"&gt;");var y=t("EqSource","MathJax Equation Source");if(g.isMobile){u.document.open();u.document.write("<html><head><meta name='viewport' content='width=device-width, initial-scale=1.0' /><title>"+y+"</title></head><body style='font-size:85%'>");u.document.write("<pre>"+z+"</pre>");u.document.write("<hr><input type='button' value='"+t("Close","Close")+"' onclick='window.close()' />");u.document.write("</body></html>");u.document.close()}else{u.document.open();u.document.write("<html><head><title>"+y+"</title></head><body style='font-size:85%'>");u.document.write("<table><tr><td><pre>"+z+"</pre></td></tr></table>");u.document.write("</body></html>");u.document.close();var v=u.document.body.firstChild;setTimeout(function(){var B=(u.outerHeight-u.innerHeight)||30,A=(u.outerWidth-u.innerWidth)||30,w,E;A=Math.max(140,Math.min(Math.floor(0.5*screen.width),v.offsetWidth+A+25));B=Math.max(40,Math.min(Math.floor(0.5*screen.height),v.offsetHeight+B+25));if(g.prototype.msieHeightBug){B+=35}u.resizeTo(A,B);var D;try{D=x.screenX}catch(C){}if(x&&D!=null){w=Math.max(0,Math.min(x.screenX-Math.floor(A/2),screen.width-A-20));E=Math.max(0,Math.min(x.screenY-Math.floor(B/2),screen.height-B-20));u.moveTo(w,E)}},50)}};g.Scale=function(){var z=["CommonHTML","HTML-CSS","SVG","NativeMML","PreviewHTML"],u=z.length,y=100,w,v;for(w=0;w<u;w++){v=r[z[w]];if(v){y=v.config.scale;break}}var x=prompt(t("ScaleMath","Scale all mathematics (compared to surrounding text) by"),y+"%");if(x){if(x.match(/^\s*\d+(\.\d*)?\s*%?\s*$/)){x=parseFloat(x);if(x){if(x!==y){for(w=0;w<u;w++){v=r[z[w]];if(v){v.config.scale=x}}g.cookie.scale=f.config.scale=x;g.saveCookie();f.Queue(["Rerender",f])}}else{alert(t("NonZeroScale","The scale should not be zero"))}}else{alert(t("PercentScale","The scale should be a percentage (e.g., 120%%)"))}}};g.Zoom=function(){if(!MathJax.Extension.MathZoom){q.Require("[MathJax]/extensions/MathZoom.js")}};g.Renderer=function(){var v=f.outputJax["jax/mml"];if(v[0]!==s.settings.renderer){var y=f.Browser,x,u=g.Renderer.Messages,w;switch(s.settings.renderer){case"NativeMML":if(!s.settings.warnedMML){if(y.isChrome&&y.version.substr(0,3)!=="24."){x=u.MML.WebKit}else{if(y.isSafari&&!y.versionAtLeast("5.0")){x=u.MML.WebKit}else{if(y.isMSIE){if(!y.hasMathPlayer){x=u.MML.MSIE}}else{if(y.isEdge){x=u.MML.WebKit}else{x=u.MML[y]}}}}w="warnedMML"}break;case"SVG":if(!s.settings.warnedSVG){if(y.isMSIE&&!m){x=u.SVG.MSIE}}break}if(x){x=t(x[0],x[1]);x+="\n\n";x+=t("SwitchAnyway","Switch the renderer anyway?\n\n(Press OK to switch, CANCEL to continue with the current renderer)");g.cookie.renderer=v[0].id;g.saveCookie();if(!confirm(x)){g.cookie.renderer=s.settings.renderer=o.Cookie.Get("menu").renderer;g.saveCookie();return}if(w){g.cookie.warned=s.settings.warned=true}g.cookie.renderer=s.settings.renderer;g.saveCookie()}f.Queue(["setRenderer",f,s.settings.renderer,"jax/mml"],["Rerender",f])}};g.Renderer.Messages={MML:{WebKit:["WebkitNativeMMLWarning","Your browser doesn't seem to support MathML natively, so switching to MathML output may cause the mathematics on the page to become 
unreadable."],MSIE:["MSIENativeMMLWarning","Internet Explorer requires the MathPlayer plugin in order to process MathML output."],Opera:["OperaNativeMMLWarning","Opera's support for MathML is limited, so switching to MathML output may cause some expressions to render poorly."],Safari:["SafariNativeMMLWarning","Your browser's native MathML does not implement all the features used by MathJax, so some expressions may not render properly."],Firefox:["FirefoxNativeMMLWarning","Your browser's native MathML does not implement all the features used by MathJax, so some expressions may not render properly."]},SVG:{MSIE:["MSIESVGWarning","SVG is not implemented in Internet Explorer prior to IE9 or when it is emulating IE8 or below. Switching to SVG output will cause the mathematics to not display properly."]}};g.AssistiveMML=function(w,u){var v=MathJax.Extension.AssistiveMML;if(!v){if(!u){q.Require("[MathJax]/extensions/AssistiveMML.js",["AssistiveMML",g,w,true])}return}MathJax.Hub.Queue([(s.settings.assistiveMML?"Add":"Remove")+"AssistiveMathML",v])};g.Font=function(){var u=r["HTML-CSS"];if(!u){return}document.location.reload()};g.Locale=function(){MathJax.Localization.setLocale(s.settings.locale);MathJax.Hub.Queue(["Reprocess",MathJax.Hub])};g.LoadLocale=function(){var u=prompt(t("LoadURL","Load translation data from this URL:"));if(u){if(!u.match(/\.js$/)){alert(t("BadURL","The URL should be for a javascript file that defines MathJax translation data. Javascript file names should end with '.js'"))}q.Require(u,function(v){if(v!=q.STATUS.OK){alert(t("BadData","Failed to load translation data from %1",u))}})}};g.MPEvents=function(w){var v=s.settings.discoverable,u=g.MPEvents.Messages;if(!m){if(s.settings.mpMouse&&!confirm(t.apply(t,u.IE8warning))){delete g.cookie.mpContext;delete s.settings.mpContext;delete g.cookie.mpMouse;delete s.settings.mpMouse;g.saveCookie();return}s.settings.mpContext=s.settings.mpMouse;g.cookie.mpContext=g.cookie.mpMouse=s.settings.mpMouse;g.saveCookie();MathJax.Hub.Queue(["Rerender",MathJax.Hub])}else{if(!v&&w.name[1]==="Menu Events"&&s.settings.mpContext){alert(t.apply(t,u.IE9warning))}}};g.MPEvents.Messages={IE8warning:["IE8warning","This will disable the MathJax menu and zoom features, but you can Alt-Click on an expression to obtain the MathJax menu instead.\n\nReally change the MathPlayer settings?"],IE9warning:["IE9warning","The MathJax contextual menu will be disabled, but you can Alt-Click on an expression to obtain the MathJax menu instead."]};f.Browser.Select({MSIE:function(u){var v=(document.compatMode==="BackCompat");var w=u.versionAtLeast("8.0")&&document.documentMode>7;g.Augment({margin:20,msieBackgroundBug:((document.documentMode||0)<9),msieFixedPositionBug:(v||!w),msieAboutBug:v,msieHeightBug:((document.documentMode||0)<9)});if(m){delete s.styles["#MathJax_About"].filter;delete s.styles[".MathJax_Menu"].filter}},Firefox:function(u){g.skipMouseover=u.isMobile&&u.versionAtLeast("6.0");g.skipMousedown=u.isMobile}});g.isMobile=f.Browser.isMobile;g.noContextMenu=f.Browser.noContextMenu;g.CreateLocaleMenu=function(){if(!g.menu){return}var z=g.menu.Find("Language").submenu,w=z.items;var v=[],B=MathJax.Localization.strings;for(var A in B){if(B.hasOwnProperty(A)){v.push(A)}}v=v.sort();z.items=[];for(var x=0,u=v.length;x<u;x++){var y=B[v[x]].menuTitle;if(y){y+=" ("+v[x]+")"}else{y=v[x]}z.items.push(c.RADIO([v[x],y],"locale",{action:g.Locale}))}z.items.push(w[w.length-2],w[w.length-1])};g.CreateAnnotationMenu=function(){if(!g.menu){return}var w=g.menu.Find("Show Math 
As","Annotation").submenu;var v=s.semanticsAnnotations;for(var u in v){if(v.hasOwnProperty(u)){w.items.push(c.COMMAND([u,u],g.ShowSource,{hidden:true,nativeTouch:true,format:u}))}}};f.Register.StartupHook("End Config",function(){s.settings=f.config.menuSettings;if(typeof(s.settings.showRenderer)!=="undefined"){s.showRenderer=s.settings.showRenderer}if(typeof(s.settings.showFontMenu)!=="undefined"){s.showFontMenu=s.settings.showFontMenu}if(typeof(s.settings.showContext)!=="undefined"){s.showContext=s.settings.showContext}g.getCookie();g.menu=g(c.SUBMENU(["Show","Show Math As"],c.COMMAND(["MathMLcode","MathML Code"],g.ShowSource,{nativeTouch:true,format:"MathML"}),c.COMMAND(["Original","Original Form"],g.ShowSource,{nativeTouch:true}),c.SUBMENU(["Annotation","Annotation"],{disabled:true}),c.RULE(),c.CHECKBOX(["texHints","Show TeX hints in MathML"],"texHints"),c.CHECKBOX(["semantics","Add original form as annotation"],"semantics")),c.RULE(),c.SUBMENU(["Settings","Math Settings"],c.SUBMENU(["ZoomTrigger","Zoom Trigger"],c.RADIO(["Hover","Hover"],"zoom",{action:g.Zoom}),c.RADIO(["Click","Click"],"zoom",{action:g.Zoom}),c.RADIO(["DoubleClick","Double-Click"],"zoom",{action:g.Zoom}),c.RADIO(["NoZoom","No Zoom"],"zoom",{value:"None"}),c.RULE(),c.LABEL(["TriggerRequires","Trigger Requires:"]),c.CHECKBOX((f.Browser.isMac?["Option","Option"]:["Alt","Alt"]),"ALT"),c.CHECKBOX(["Command","Command"],"CMD",{hidden:!f.Browser.isMac}),c.CHECKBOX(["Control","Control"],"CTRL",{hidden:f.Browser.isMac}),c.CHECKBOX(["Shift","Shift"],"Shift")),c.SUBMENU(["ZoomFactor","Zoom Factor"],c.RADIO("125%","zscale"),c.RADIO("133%","zscale"),c.RADIO("150%","zscale"),c.RADIO("175%","zscale"),c.RADIO("200%","zscale"),c.RADIO("250%","zscale"),c.RADIO("300%","zscale"),c.RADIO("400%","zscale")),c.RULE(),c.SUBMENU(["Renderer","Math Renderer"],{hidden:!s.showRenderer},c.RADIO(["HTML-CSS","HTML-CSS"],"renderer",{action:g.Renderer}),c.RADIO(["CommonHTML","Common HTML"],"renderer",{action:g.Renderer,value:"CommonHTML"}),c.RADIO(["PreviewHTML","Preview HTML"],"renderer",{action:g.Renderer,value:"PreviewHTML"}),c.RADIO(["MathML","MathML"],"renderer",{action:g.Renderer,value:"NativeMML"}),c.RADIO(["SVG","SVG"],"renderer",{action:g.Renderer}),c.RADIO(["PlainSource","Plain Source"],"renderer",{action:g.Renderer,value:"PlainSource"}),c.RULE(),c.CHECKBOX(["FastPreview","Fast Preview"],"FastPreview")),c.SUBMENU("MathPlayer",{hidden:!f.Browser.isMSIE||!s.showMathPlayer,disabled:!f.Browser.hasMathPlayer},c.LABEL(["MPHandles","Let MathPlayer Handle:"]),c.CHECKBOX(["MenuEvents","Menu Events"],"mpContext",{action:g.MPEvents,hidden:!m}),c.CHECKBOX(["MouseEvents","Mouse Events"],"mpMouse",{action:g.MPEvents,hidden:!m}),c.CHECKBOX(["MenuAndMouse","Mouse and Menu Events"],"mpMouse",{action:g.MPEvents,hidden:m})),c.SUBMENU(["FontPrefs","Font Preference"],{hidden:!s.showFontMenu},c.LABEL(["ForHTMLCSS","For HTML-CSS:"]),c.RADIO(["Auto","Auto"],"font",{action:g.Font}),c.RULE(),c.RADIO(["TeXLocal","TeX (local)"],"font",{action:g.Font}),c.RADIO(["TeXWeb","TeX (web)"],"font",{action:g.Font}),c.RADIO(["TeXImage","TeX (image)"],"font",{action:g.Font}),c.RULE(),c.RADIO(["STIXLocal","STIX (local)"],"font",{action:g.Font}),c.RADIO(["STIXWeb","STIX (web)"],"font",{action:g.Font}),c.RULE(),c.RADIO(["AsanaMathWeb","Asana Math (web)"],"font",{action:g.Font}),c.RADIO(["GyrePagellaWeb","Gyre Pagella (web)"],"font",{action:g.Font}),c.RADIO(["GyreTermesWeb","Gyre Termes (web)"],"font",{action:g.Font}),c.RADIO(["LatinModernWeb","Latin Modern 
(web)"],"font",{action:g.Font}),c.RADIO(["NeoEulerWeb","Neo Euler (web)"],"font",{action:g.Font})),c.SUBMENU(["ContextMenu","Contextual Menu"],{hidden:!s.showContext},c.RADIO(["MathJax","MathJax"],"context"),c.RADIO(["Browser","Browser"],"context")),c.COMMAND(["Scale","Scale All Math ..."],g.Scale),c.RULE().With({hidden:!s.showDiscoverable,name:["","discover_rule"]}),c.CHECKBOX(["Discoverable","Highlight on Hover"],"discoverable",{hidden:!s.showDiscoverable})),c.SUBMENU(["Accessibility","Accessibility"],c.CHECKBOX(["AssistiveMML","Assistive MathML"],"assistiveMML",{action:g.AssistiveMML}),c.CHECKBOX(["InTabOrder","Include in Tab Order"],"inTabOrder")),c.SUBMENU(["Locale","Language"],{hidden:!s.showLocale,ltr:true},c.RADIO("en","locale",{action:g.Locale}),c.RULE().With({hidden:!s.showLocaleURL,name:["","localURL_rule"]}),c.COMMAND(["LoadLocale","Load from URL ..."],g.LoadLocale,{hidden:!s.showLocaleURL})),c.RULE(),c.COMMAND(["About","About MathJax"],g.About),c.COMMAND(["Help","MathJax Help"],g.Help));if(g.isMobile){(function(){var v=s.settings;var u=g.menu.Find("Math Settings","Zoom Trigger").submenu;u.items[0].disabled=u.items[1].disabled=true;if(v.zoom==="Hover"||v.zoom=="Click"){v.zoom="None"}u.items=u.items.slice(0,4);if(navigator.appVersion.match(/[ (]Android[) ]/)){g.ITEM.SUBMENU.Augment({marker:"\u00BB"})}})()}g.CreateLocaleMenu();g.CreateAnnotationMenu()});g.showRenderer=function(u){g.cookie.showRenderer=s.showRenderer=u;g.saveCookie();g.menu.Find("Math Settings","Math Renderer").hidden=!u};g.showMathPlayer=function(u){g.cookie.showMathPlayer=s.showMathPlayer=u;g.saveCookie();g.menu.Find("Math Settings","MathPlayer").hidden=!u};g.showFontMenu=function(u){g.cookie.showFontMenu=s.showFontMenu=u;g.saveCookie();g.menu.Find("Math Settings","Font Preference").hidden=!u};g.showContext=function(u){g.cookie.showContext=s.showContext=u;g.saveCookie();g.menu.Find("Math Settings","Contextual Menu").hidden=!u};g.showDiscoverable=function(u){g.cookie.showDiscoverable=s.showDiscoverable=u;g.saveCookie();g.menu.Find("Math Settings","Highlight on Hover").hidden=!u;g.menu.Find("Math Settings","discover_rule").hidden=!u};g.showLocale=function(u){g.cookie.showLocale=s.showLocale=u;g.saveCookie();g.menu.Find("Language").hidden=!u};MathJax.Hub.Register.StartupHook("HTML-CSS Jax Ready",function(){if(!MathJax.OutputJax["HTML-CSS"].config.imageFont){g.menu.Find("Math Settings","Font Preference","TeX (image)").disabled=true}});e.Queue(f.Register.StartupHook("End Config",{}),["Styles",q,s.styles],["Post",f.Startup.signal,"MathMenu Ready"],["loadComplete",q,"[MathJax]/extensions/MathMenu.js"])})(MathJax.Hub,MathJax.HTML,MathJax.Ajax,MathJax.CallBack,MathJax.OutputJax);
PypiClean
/hestia_earth_models-0.49.0-py3-none-any.whl/hestia_earth/models/geospatialDatabase/clayContent.py
from hestia_earth.schema import MeasurementStatsDefinition, MeasurementMethodClassification

from hestia_earth.models.log import logRequirements, logShouldRun
from hestia_earth.models.utils.measurement import _new_measurement
from .utils import download, find_existing_measurement, has_geospatial_data, should_download
from . import MODEL

REQUIREMENTS = {
    "Site": {
        "or": [
            {"latitude": "", "longitude": ""},
            {"boundary": {}},
            {"region": {"@type": "Term", "termType": "region"}}
        ]
    }
}
RETURNS = {
    "Measurement": [{
        "value": "",
        "depthUpper": "0",
        "depthLower": "30",
        "statsDefinition": "spatial",
        "methodClassification": "geospatial dataset"
    }]
}
TERM_ID = 'clayContent'
EE_PARAMS = {
    'collection': 'T_CLAY',
    'ee_type': 'raster',
    'reducer': 'mean',
    'fields': 'mean'
}
BIBLIO_TITLE = 'Harmonized World Soil Database Version 1.2. Food and Agriculture Organization of the United Nations (FAO).'  # noqa: E501


def _measurement(value: int):
    measurement = _new_measurement(TERM_ID, None, BIBLIO_TITLE)
    measurement['value'] = [value]
    measurement['depthUpper'] = 0
    measurement['depthLower'] = 30
    measurement['statsDefinition'] = MeasurementStatsDefinition.SPATIAL.value
    measurement['methodClassification'] = MeasurementMethodClassification.GEOSPATIAL_DATASET.value
    return measurement


def _download(site: dict):
    value = download(TERM_ID, site, EE_PARAMS, EE_PARAMS['reducer'])
    return None if value is None else round(value, 2)


def _run(site: dict):
    value = find_existing_measurement(TERM_ID, site) or _download(site)
    return [_measurement(value)] if value is not None else []


def _should_run(site: dict):
    contains_geospatial_data = has_geospatial_data(site)
    below_max_area_size = should_download(TERM_ID, site)

    logRequirements(site, model=MODEL, term=TERM_ID,
                    contains_geospatial_data=contains_geospatial_data,
                    below_max_area_size=below_max_area_size)

    should_run = all([contains_geospatial_data, below_max_area_size])
    logShouldRun(site, MODEL, TERM_ID, should_run)
    return should_run


def run(site: dict):
    return _run(site) if _should_run(site) else []
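

# --- Illustrative usage sketch (not part of the upstream module) ---
# A minimal, hedged example of how this model could be invoked, assuming the
# geospatial backend behind `download` is configured. The coordinates are
# hypothetical and only mirror the "latitude"/"longitude" option described in
# REQUIREMENTS above.
if __name__ == '__main__':
    example_site = {
        "@type": "Site",
        "latitude": 51.0,   # hypothetical values
        "longitude": 5.0
    }
    # On success run() returns a list with one clayContent Measurement
    # (depthUpper=0, depthLower=30); otherwise it returns an empty list.
    print(run(example_site))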
PypiClean
/geoslurp-2.2.1-py3-none-any.whl/geoslurp_userplugins/orsiFronts.py
# geoslurp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.

# You should have received a copy of the GNU Lesser General Public
# License along with geoslurp; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

# Author Roelof Rietbroek ([email protected]), 2019

from geoslurp.dataset import DataSet
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from geoalchemy2.types import Geography
from osgeo import ogr
from geoalchemy2.elements import WKBElement
from geoslurp.datapull.http import Uri as http
from datetime import datetime
from zipfile import ZipFile
from geoslurp.config.slurplogger import slurplogger
from geoslurp.datapull import UriFile
from geoslurp.datapull import findFiles
from geoslurp.config.catalogue import geoslurpCatalogue
import re
import os

scheme = "oceanobs"

FrontsTBase = declarative_base(metadata=MetaData(schema=scheme))

geoLineStrType = Geography(geometry_type="MULTILINESTRING", srid='4326', spatial_index=True, dimension=2)


class OrsifrontsTable(FrontsTBase):
    """Defines the Orsifronts PostgreSQL table"""
    __tablename__ = 'orsifronts'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    acronym = Column(String)
    geom = Column(geoLineStrType)


def orsiMetaExtractor(uri):
    """extract table data from the files"""
    lookup = {"stf": "Subtropical front",
              "saf": "Subantarctic front",
              "pf": "Polar front",
              "saccf": "Southern Antarctic circumpolar current front",
              "sbdy": "Southern Boundary of the Antarctic circumpolar current"}
    abbr = os.path.basename(uri.url)[:-4]
    geofront = ogr.Geometry(ogr.wkbMultiLineString)
    frontsegment = ogr.Geometry(ogr.wkbLineString)
    recomm = re.compile("^%")
    with open(uri.url, 'r') as fid:
        for ln in fid:
            if recomm.search(ln):
                # every time we encounter a % we need to start a new segment
                # and possibly update the multilinestring
                if frontsegment.GetPointCount() > 1:
                    geofront.AddGeometry(frontsegment)
                frontsegment = ogr.Geometry(ogr.wkbLineString)
                continue
            lonlat = ln.split()
            frontsegment.AddPoint(float(lonlat[0]), float(lonlat[1]))
    geofront.FlattenTo2D()
    meta = {"acronym": abbr, "name": lookup[abbr], "geom": WKBElement(geofront.ExportToIsoWkb(), srid=4326)}
    return meta


class Orsifronts(DataSet):
    """Orsifronts table"""
    version = (0, 0, 0)
    table = OrsifrontsTable
    scheme = scheme

    def __init__(self, dbcon):
        super().__init__(dbcon)
        self.table.metadata.create_all(self.db.dbeng, checkfirst=True)
        self._dbinvent.data = {"citation": "Orsi, A. H., T. Whitworth III and W. D. Nowlin, Jr. (1995). "
                                           "On the meridional extent and fronts of the Antarctic Circumpolar Current, "
                                           "Deep-Sea Research I, 42, 641-67"}

    def pull(self):
        """Download ascii files in zip and unpack ascii data"""
        httpserv = http('https://github.com/AustralianAntarcticDivision/orsifronts/raw/master/data-raw/fronts.zip',
                        lastmod=datetime(2018, 1, 1))
        uri, upd = httpserv.download(self.cacheDir(), check=True)
        if upd:
            # unpack zip
            with ZipFile(uri.url, 'r') as zp:
                zp.extractall(self.cacheDir())

    def register(self):
        """Register all downloaded fronts (in text files)"""
        slurplogger().info("Building file list..")
        files = [UriFile(file) for file in findFiles(self.cacheDir(), '.*txt', self._dbinvent.lastupdate)]

        if len(files) == 0:
            slurplogger().info("Orsifronts: No new files found since last update")
            return

        # possibly empty table
        self.truncateTable()

        # loop over files
        for uri in files:
            slurplogger().info("adding %s" % (uri.url))
            self.addEntry(orsiMetaExtractor(uri))

        self.updateInvent()


# register dataset
geoslurpCatalogue.addDataset(Orsifronts)
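

# --- Illustrative usage sketch (not part of the upstream module) ---
# A hedged outline of how this dataset class is typically driven; `dbcon` is a
# hypothetical geoslurp database connection obtained elsewhere and is not
# constructed here.
#
#   fronts = Orsifronts(dbcon)   # creates the oceanobs.orsifronts table if needed
#   fronts.pull()                # downloads fronts.zip and unpacks the ascii files
#   fronts.register()            # parses each *.txt front file and inserts one row per front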
PypiClean
/bee-django-referral-0.1.24.tar.gz/bee-django-referral-0.1.24/bee_django_referral/static/bee_django_referral/cityselect/js/city.min.js
{ "citylist" : [{ "p": "北京", "c": [{"n": "东城区"}, {"n": "西城区"}, {"n": "崇文区"}, {"n": "宣武区"}, {"n": "朝阳区"}, {"n": "丰台区"}, {"n": "石景山区"}, {"n": "海淀区"}, {"n": "门头沟区"}, {"n": "房山区"}, {"n": "通州区"}, {"n": "顺义区"}, {"n": "昌平区"}, {"n": "大兴区"}, {"n": "平谷区"}, {"n": "怀柔区"}, {"n": "密云县"}, {"n": "延庆县"}] }, { "p": "天津", "c": [{"n": "和平区"}, {"n": "河东区"}, {"n": "河西区"}, {"n": "南开区"}, {"n": "河北区"}, {"n": "红挢区"}, {"n": "滨海新区"}, {"n": "东丽区"}, {"n": "西青区"}, {"n": "津南区"}, {"n": "北辰区"}, {"n": "宁河区"}, {"n": "武清区"}, {"n": "静海县"}, {"n": "宝坻区"}, {"n": "蓟县"}] }, { "p": "河北", "c": [{ "n": "石家庄", "a": [{"s": "长安区"}, {"s": "桥东区"}, {"s": "桥西区"}, {"s": "新华区"}, {"s": "井陉矿区"}, {"s": "裕华区"}, {"s": "井陉县"}, {"s": "正定县"}, {"s": "栾城县"}, {"s": "行唐县"}, {"s": "灵寿县"}, {"s": "高邑县"}, {"s": "深泽县"}, {"s": "赞皇县"}, {"s": "无极县"}, {"s": "平山县"}, {"s": "元氏县"}, {"s": "赵县"}, {"s": "辛集市"}, {"s": "藁城市"}, {"s": "晋州市"}, {"s": "新乐市"}, {"s": "鹿泉市"}] }, { "n": "唐山", "a": [{"s": "路南区"}, {"s": "路北区"}, {"s": "古冶区"}, {"s": "开平区"}, {"s": "丰南区"}, {"s": "丰润区"}, {"s": "滦县"}, {"s": "滦南县"}, {"s": "乐亭县"}, {"s": "迁西县"}, {"s": "玉田县"}, {"s": "唐海县"}, {"s": "遵化市"}, {"s": "迁安市"}] }, { "n": "秦皇岛", "a": [{"s": "海港区"}, {"s": "山海关区"}, {"s": "北戴河区"}, {"s": "青龙满族自治县"}, {"s": "昌黎县"}, {"s": "抚宁县"}, {"s": "卢龙县"}] }, { "n": "邯郸", "a": [{"s": "邯山区"}, {"s": "丛台区"}, {"s": "复兴区"}, {"s": "峰峰矿区"}, {"s": "邯郸县"}, {"s": "临漳县"}, {"s": "成安县"}, {"s": "大名县"}, {"s": "涉县"}, {"s": "磁县"}, {"s": "肥乡县"}, {"s": "永年县"}, {"s": "邱县"}, {"s": "鸡泽县"}, {"s": "广平县"}, {"s": "馆陶县"}, {"s": "魏县"}, {"s": "曲周县"}, {"s": "武安市"}] }, { "n": "邢台", "a": [{"s": "桥东区"}, {"s": "桥西区"}, {"s": "邢台县"}, {"s": "临城县"}, {"s": "内丘县"}, {"s": "柏乡县"}, {"s": "隆尧县"}, {"s": "任县"}, {"s": "南和县"}, {"s": "宁晋县"}, {"s": "巨鹿县"}, {"s": "新河县"}, {"s": "广宗县"}, {"s": "平乡县"}, {"s": "威县"}, {"s": "清河县"}, {"s": "临西县"}, {"s": "南宫市"}, {"s": "沙河市"}] }, { "n": "保定", "a": [{"s": "新市区"}, {"s": "北市区"}, {"s": "南市区"}, {"s": "满城县"}, {"s": "清苑县"}, {"s": "涞水县"}, {"s": "阜平县"}, {"s": "徐水县"}, {"s": "定兴县"}, {"s": "唐县"}, {"s": "高阳县"}, {"s": "容城县"}, {"s": "涞源县"}, {"s": "望都县"}, {"s": "安新县"}, {"s": "易县"}, {"s": "曲阳县"}, {"s": "蠡县"}, {"s": "顺平县"}, {"s": "博野县"}, {"s": "雄县"}, {"s": "涿州市"}, {"s": "定州市"}, {"s": "安国市"}, {"s": "高碑店市"}] }, { "n": "张家口", "a": [{"s": "桥东区"}, {"s": "桥西区"}, {"s": "宣化区"}, {"s": "下花园区"}, {"s": "宣化县"}, {"s": "张北县"}, {"s": "康保县"}, {"s": "沽源县"}, {"s": "尚义县"}, {"s": "蔚县"}, {"s": "阳原县"}, {"s": "怀安县"}, {"s": "万全县"}, {"s": "怀来县"}, {"s": "涿鹿县"}, {"s": "赤城县"}, {"s": "崇礼县"}] }, { "n": "承德", "a": [{"s": "双桥区"}, {"s": "双滦区"}, {"s": "鹰手营子矿区"}, {"s": "承德县"}, {"s": "兴隆县"}, {"s": "平泉县"}, {"s": "滦平县"}, {"s": "隆化县"}, {"s": "丰宁满族自治县"}, {"s": "宽城满族自治县"}, {"s": "围场满族蒙古族自治县"}] }, { "n": "沧州", "a": [{"s": "新华区"}, {"s": "运河区"}, {"s": "沧县"}, {"s": "青县"}, {"s": "东光县"}, {"s": "海兴县"}, {"s": "盐山县"}, {"s": "肃宁县"}, {"s": "南皮县"}, {"s": "吴桥县"}, {"s": "献县"}, {"s": "孟村回族自治县"}, {"s": "泊头市"}, {"s": "任丘市"}, {"s": "黄骅市"}, {"s": "河间市"}] }, { "n": "廊坊", "a": [{"s": "安次区"}, {"s": "广阳区"}, {"s": "固安县"}, {"s": "永清县"}, {"s": "香河县"}, {"s": "大城县"}, {"s": "文安县"}, {"s": "大厂回族自治县"}, {"s": "霸州市"}, {"s": "三河市"}] }, { "n": "衡水", "a": [{"s": "桃城区"}, {"s": "枣强县"}, {"s": "武邑县"}, {"s": "武强县"}, {"s": "饶阳县"}, {"s": "安平县"}, {"s": "故城县"}, {"s": "景县"}, {"s": "阜城县"}, {"s": "冀州市"}, {"s": "深州市"}] }] }, { "p": "山西", "c": [{ "n": "太原", "a": [{"s": "小店区"}, {"s": "迎泽区"}, {"s": "杏花岭区"}, {"s": "尖草坪区"}, {"s": "万柏林区"}, {"s": "晋源区"}, {"s": "清徐县"}, {"s": "阳曲县"}, {"s": "娄烦县"}, {"s": "古交市"}] }, { "n": "大同", "a": [{"s": "城区"}, {"s": "矿区"}, {"s": "南郊区"}, {"s": "新荣区"}, {"s": "阳高县"}, {"s": "天镇县"}, {"s": "广灵县"}, {"s": "灵丘县"}, {"s": 
"浑源县"}, {"s": "左云县"}, {"s": "大同县"}] }, {"n": "阳泉", "a": [{"s": "城区"}, {"s": "矿区"}, {"s": "郊区"}, {"s": "平定县"}, {"s": "盂县"}]}, { "n": "长治", "a": [{"s": "城区"}, {"s": "郊区"}, {"s": "长治县"}, {"s": "襄垣县"}, {"s": "屯留县"}, {"s": "平顺县"}, {"s": "黎城县"}, {"s": "壶关县"}, {"s": "长子县"}, {"s": "武乡县"}, {"s": "沁县"}, {"s": "沁源县"}, {"s": "潞城市"}] }, { "n": "晋城", "a": [{"s": "城区"}, {"s": "沁水县"}, {"s": "阳城县"}, {"s": "陵川县"}, {"s": "泽州县"}, {"s": "高平市"}] }, { "n": "朔州", "a": [{"s": "朔城区"}, {"s": "平鲁区"}, {"s": "山阴县"}, {"s": "应县"}, {"s": "右玉县"}, {"s": "怀仁县"}] }, { "n": "晋中", "a": [{"s": "榆次区"}, {"s": "榆社县"}, {"s": "左权县"}, {"s": "和顺县"}, {"s": "昔阳县"}, {"s": "寿阳县"}, {"s": "太谷县"}, {"s": "祁县"}, {"s": "平遥县"}, {"s": "灵石县"}, {"s": "介休市"}] }, { "n": "运城", "a": [{"s": "盐湖区"}, {"s": "临猗县"}, {"s": "万荣县"}, {"s": "闻喜县"}, {"s": "稷山县"}, {"s": "新绛县"}, {"s": "绛县"}, {"s": "垣曲县"}, {"s": "夏县"}, {"s": "平陆县"}, {"s": "芮城县"}, {"s": "永济市"}, {"s": "河津市"}] }, { "n": "忻州", "a": [{"s": "忻府区"}, {"s": "定襄县"}, {"s": "五台县"}, {"s": "代县"}, {"s": "繁峙县"}, {"s": "宁武县"}, {"s": "静乐县"}, {"s": "神池县"}, {"s": "五寨县"}, {"s": "岢岚县"}, {"s": "河曲县"}, {"s": "保德县"}, {"s": "偏关县"}, {"s": "原平市"}] }, { "n": "临汾", "a": [{"s": "尧都区"}, {"s": "曲沃县"}, {"s": "翼城县"}, {"s": "襄汾县"}, {"s": "洪洞县"}, {"s": "古县"}, {"s": "安泽县"}, {"s": "浮山县"}, {"s": "吉县"}, {"s": "乡宁县"}, {"s": "大宁县"}, {"s": "隰县"}, {"s": "永和县"}, {"s": "蒲县"}, {"s": "汾西县"}, {"s": "侯马市"}, {"s": "霍州市"}] }, { "n": "吕梁", "a": [{"s": "离石区"}, {"s": "文水县"}, {"s": "交城县"}, {"s": "兴县"}, {"s": "临县"}, {"s": "柳林县"}, {"s": "石楼县"}, {"s": "岚县"}, {"s": "方山县"}, {"s": "中阳县"}, {"s": "交口县"}, {"s": "孝义市"}, {"s": "汾阳市"}] }] }, { "p": "内蒙古", "c": [{ "n": "呼和浩特", "a": [{"s": "新城区"}, {"s": "回民区"}, {"s": "玉泉区"}, {"s": "玉泉区"}, {"s": "赛罕区"}, {"s": "土默特左旗"}, {"s": "托克托县"}, {"s": "和林格尔县"}, {"s": "清水河县"}, {"s": "武川县"}] }, { "n": "包头", "a": [{"s": "东河区"}, {"s": "昆都仑区"}, {"s": "青山区"}, {"s": "石拐区"}, {"s": "白云矿区"}, {"s": "九原区"}, {"s": "土默特右旗"}, {"s": "固阳县"}, {"s": "达尔罕茂明安联合旗"}] }, {"n": "乌海", "a": [{"s": "海勃湾区"}, {"s": "海南区"}, {"s": "乌达区"}]}, { "n": "赤峰", "a": [{"s": "红山区"}, {"s": "元宝山区"}, {"s": "松山区"}, {"s": "阿鲁科尔沁旗"}, {"s": "巴林左旗"}, {"s": "巴林右旗"}, {"s": "林西县"}, {"s": "克什克腾旗"}, {"s": "翁牛特旗"}, {"s": "喀喇沁旗"}, {"s": "宁城县"}, {"s": "敖汉旗"}] }, { "n": "通辽", "a": [{"s": "科尔沁区"}, {"s": "科尔沁左翼中旗"}, {"s": "科尔沁左翼后旗"}, {"s": "开鲁县"}, {"s": "库伦旗"}, {"s": "奈曼旗"}, {"s": "扎鲁特旗"}, {"s": "霍林郭勒市"}] }, { "n": "鄂尔多斯", "a": [{"s": "东胜区"}, {"s": "达拉特旗"}, {"s": "准格尔旗"}, {"s": "鄂托克前旗"}, {"s": "鄂托克旗"}, {"s": "杭锦旗"}, {"s": "乌审旗"}, {"s": "伊金霍洛旗"}] }, { "n": "呼伦贝尔", "a": [{"s": "海拉尔区"}, {"s": "阿荣旗"}, {"s": "莫力达瓦达斡尔族自治旗"}, {"s": "鄂伦春自治旗"}, {"s": "鄂温克族自治旗"}, {"s": "陈巴尔虎旗"}, {"s": "新巴尔虎左旗"}, {"s": "新巴尔虎右旗"}, {"s": "满洲里市"}, {"s": "牙克石市"}, {"s": "扎兰屯市"}, {"s": "额尔古纳市"}, {"s": "根河市"}] }, { "n": "巴彦淖尔", "a": [{"s": "临河区"}, {"s": "五原县"}, {"s": "磴口县"}, {"s": "乌拉特前旗"}, {"s": "乌拉特中旗"}, {"s": "乌拉特后旗"}, {"s": "杭锦后旗"}] }, { "n": "乌兰察布", "a": [{"s": "集宁区"}, {"s": "卓资县"}, {"s": "化德县"}, {"s": "商都县"}, {"s": "兴和县"}, {"s": "凉城县"}, {"s": "察哈尔右翼前旗"}, {"s": "察哈尔右翼中旗"}, {"s": "察哈尔右翼后旗"}, {"s": "四子王旗"}, {"s": "丰镇市"}] }, { "n": "兴安", "a": [{"s": "乌兰浩特市"}, {"s": "阿尔山市"}, {"s": "科尔沁右翼前旗"}, {"s": "科尔沁右翼中旗"}, {"s": "扎赉特旗"}, {"s": "突泉县"}] }, { "n": "锡林郭勒", "a": [{"s": "二连浩特市"}, {"s": "锡林浩特市"}, {"s": "阿巴嘎旗"}, {"s": "苏尼特左旗"}, {"s": "苏尼特右旗"}, {"s": "东乌珠穆沁旗"}, {"s": "西乌珠穆沁旗"}, {"s": "太仆寺旗"}, {"s": "镶黄旗"}, {"s": "正镶白旗"}, {"s": "正蓝旗"}, {"s": "多伦县"}] }, {"n": "阿拉善", "a": [{"s": "阿拉善左旗"}, {"s": "阿拉善右旗"}, {"s": "额济纳旗"}]}] }, { "p": "辽宁", "c": [{ "n": "沈阳", "a": [{"s": "和平区"}, {"s": "沈河区"}, {"s": "大东区"}, {"s": "皇姑区"}, {"s": "铁西区"}, 
{"s": "苏家屯区"}, {"s": "东陵区"}, {"s": "新城子区"}, {"s": "于洪区"}, {"s": "辽中县"}, {"s": "康平县"}, {"s": "法库县"}, {"s": "新民市"}] }, { "n": "大连", "a": [{"s": "中山区"}, {"s": "西岗区"}, {"s": "沙河口区"}, {"s": "甘井子区"}, {"s": "旅顺口区"}, {"s": "金州区"}, {"s": "长海县"}, {"s": "瓦房店市"}, {"s": "普兰店市"}, {"s": "庄河市"}] }, { "n": "鞍山", "a": [{"s": "铁东区"}, {"s": "铁西区"}, {"s": "立山区"}, {"s": "千山区"}, {"s": "台安县"}, {"s": "210323"}, {"s": "海城市"}] }, { "n": "抚顺", "a": [{"s": "新抚区"}, {"s": "东洲区"}, {"s": "望花区"}, {"s": "顺城区"}, {"s": "抚顺县"}, {"s": "新宾满族自治县"}, {"s": "清原满族自治县"}] }, { "n": "本溪", "a": [{"s": "平山区"}, {"s": "溪湖区"}, {"s": "明山区"}, {"s": "南芬区"}, {"s": "本溪满族自治县"}, {"s": "桓仁满族自治县"}] }, { "n": "丹东", "a": [{"s": "元宝区"}, {"s": "振兴区"}, {"s": "振安区"}, {"s": "宽甸满族自治县"}, {"s": "东港市"}, {"s": "凤城市"}] }, { "n": "锦州", "a": [{"s": "古塔区"}, {"s": "凌河区"}, {"s": "太和区"}, {"s": "黑山县"}, {"s": "义县"}, {"s": "凌海市"}, {"s": "北镇市"}] }, { "n": "营口", "a": [{"s": "站前区"}, {"s": "西市区"}, {"s": "鲅鱼圈区"}, {"s": "老边区"}, {"s": "盖州市"}, {"s": "大石桥市"}] }, { "n": "阜新", "a": [{"s": "海州区"}, {"s": "新邱区"}, {"s": "太平区"}, {"s": "清河门区"}, {"s": "细河区"}, {"s": "阜新蒙古族自治县"}, {"s": "彰武县"}] }, { "n": "辽阳", "a": [{"s": "白塔区"}, {"s": "文圣区"}, {"s": "宏伟区"}, {"s": "弓长岭区"}, {"s": "太子河区"}, {"s": "辽阳县"}, {"s": "灯塔市"}] }, {"n": "盘锦", "a": [{"s": "双台子区"}, {"s": "兴隆台区"}, {"s": "大洼县"}, {"s": "盘山县"}]}, { "n": "铁岭", "a": [{"s": "银州区"}, {"s": "清河区"}, {"s": "铁岭县"}, {"s": "西丰县"}, {"s": "昌图县"}, {"s": "调兵山市"}, {"s": "开原市"}] }, { "n": "朝阳", "a": [{"s": "双塔区"}, {"s": "龙城区"}, {"s": "朝阳县"}, {"s": "建平县"}, {"s": "喀喇沁左翼蒙古族自治县"}, {"s": "北票市"}, {"s": "凌源市"}] }, {"n": "葫芦岛", "a": [{"s": "连山区"}, {"s": "龙港区"}, {"s": "南票区"}, {"s": "绥中县"}, {"s": "建昌县"}, {"s": "兴城市"}]}] }, { "p": "吉林", "c": [{ "n": "长春", "a": [{"s": "南关区"}, {"s": "宽城区"}, {"s": "朝阳区"}, {"s": "二道区"}, {"s": "绿园区"}, {"s": "双阳区"}, {"s": "农安县"}, {"s": "九台市"}, {"s": "榆树市"}, {"s": "德惠市"}] }, { "n": "吉林", "a": [{"s": "昌邑区"}, {"s": "龙潭区"}, {"s": "船营区"}, {"s": "丰满区"}, {"s": "永吉县"}, {"s": "蛟河市"}, {"s": "桦甸市"}, {"s": "舒兰市"}, {"s": "磐石市"}] }, { "n": "四平", "a": [{"s": "铁西区"}, {"s": "铁东区"}, {"s": "梨树县"}, {"s": "伊通满族自治县"}, {"s": "公主岭市"}, {"s": "双辽市"}] }, {"n": "辽源", "a": [{"s": "龙山区"}, {"s": "西安区"}, {"s": "东丰县"}, {"s": "东辽县"}]}, { "n": "通化", "a": [{"s": "东昌区"}, {"s": "二道江区"}, {"s": "通化县"}, {"s": "辉南县"}, {"s": "柳河县"}, {"s": "梅河口市"}, {"s": "集安市"}] }, { "n": "白山", "a": [{"s": "八道江区"}, {"s": "江源区"}, {"s": "抚松县"}, {"s": "靖宇县"}, {"s": "长白朝鲜族自治县"}, {"s": "临江市"}] }, {"n": "松原", "a": [{"s": "宁江区"}, {"s": "前郭尔罗斯蒙古族自治县"}, {"s": "长岭县"}, {"s": "乾安县"}, {"s": "扶余县"}]}, { "n": "白城", "a": [{"s": "洮北区"}, {"s": "镇赉县"}, {"s": "通榆县"}, {"s": "洮南市"}, {"s": "大安市"}] }, { "n": "延边", "a": [{"s": "延吉市"}, {"s": "图们市"}, {"s": "敦化市"}, {"s": "珲春市"}, {"s": "龙井市"}, {"s": "和龙市"}, {"s": "汪清县"}, {"s": "安图县"}] }] }, { "p": "黑龙江", "c": [{ "n": "哈尔滨", "a": [{"s": "道里区"}, {"s": "南岗区"}, {"s": "道外区"}, {"s": "平房区"}, {"s": "松北区"}, {"s": "香坊区"}, {"s": "呼兰区"}, {"s": "阿城区"}, {"s": "依兰县"}, {"s": "方正县"}, {"s": "宾县"}, {"s": "巴彦县"}, {"s": "木兰县"}, {"s": "通河县"}, {"s": "延寿县"}, {"s": "双城市"}, {"s": "尚志市"}, {"s": "五常市"}] }, { "n": "齐齐哈尔", "a": [{"s": "龙沙区"}, {"s": "建华区"}, {"s": "铁锋区"}, {"s": "昂昂溪区"}, {"s": "富拉尔基区"}, {"s": "碾子山区"}, {"s": "梅里斯达斡尔族区"}, {"s": "龙江县"}, {"s": "依安县"}, {"s": "泰来县"}, {"s": "甘南县"}, {"s": "富裕县"}, {"s": "克山县"}, {"s": "克东县"}, {"s": "拜泉县"}, {"s": "讷河市"}] }, { "n": "鸡西", "a": [{"s": "鸡冠区"}, {"s": "恒山区"}, {"s": "滴道区"}, {"s": "梨树区"}, {"s": "城子河区"}, {"s": "麻山区"}, {"s": "鸡东县"}, {"s": "虎林市"}, {"s": "密山市"}] }, { "n": "鹤岗", "a": [{"s": "向阳区"}, {"s": "工农区"}, {"s": "南山区"}, {"s": "兴安区"}, {"s": "东山区"}, {"s": "兴山区"}, {"s": 
"萝北县"}, {"s": "绥滨县"}] }, { "n": "双鸭山", "a": [{"s": "尖山区"}, {"s": "岭东区"}, {"s": "四方台区"}, {"s": "宝山区"}, {"s": "集贤县"}, {"s": "友谊县"}, {"s": "宝清县"}, {"s": "饶河县"}] }, { "n": "大庆", "a": [{"s": "萨尔图区"}, {"s": "龙凤区"}, {"s": "让胡路区"}, {"s": "红岗区"}, {"s": "大同区"}, {"s": "肇州县"}, {"s": "肇源县"}, {"s": "林甸县"}, {"s": "杜尔伯特蒙古族自治县"}] }, { "n": "伊春", "a": [{"s": "伊春区"}, {"s": "南岔区"}, {"s": "友好区"}, {"s": "西林区"}, {"s": "翠峦区"}, {"s": "新青区"}, {"s": "美溪区"}, {"s": "金山屯区"}, {"s": "五营区"}, {"s": "乌马河区"}, {"s": "汤旺河区"}, {"s": "带岭区"}, {"s": "乌伊岭区"}, {"s": "红星区"}, {"s": "上甘岭区"}, {"s": "嘉荫县"}, {"s": "铁力市"}] }, { "n": "佳木斯", "a": [{"s": "向阳区"}, {"s": "前进区"}, {"s": "东风区"}, {"s": "郊区"}, {"s": "桦南县"}, {"s": "桦川县"}, {"s": "汤原县"}, {"s": "抚远县"}, {"s": "同江市"}, {"s": "富锦市"}] }, {"n": "七台河", "a": [{"s": "新兴区"}, {"s": "桃山区"}, {"s": "茄子河区"}, {"s": "勃利县"}]}, { "n": "牡丹江", "a": [{"s": "东安区"}, {"s": "阳明区"}, {"s": "爱民区"}, {"s": "西安区"}, {"s": "东宁县"}, {"s": "林口县"}, {"s": "绥芬河市"}, {"s": "海林市"}, {"s": "宁安市"}, {"s": "穆棱市"}] }, { "n": "黑河", "a": [{"s": "爱辉区"}, {"s": "嫩江县"}, {"s": "逊克县"}, {"s": "孙吴县"}, {"s": "北安市"}, {"s": "五大连池市"}] }, { "n": "绥化", "a": [{"s": "北林区"}, {"s": "望奎县"}, {"s": "兰西县"}, {"s": "青冈县"}, {"s": "庆安县"}, {"s": "明水县"}, {"s": "绥棱县"}, {"s": "安达市"}, {"s": "肇东市"}, {"s": "海伦市"}] }, { "n": "大兴安岭", "a": [{"s": "加格达奇区"}, {"s": "松岭区"}, {"s": "新林区"}, {"s": "呼中区"}, {"s": "呼玛县"}, {"s": "塔河县"}, {"s": "漠河县"}] }] }, { "p": "上海", "c": [{"n": "黄浦区"}, {"n": "卢湾区"}, {"n": "徐汇区"}, {"n": "长宁区"}, {"n": "静安区"}, {"n": "普陀区"}, {"n": "闸北区"}, {"n": "虹口区"}, {"n": "杨浦区"}, {"n": "闵行区"}, {"n": "宝山区"}, {"n": "嘉定区"}, {"n": "浦东新区"}, {"n": "金山区"}, {"n": "松江区"}, {"n": "奉贤区"}, {"n": "青浦区"}, {"n": "崇明县"}] }, { "p": "江苏", "c": [{ "n": "南京", "a": [{"s": "玄武区"}, {"s": "白下区"}, {"s": "秦淮区"}, {"s": "建邺区"}, {"s": "鼓楼区"}, {"s": "下关区"}, {"s": "浦口区"}, {"s": "栖霞区"}, {"s": "雨花台区"}, {"s": "江宁区"}, {"s": "六合区"}, {"s": "溧水县"}, {"s": "高淳县"}] }, { "n": "无锡", "a": [{"s": "崇安区"}, {"s": "南长区"}, {"s": "北塘区"}, {"s": "锡山区"}, {"s": "惠山区"}, {"s": "滨湖区"}, {"s": "江阴市"}, {"s": "宜兴市"}] }, { "n": "徐州", "a": [{"s": "鼓楼区"}, {"s": "云龙区"}, {"s": "九里区"}, {"s": "贾汪区"}, {"s": "泉山区"}, {"s": "丰县"}, {"s": "沛县"}, {"s": "铜山县"}, {"s": "睢宁县"}, {"s": "新沂市"}, {"s": "邳州市"}] }, { "n": "常州", "a": [{"s": "天宁区"}, {"s": "钟楼区"}, {"s": "戚墅堰区"}, {"s": "新北区"}, {"s": "武进区"}, {"s": "溧阳市"}, {"s": "金坛市"}] }, { "n": "苏州", "a": [{"s": "沧浪区"}, {"s": "平江区"}, {"s": "金阊区"}, {"s": "虎丘区"}, {"s": "吴中区"}, {"s": "相城区"}, {"s": "常熟市"}, {"s": "张家港市"}, {"s": "昆山市"}, {"s": "吴江市"}, {"s": "太仓市"}] }, { "n": "南通", "a": [{"s": "崇川区"}, {"s": "港闸区"}, {"s": "海安县"}, {"s": "如东县"}, {"s": "启东市"}, {"s": "如皋市"}, {"s": "通州市"}, {"s": "海门市"}] }, { "n": "连云港", "a": [{"s": "连云区"}, {"s": "新浦区"}, {"s": "海州区"}, {"s": "赣榆县"}, {"s": "东海县"}, {"s": "灌云县"}, {"s": "灌南县"}] }, { "n": "淮安", "a": [{"s": "清河区"}, {"s": "楚州区"}, {"s": "淮阴区"}, {"s": "清浦区"}, {"s": "涟水县"}, {"s": "洪泽县"}, {"s": "盱眙县"}, {"s": "金湖县"}] }, { "n": "盐城", "a": [{"s": "亭湖区"}, {"s": "盐都区"}, {"s": "响水县"}, {"s": "滨海县"}, {"s": "阜宁县"}, {"s": "射阳县"}, {"s": "建湖县"}, {"s": "东台市"}, {"s": "大丰市"}] }, { "n": "扬州", "a": [{"s": "广陵区"}, {"s": "邗江区"}, {"s": "维扬区"}, {"s": "宝应县"}, {"s": "仪征市"}, {"s": "高邮市"}, {"s": "江都市"}] }, { "n": "镇江", "a": [{"s": "京口区"}, {"s": "润州区"}, {"s": "丹徒区"}, {"s": "丹阳市"}, {"s": "扬中市"}, {"s": "句容市"}] }, { "n": "泰州", "a": [{"s": "海陵区"}, {"s": "高港区"}, {"s": "兴化市"}, {"s": "靖江市"}, {"s": "泰兴市"}, {"s": "姜堰市"}] }, {"n": "宿迁", "a": [{"s": "宿城区"}, {"s": "宿豫区"}, {"s": "沭阳县"}, {"s": "泗阳县"}, {"s": "泗洪县"}]}] }, { "p": "浙江", "c": [{ "n": "杭州", "a": [{"s": "上城区"}, {"s": "下城区"}, {"s": "江干区"}, {"s": "拱墅区"}, {"s": 
"西湖区"}, {"s": "滨江区"}, {"s": "萧山区"}, {"s": "余杭区"}, {"s": "桐庐县"}, {"s": "淳安县"}, {"s": "建德市"}, {"s": "富阳市"}, {"s": "临安市"}] }, { "n": "宁波", "a": [{"s": "海曙区"}, {"s": "江东区"}, {"s": "江北区"}, {"s": "北仑区"}, {"s": "镇海区"}, {"s": "鄞州区"}, {"s": "象山县"}, {"s": "宁海县"}, {"s": "余姚市"}, {"s": "慈溪市"}, {"s": "奉化市"}] }, { "n": "温州", "a": [{"s": "鹿城区"}, {"s": "龙湾区"}, {"s": "瓯海区"}, {"s": "洞头县"}, {"s": "永嘉县"}, {"s": "平阳县"}, {"s": "苍南县"}, {"s": "文成县"}, {"s": "泰顺县"}, {"s": "瑞安市"}, {"s": "乐清市"}] }, { "n": "嘉兴", "a": [{"s": "南湖区"}, {"s": "秀洲区"}, {"s": "嘉善县"}, {"s": "海盐县"}, {"s": "海宁市"}, {"s": "平湖市"}, {"s": "桐乡市"}] }, {"n": "湖州", "a": [{"s": "吴兴区"}, {"s": "南浔区"}, {"s": "德清县"}, {"s": "长兴县"}, {"s": "安吉县"}]}, { "n": "绍兴", "a": [{"s": "越城区"}, {"s": "绍兴县"}, {"s": "新昌县"}, {"s": "诸暨市"}, {"s": "上虞市"}, {"s": "嵊州市"}] }, { "n": "金华", "a": [{"s": "婺城区"}, {"s": "金东区"}, {"s": "武义县"}, {"s": "浦江县"}, {"s": "磐安县"}, {"s": "兰溪市"}, {"s": "义乌市"}, {"s": "东阳市"}, {"s": "永康市"}] }, { "n": "衢州", "a": [{"s": "柯城区"}, {"s": "衢江区"}, {"s": "常山县"}, {"s": "开化县"}, {"s": "龙游县"}, {"s": "江山市"}] }, {"n": "舟山", "a": [{"s": "定海区"}, {"s": "普陀区"}, {"s": "岱山县"}, {"s": "嵊泗县"}]}, { "n": "台州", "a": [{"s": "椒江区"}, {"s": "黄岩区"}, {"s": "路桥区"}, {"s": "玉环县"}, {"s": "三门县"}, {"s": "天台县"}, {"s": "仙居县"}, {"s": "温岭市"}, {"s": "临海市"}] }, { "n": "丽水", "a": [{"s": "莲都区"}, {"s": "青田县"}, {"s": "缙云县"}, {"s": "遂昌县"}, {"s": "松阳县"}, {"s": "云和县"}, {"s": "庆元县"}, {"s": "景宁畲族自治县"}, {"s": "龙泉市"}] }] }, { "p": "安徽", "c": [{ "n": "合肥", "a": [{"s": "瑶海区"}, {"s": "庐阳区"}, {"s": "蜀山区"}, {"s": "包河区"}, {"s": "长丰县"}, {"s": "肥东县"}, {"s": "肥西县"}] }, { "n": "芜湖", "a": [{"s": "镜湖区"}, {"s": "弋江区"}, {"s": "鸠江区"}, {"s": "三山区"}, {"s": "芜湖县"}, {"s": "繁昌县"}, {"s": "南陵县"}] }, { "n": "蚌埠", "a": [{"s": "龙子湖区"}, {"s": "蚌山区"}, {"s": "禹会区"}, {"s": "淮上区"}, {"s": "怀远县"}, {"s": "五河县"}, {"s": "固镇县"}] }, { "n": "淮南", "a": [{"s": "大通区"}, {"s": "田家庵区"}, {"s": "谢家集区"}, {"s": "八公山区"}, {"s": "潘集区"}, {"s": "凤台县"}] }, {"n": "马鞍山", "a": [{"s": "金家庄区"}, {"s": "花山区"}, {"s": "雨山区"}, {"s": "当涂县"}]}, { "n": "淮北", "a": [{"s": "杜集区"}, {"s": "相山区"}, {"s": "烈山区"}, {"s": "濉溪县"}] }, {"n": "铜陵", "a": [{"s": "铜官山区"}, {"s": "狮子山区"}, {"s": "郊区"}, {"s": "铜陵县"}]}, { "n": "安庆", "a": [{"s": "迎江区"}, {"s": "大观区"}, {"s": "宜秀区"}, {"s": "怀宁县"}, {"s": "枞阳县"}, {"s": "潜山县"}, {"s": "太湖县"}, {"s": "宿松县"}, {"s": "望江县"}, {"s": "岳西县"}, {"s": "桐城市"}] }, { "n": "黄山", "a": [{"s": "屯溪区"}, {"s": "黄山区"}, {"s": "徽州区"}, {"s": "歙县"}, {"s": "休宁县"}, {"s": "黟县"}, {"s": "祁门县"}] }, { "n": "滁州", "a": [{"s": "琅琊区"}, {"s": "南谯区"}, {"s": "来安县"}, {"s": "全椒县"}, {"s": "定远县"}, {"s": "凤阳县"}, {"s": "天长市"}, {"s": "明光市"}] }, { "n": "阜阳", "a": [{"s": "颍州区"}, {"s": "颍东区"}, {"s": "颍泉区"}, {"s": "临泉县"}, {"s": "太和县"}, {"s": "阜南县"}, {"s": "颍上县"}, {"s": "界首市"}] }, {"n": "宿州", "a": [{"s": "埇桥区"}, {"s": "砀山县"}, {"s": "萧县"}, {"s": "灵璧县"}, {"s": "泗县"}]}, { "n": "巢湖", "a": [{"s": "居巢区"}, {"s": "庐江县"}, {"s": "无为县"}, {"s": "含山县"}, {"s": "和县"}] }, { "n": "六安", "a": [{"s": "金安区"}, {"s": "裕安区"}, {"s": "寿县"}, {"s": "霍邱县"}, {"s": "舒城县"}, {"s": "金寨县"}, {"s": "霍山县"}] }, {"n": "亳州", "a": [{"s": "谯城区"}, {"s": "涡阳县"}, {"s": "蒙城县"}, {"s": "利辛县"}]}, { "n": "池州", "a": [{"s": "贵池区"}, {"s": "东至县"}, {"s": "石台县"}, {"s": "青阳县"}] }, { "n": "宣城", "a": [{"s": "宣州区"}, {"s": "郎溪县"}, {"s": "广德县"}, {"s": "泾县"}, {"s": "绩溪县"}, {"s": "旌德县"}, {"s": "宁国市"}] }] }, { "p": "福建", "c": [{ "n": "福州", "a": [{"s": "鼓楼区"}, {"s": "台江区"}, {"s": "仓山区"}, {"s": "马尾区"}, {"s": "晋安区"}, {"s": "闽侯县"}, {"s": "连江县"}, {"s": "罗源县"}, {"s": "闽清县"}, {"s": "永泰县"}, {"s": "平潭县"}, {"s": "福清市"}, {"s": "长乐市"}] }, { "n": "厦门", "a": [{"s": "思明区"}, {"s": "海沧区"}, 
{"s": "湖里区"}, {"s": "集美区"}, {"s": "同安区"}, {"s": "翔安区"}] }, {"n": "莆田", "a": [{"s": "城厢区"}, {"s": "涵江区"}, {"s": "荔城区"}, {"s": "秀屿区"}, {"s": "仙游县"}]}, { "n": "三明", "a": [{"s": "梅列区"}, {"s": "三元区"}, {"s": "明溪县"}, {"s": "清流县"}, {"s": "宁化县"}, {"s": "大田县"}, {"s": "尤溪县"}, {"s": "沙县"}, {"s": "将乐县"}, {"s": "泰宁县"}, {"s": "建宁县"}, {"s": "永安市"}] }, { "n": "泉州", "a": [{"s": "鲤城区"}, {"s": "丰泽区"}, {"s": "洛江区"}, {"s": "泉港区"}, {"s": "惠安县"}, {"s": "安溪县"}, {"s": "永春县"}, {"s": "德化县"}, {"s": "金门县"}, {"s": "石狮市"}, {"s": "晋江市"}, {"s": "南安市"}] }, { "n": "漳州", "a": [{"s": "芗城区"}, {"s": "龙文区"}, {"s": "云霄县"}, {"s": "漳浦县"}, {"s": "诏安县"}, {"s": "长泰县"}, {"s": "东山县"}, {"s": "南靖县"}, {"s": "平和县"}, {"s": "华安县"}, {"s": "龙海市"}] }, { "n": "南平", "a": [{"s": "延平区"}, {"s": "顺昌县"}, {"s": "浦城县"}, {"s": "光泽县"}, {"s": "松溪县"}, {"s": "政和县"}, {"s": "邵武市"}, {"s": "武夷山市"}, {"s": "建瓯市"}, {"s": "建阳市"}] }, { "n": "龙岩", "a": [{"s": "新罗区"}, {"s": "长汀县"}, {"s": "永定县"}, {"s": "上杭县"}, {"s": "武平县"}, {"s": "连城县"}, {"s": "漳平市"}] }, { "n": "宁德", "a": [{"s": "蕉城区"}, {"s": "霞浦县"}, {"s": "古田县"}, {"s": "屏南县"}, {"s": "寿宁县"}, {"s": "周宁县"}, {"s": "柘荣县"}, {"s": "福安市"}, {"s": "福鼎市"}] }] }, { "p": "江西", "c": [{ "n": "南昌", "a": [{"s": "东湖区"}, {"s": "西湖区"}, {"s": "青云谱区"}, {"s": "湾里区"}, {"s": "青山湖区"}, {"s": "南昌县"}, {"s": "新建县"}, {"s": "安义县"}, {"s": "进贤县"}] }, {"n": "景德镇", "a": [{"s": "昌江区"}, {"s": "珠山区"}, {"s": "浮梁县"}, {"s": "乐平市"}]}, { "n": "萍乡", "a": [{"s": "安源区"}, {"s": "湘东区"}, {"s": "莲花县"}, {"s": "上栗县"}, {"s": "芦溪县"}] }, { "n": "九江", "a": [{"s": "庐山区"}, {"s": "浔阳区"}, {"s": "九江县"}, {"s": "武宁县"}, {"s": "修水县"}, {"s": "永修县"}, {"s": "德安县"}, {"s": "星子县"}, {"s": "都昌县"}, {"s": "湖口县"}, {"s": "彭泽县"}, {"s": "瑞昌市"}] }, {"n": "新余", "a": [{"s": "渝水区"}, {"s": "分宜县"}]}, { "n": "鹰潭", "a": [{"s": "月湖区"}, {"s": "余江县"}, {"s": "贵溪市"}] }, { "n": "赣州", "a": [{"s": "章贡区"}, {"s": "赣县"}, {"s": "信丰县"}, {"s": "大余县"}, {"s": "上犹县"}, {"s": "崇义县"}, {"s": "安远县"}, {"s": "龙南县"}, {"s": "定南县"}, {"s": "全南县"}, {"s": "宁都县"}, {"s": "于都县"}, {"s": "兴国县"}, {"s": "会昌县"}, {"s": "寻乌县"}, {"s": "石城县"}, {"s": "瑞金市"}, {"s": "南康市"}] }, { "n": "吉安", "a": [{"s": "吉州区"}, {"s": "青原区"}, {"s": "吉安县"}, {"s": "吉水县"}, {"s": "峡江县"}, {"s": "新干县"}, {"s": "永丰县"}, {"s": "泰和县"}, {"s": "遂川县"}, {"s": "万安县"}, {"s": "安福县"}, {"s": "永新县"}, {"s": "井冈山市"}] }, { "n": "宜春", "a": [{"s": "袁州区"}, {"s": "奉新县"}, {"s": "万载县"}, {"s": "上高县"}, {"s": "宜丰县"}, {"s": "靖安县"}, {"s": "铜鼓县"}, {"s": "丰城市"}, {"s": "樟树市"}, {"s": "高安市"}] }, { "n": "抚州", "a": [{"s": "临川区"}, {"s": "南城县"}, {"s": "黎川县"}, {"s": "南丰县"}, {"s": "崇仁县"}, {"s": "乐安县"}, {"s": "宜黄县"}, {"s": "金溪县"}, {"s": "资溪县"}, {"s": "东乡县"}, {"s": "广昌县"}] }, { "n": "上饶", "a": [{"s": "信州区"}, {"s": "上饶县"}, {"s": "广丰县"}, {"s": "玉山县"}, {"s": "铅山县"}, {"s": "横峰县"}, {"s": "弋阳县"}, {"s": "余干县"}, {"s": "鄱阳县"}, {"s": "万年县"}, {"s": "婺源县"}, {"s": "德兴市"}] }, { "n": "丰城" }] }, { "p": "山东", "c": [{ "n": "济南", "a": [{"s": "历下区"}, {"s": "市中区"}, {"s": "槐荫区"}, {"s": "天桥区"}, {"s": "历城区"}, {"s": "长清区"}, {"s": "平阴县"}, {"s": "济阳县"}, {"s": "商河县"}, {"s": "章丘市"}] }, { "n": "青岛", "a": [{"s": "市南区"}, {"s": "市北区"}, {"s": "四方区"}, {"s": "黄岛区"}, {"s": "崂山区"}, {"s": "李沧区"}, {"s": "城阳区"}, {"s": "胶州市"}, {"s": "即墨市"}, {"s": "平度市"}, {"s": "胶南市"}, {"s": "莱西市"}] }, { "n": "淄博", "a": [{"s": "淄川区"}, {"s": "张店区"}, {"s": "博山区"}, {"s": "临淄区"}, {"s": "周村区"}, {"s": "桓台县"}, {"s": "高青县"}, {"s": "沂源县"}] }, { "n": "枣庄", "a": [{"s": "市中区"}, {"s": "薛城区"}, {"s": "峄城区"}, {"s": "台儿庄区"}, {"s": "山亭区"}, {"s": "滕州市"}] }, {"n": "东营", "a": [{"s": "东营区"}, {"s": "河口区"}, {"s": "垦利县"}, {"s": "利津县"}, {"s": "广饶县"}]}, { "n": "烟台", "a": [{"s": "芝罘区"}, {"s": "福山区"}, 
{"s": "牟平区"}, {"s": "莱山区"}, {"s": "长岛县"}, {"s": "龙口市"}, {"s": "莱阳市"}, {"s": "莱州市"}, {"s": "蓬莱市"}, {"s": "招远市"}, {"s": "栖霞市"}, {"s": "海阳市"}] }, { "n": "潍坊", "a": [{"s": "潍城区"}, {"s": "寒亭区"}, {"s": "坊子区"}, {"s": "奎文区"}, {"s": "临朐县"}, {"s": "昌乐县"}, {"s": "青州市"}, {"s": "诸城市"}, {"s": "寿光市"}, {"s": "安丘市"}, {"s": "高密市"}, {"s": "昌邑市"}] }, { "n": "济宁", "a": [{"s": "市中区"}, {"s": "任城区"}, {"s": "微山县"}, {"s": "鱼台县"}, {"s": "金乡县"}, {"s": "嘉祥县"}, {"s": "汶上县"}, {"s": "泗水县"}, {"s": "梁山县"}, {"s": "曲阜市"}, {"s": "兖州市"}, {"s": "邹城市"}] }, { "n": "泰安", "a": [{"s": "泰山区"}, {"s": "岱岳区"}, {"s": "宁阳县"}, {"s": "东平县"}, {"s": "新泰市"}, {"s": "肥城市"}] }, {"n": "威海", "a": [{"s": "环翠区"}, {"s": "文登市"}, {"s": "荣成市"}, {"s": "乳山市"}]}, { "n": "日照", "a": [{"s": "东港区"}, {"s": "岚山区"}, {"s": "五莲县"}, {"s": "莒县"}] }, {"n": "莱芜", "a": [{"s": "莱城区"}, {"s": "钢城区"}]}, { "n": "临沂", "a": [{"s": "兰山区"}, {"s": "罗庄区"}, {"s": "河东区"}, {"s": "沂南县"}, {"s": "郯城县"}, {"s": "沂水县"}, {"s": "苍山县"}, {"s": "费县"}, {"s": "平邑县"}, {"s": "莒南县"}, {"s": "蒙阴县"}, {"s": "临沭县"}] }, { "n": "德州", "a": [{"s": "德城区"}, {"s": "陵县"}, {"s": "宁津县"}, {"s": "庆云县"}, {"s": "临邑县"}, {"s": "齐河县"}, {"s": "平原县"}, {"s": "夏津县"}, {"s": "武城县"}, {"s": "乐陵市"}, {"s": "禹城市"}] }, { "n": "聊城", "a": [{"s": "东昌府区"}, {"s": "阳谷县"}, {"s": "莘县"}, {"s": "茌平县"}, {"s": "东阿县"}, {"s": "冠县"}, {"s": "高唐县"}, {"s": "临清市"}] }, { "n": "滨州", "a": [{"s": "滨城区"}, {"s": "惠民县"}, {"s": "阳信县"}, {"s": "无棣县"}, {"s": "沾化县"}, {"s": "博兴县"}, {"s": "邹平县"}] }, { "n": "菏泽", "a": [{"s": "牡丹区"}, {"s": "曹县"}, {"s": "单县"}, {"s": "成武县"}, {"s": "巨野县"}, {"s": "郓城县"}, {"s": "鄄城县"}, {"s": "定陶县"}, {"s": "东明县"}] },{"n":"邹城"}] }, { "p": "河南", "c": [{ "n": "郑州", "a": [{"s": "中原区"}, {"s": "二七区"}, {"s": "管城回族区"}, {"s": "金水区"}, {"s": "上街区"}, {"s": "惠济区"}, {"s": "中牟县"}, {"s": "巩义市"}, {"s": "荥阳市"}, {"s": "新密市"}, {"s": "新郑市"}, {"s": "登封市"}] }, { "n": "开封", "a": [{"s": "龙亭区"}, {"s": "顺河回族区"}, {"s": "鼓楼区"}, {"s": "禹王台区"}, {"s": "金明区"}, {"s": "杞县"}, {"s": "通许县"}, {"s": "尉氏县"}, {"s": "开封县"}, {"s": "兰考县"}] }, { "n": "洛阳", "a": [{"s": "老城区"}, {"s": "西工区"}, {"s": "廛河回族区"}, {"s": "涧西区"}, {"s": "吉利区"}, {"s": "洛龙区"}, {"s": "孟津县"}, {"s": "新安县"}, {"s": "栾川县"}, {"s": "嵩县"}, {"s": "汝阳县"}, {"s": "宜阳县"}, {"s": "洛宁县"}, {"s": "伊川县"}, {"s": "偃师市"}] }, { "n": "平顶山", "a": [{"s": "新华区"}, {"s": "卫东区"}, {"s": "石龙区"}, {"s": "湛河区"}, {"s": "宝丰县"}, {"s": "叶县"}, {"s": "鲁山县"}, {"s": "郏县"}, {"s": "舞钢市"}, {"s": "汝州市"}] }, { "n": "安阳", "a": [{"s": "文峰区"}, {"s": "北关区"}, {"s": "殷都区"}, {"s": "龙安区"}, {"s": "安阳县"}, {"s": "汤阴县"}, {"s": "滑县"}, {"s": "内黄县"}, {"s": "林州市"}] }, {"n": "鹤壁", "a": [{"s": "鹤山区"}, {"s": "山城区"}, {"s": "淇滨区"}, {"s": "浚县"}, {"s": "淇县"}]}, { "n": "新乡", "a": [{"s": "红旗区"}, {"s": "卫滨区"}, {"s": "凤泉区"}, {"s": "牧野区"}, {"s": "新乡县"}, {"s": "获嘉县"}, {"s": "原阳县"}, {"s": "延津县"}, {"s": "封丘县"}, {"s": "长垣县"}, {"s": "卫辉市"}, {"s": "辉县市"}] }, { "n": "焦作", "a": [{"s": "解放区"}, {"s": "中站区"}, {"s": "马村区"}, {"s": "山阳区"}, {"s": "修武县"}, {"s": "博爱县"}, {"s": "武陟县"}, {"s": "温县"}, {"s": "沁阳市"}, {"s": "孟州市"}] }, { "n": "濮阳", "a": [{"s": "华龙区"}, {"s": "清丰县"}, {"s": "南乐县"}, {"s": "范县"}, {"s": "台前县"}, {"s": "濮阳县"}] }, { "n": "许昌", "a": [{"s": "魏都区"}, {"s": "许昌县"}, {"s": "鄢陵县"}, {"s": "襄城县"}, {"s": "禹州市"}, {"s": "长葛市"}] }, {"n": "漯河", "a": [{"s": "源汇区"}, {"s": "郾城区"}, {"s": "召陵区"}, {"s": "舞阳县"}, {"s": "临颍县"}]}, { "n": "三门峡", "a": [{"s": "湖滨区"}, {"s": "渑池县"}, {"s": "陕县"}, {"s": "卢氏县"}, {"s": "义马市"}, {"s": "灵宝市"}] }, { "n": "南阳", "a": [{"s": "宛城区"}, {"s": "卧龙区"}, {"s": "南召县"}, {"s": "方城县"}, {"s": "西峡县"}, {"s": "镇平县"}, {"s": "内乡县"}, {"s": "淅川县"}, {"s": "社旗县"}, {"s": "唐河县"}, {"s": "新野县"}, {"s": 
"桐柏县"}, {"s": "邓州市"}] }, { "n": "商丘", "a": [{"s": "梁园区"}, {"s": "睢阳区"}, {"s": "民权县"}, {"s": "睢县"}, {"s": "宁陵县"}, {"s": "柘城县"}, {"s": "虞城县"}, {"s": "夏邑县"}, {"s": "永城市"}] }, { "n": "信阳", "a": [{"s": "浉河区"}, {"s": "平桥区"}, {"s": "罗山县"}, {"s": "光山县"}, {"s": "新县"}, {"s": "商城县"}, {"s": "固始县"}, {"s": "潢川县"}, {"s": "淮滨县"}, {"s": "息县"}] }, { "n": "周口", "a": [{"s": "川汇区"}, {"s": "扶沟县"}, {"s": "西华县"}, {"s": "商水县"}, {"s": "沈丘县"}, {"s": "郸城县"}, {"s": "淮阳县"}, {"s": "太康县"}, {"s": "鹿邑县"}, {"s": "项城市"}] }, { "n": "驻马店", "a": [{"s": "驿城区"}, {"s": "西平县"}, {"s": "上蔡县"}, {"s": "平舆县"}, {"s": "正阳县"}, {"s": "确山县"}, {"s": "泌阳县"}, {"s": "汝南县"}, {"s": "遂平县"}, {"s": "新蔡县"}] }, {"n": "济源", "a": [{"s": "济源"}]}] }, { "p": "湖北", "c": [{ "n": "武汉", "a": [{"s": "江岸区"}, {"s": "江汉区"}, {"s": "硚口区"}, {"s": "汉阳区"}, {"s": "武昌区"}, {"s": "青山区"}, {"s": "洪山区"}, {"s": "东西湖区"}, {"s": "汉南区"}, {"s": "蔡甸区"}, {"s": "江夏区"}, {"s": "黄陂区"}, {"s": "新洲区"}] }, { "n": "黄石", "a": [{"s": "黄石港区"}, {"s": "西塞山区"}, {"s": "下陆区"}, {"s": "铁山区"}, {"s": "阳新县"}, {"s": "大冶市"}] }, { "n": "十堰", "a": [{"s": "茅箭区"}, {"s": "张湾区"}, {"s": "郧县"}, {"s": "郧西县"}, {"s": "竹山县"}, {"s": "竹溪县"}, {"s": "房县"}, {"s": "丹江口市"}] }, { "n": "宜昌", "a": [{"s": "西陵区"}, {"s": "伍家岗区"}, {"s": "点军区"}, {"s": "猇亭区"}, {"s": "夷陵区"}, {"s": "远安县"}, {"s": "兴山县"}, {"s": "秭归县"}, {"s": "长阳土家族自治县"}, {"s": "五峰土家族自治县"}, {"s": "宜都市"}, {"s": "当阳市"}, {"s": "枝江市"}] }, { "n": "襄樊", "a": [{"s": "襄城区"}, {"s": "樊城区"}, {"s": "襄阳区"}, {"s": "南漳县"}, {"s": "谷城县"}, {"s": "保康县"}, {"s": "老河口市"}, {"s": "枣阳市"}, {"s": "宜城市"}] }, {"n": "鄂州", "a": [{"s": "梁子湖区"}, {"s": "华容区"}, {"s": "鄂城区"}]}, { "n": "荆门", "a": [{"s": "东宝区"}, {"s": "掇刀区"}, {"s": "京山县"}, {"s": "沙洋县"}, {"s": "钟祥市"}] }, { "n": "孝感", "a": [{"s": "孝南区"}, {"s": "孝昌县"}, {"s": "大悟县"}, {"s": "云梦县"}, {"s": "应城市"}, {"s": "安陆市"}, {"s": "汉川市"}] }, { "n": "荆州", "a": [{"s": "沙市区"}, {"s": "荆州区"}, {"s": "公安县"}, {"s": "监利县"}, {"s": "江陵县"}, {"s": "石首市"}, {"s": "洪湖市"}, {"s": "松滋市"}] }, { "n": "黄冈", "a": [{"s": "黄州区"}, {"s": "团风县"}, {"s": "红安县"}, {"s": "罗田县"}, {"s": "英山县"}, {"s": "浠水县"}, {"s": "蕲春县"}, {"s": "黄梅县"}, {"s": "麻城市"}, {"s": "武穴市"}] }, { "n": "咸宁", "a": [{"s": "咸安区"}, {"s": "嘉鱼县"}, {"s": "通城县"}, {"s": "崇阳县"}, {"s": "通山县"}, {"s": "赤壁市"}] }, {"n": "随州", "a": [{"s": "曾都区"}, {"s": "随县"}, {"s": "广水市"}]}, { "n": "恩施", "a": [{"s": "恩施市"}, {"s": "利川市"}, {"s": "建始县"}, {"s": "巴东县"}, {"s": "宣恩县"}, {"s": "咸丰县"}, {"s": "来凤县"}, {"s": "鹤峰县"}] }, {"n": "仙桃", "a": [{"s": "仙桃"}]}, {"n": "潜江", "a": [{"s": "潜江"}]}, { "n": "天门", "a": [{"s": "天门"}] }, {"n": "神农架", "a": [{"s": "神农架"}]}] }, { "p": "湖南", "c": [{ "n": "长沙", "a": [{"s": "芙蓉区"}, {"s": "天心区"}, {"s": "岳麓区"}, {"s": "开福区"}, {"s": "雨花区"}, {"s": "长沙县"}, {"s": "望城县"}, {"s": "宁乡县"}, {"s": "浏阳市"}] }, { "n": "株洲", "a": [{"s": "荷塘区"}, {"s": "芦淞区"}, {"s": "石峰区"}, {"s": "天元区"}, {"s": "株洲县"}, {"s": "攸县"}, {"s": "茶陵县"}, {"s": "炎陵县"}, {"s": "醴陵市"}] }, {"n": "湘潭", "a": [{"s": "雨湖区"}, {"s": "岳塘区"}, {"s": "湘潭县"}, {"s": "湘乡市"}, {"s": "韶山市"}]}, { "n": "衡阳", "a": [{"s": "珠晖区"}, {"s": "雁峰区"}, {"s": "石鼓区"}, {"s": "蒸湘区"}, {"s": "南岳区"}, {"s": "衡阳县"}, {"s": "衡南县"}, {"s": "衡山县"}, {"s": "衡东县"}, {"s": "祁东县"}, {"s": "耒阳市"}, {"s": "常宁市"}] }, { "n": "邵阳", "a": [{"s": "双清区"}, {"s": "大祥区"}, {"s": "北塔区"}, {"s": "邵东县"}, {"s": "新邵县"}, {"s": "邵阳县"}, {"s": "隆回县"}, {"s": "洞口县"}, {"s": "绥宁县"}, {"s": "新宁县"}, {"s": "城步苗族自治县"}, {"s": "武冈市"}] }, { "n": "岳阳", "a": [{"s": "岳阳楼区"}, {"s": "云溪区"}, {"s": "君山区"}, {"s": "岳阳县"}, {"s": "华容县"}, {"s": "湘阴县"}, {"s": "平江县"}, {"s": "汨罗市"}, {"s": "临湘市"}] }, { "n": "常德", "a": [{"s": "武陵区"}, {"s": "鼎城区"}, {"s": "安乡县"}, {"s": "汉寿县"}, {"s": 
"澧县"}, {"s": "临澧县"}, {"s": "桃源县"}, {"s": "石门县"}, {"s": "津市市"}] }, {"n": "张家界", "a": [{"s": "永定区"}, {"s": "武陵源区"}, {"s": "慈利县"}, {"s": "桑植县"}]}, { "n": "益阳", "a": [{"s": "资阳区"}, {"s": "赫山区"}, {"s": "南县"}, {"s": "桃江县"}, {"s": "安化县"}, {"s": "沅江市"}] }, { "n": "郴州", "a": [{"s": "北湖区"}, {"s": "苏仙区"}, {"s": "桂阳县"}, {"s": "宜章县"}, {"s": "永兴县"}, {"s": "嘉禾县"}, {"s": "临武县"}, {"s": "汝城县"}, {"s": "桂东县"}, {"s": "安仁县"}, {"s": "资兴市"}] }, { "n": "永州", "a": [{"s": "零陵区"}, {"s": "冷水滩区"}, {"s": "祁阳县"}, {"s": "东安县"}, {"s": "双牌县"}, {"s": "道县"}, {"s": "江永县"}, {"s": "宁远县"}, {"s": "蓝山县"}, {"s": "新田县"}, {"s": "江华瑶族自治县"}] }, { "n": "怀化", "a": [{"s": "鹤城区"}, {"s": "中方县"}, {"s": "沅陵县"}, {"s": "辰溪县"}, {"s": "溆浦县"}, {"s": "会同县"}, {"s": "麻阳苗族自治县"}, {"s": "新晃侗族自治县"}, {"s": "芷江侗族自治县"}, {"s": "靖州苗族侗族自治县"}, {"s": "通道侗族自治县"}, {"s": "洪江市"}] }, {"n": "娄底", "a": [{"s": "娄星区"}, {"s": "双峰县"}, {"s": "新化县"}, {"s": "冷水江市"}, {"s": "涟源市"}]}, { "n": "湘西", "a": [{"s": "吉首市"}, {"s": "泸溪县"}, {"s": "凤凰县"}, {"s": "花垣县"}, {"s": "保靖县"}, {"s": "古丈县"}, {"s": "永顺县"}, {"s": "龙山县"}] }] }, { "p": "广东", "c": [{ "n": "广州", "a": [{"s": "荔湾区"}, {"s": "越秀区"}, {"s": "海珠区"}, {"s": "天河区"}, {"s": "白云区"}, {"s": "黄埔区"}, {"s": "番禺区"}, {"s": "花都区"}, {"s": "南沙区"}, {"s": "萝岗区"}, {"s": "增城市"}, {"s": "从化市"}] }, { "n": "韶关", "a": [{"s": "武江区"}, {"s": "浈江区"}, {"s": "曲江区"}, {"s": "始兴县"}, {"s": "仁化县"}, {"s": "翁源县"}, {"s": "乳源瑶族自治县"}, {"s": "新丰县"}, {"s": "乐昌市"}, {"s": "南雄市"}] }, { "n": "深圳", "a": [{"s": "罗湖区"}, {"s": "福田区"}, {"s": "南山区"}, {"s": "宝安区"}, {"s": "龙岗区"}, {"s": "盐田区"}] }, {"n": "珠海", "a": [{"s": "香洲区"}, {"s": "斗门区"}, {"s": "金湾区"}]}, { "n": "汕头", "a": [{"s": "龙湖区"}, {"s": "金平区"}, {"s": "濠江区"}, {"s": "潮阳区"}, {"s": "潮南区"}, {"s": "澄海区"}, {"s": "南澳县"}] }, {"n": "佛山", "a": [{"s": "禅城区"}, {"s": "南海区"}, {"s": "顺德区"}, {"s": "三水区"}, {"s": "高明区"}]}, { "n": "江门", "a": [{"s": "蓬江区"}, {"s": "江海区"}, {"s": "新会区"}, {"s": "台山市"}, {"s": "开平市"}, {"s": "鹤山市"}, {"s": "恩平市"}] }, { "n": "湛江", "a": [{"s": "赤坎区"}, {"s": "霞山区"}, {"s": "坡头区"}, {"s": "麻章区"}, {"s": "遂溪县"}, {"s": "徐闻县"}, {"s": "廉江市"}, {"s": "雷州市"}, {"s": "吴川市"}] }, { "n": "茂名", "a": [{"s": "茂南区"}, {"s": "茂港区"}, {"s": "电白县"}, {"s": "高州市"}, {"s": "化州市"}, {"s": "信宜市"}] }, { "n": "肇庆", "a": [{"s": "端州区"}, {"s": "鼎湖区"}, {"s": "广宁县"}, {"s": "怀集县"}, {"s": "封开县"}, {"s": "德庆县"}, {"s": "高要市"}, {"s": "四会市"}] }, {"n": "惠州", "a": [{"s": "惠城区"}, {"s": "惠阳区"}, {"s": "博罗县"}, {"s": "惠东县"}, {"s": "龙门县"}]}, { "n": "梅州", "a": [{"s": "梅江区"}, {"s": "梅县"}, {"s": "大埔县"}, {"s": "丰顺县"}, {"s": "五华县"}, {"s": "平远县"}, {"s": "蕉岭县"}, {"s": "兴宁市"}] }, {"n": "汕尾", "a": [{"s": "城区"}, {"s": "海丰县"}, {"s": "陆河县"}, {"s": "陆丰市"}]}, { "n": "河源", "a": [{"s": "源城区"}, {"s": "紫金县"}, {"s": "龙川县"}, {"s": "连平县"}, {"s": "和平县"}, {"s": "东源县"}] }, {"n": "阳江", "a": [{"s": "江城区"}, {"s": "阳西县"}, {"s": "阳东县"}, {"s": "阳春市"}]}, { "n": "清远", "a": [{"s": "清城区"}, {"s": "佛冈县"}, {"s": "阳山县"}, {"s": "连山壮族瑶族自治县"}, {"s": "连南瑶族自治县"}, {"s": "清新县"}, {"s": "英德市"}, {"s": "连州市"}] }, {"n": "东莞", "a": [{"s": "东莞市"}]}, {"n": "中山", "a": [{"s": "中山市"}]}, { "n": "潮州", "a": [{"s": "湘桥区"}, {"s": "潮安县"}, {"s": "饶平县"}] }, {"n": "揭阳", "a": [{"s": "榕城区"}, {"s": "揭东县"}, {"s": "揭西县"}, {"s": "惠来县"}, {"s": "普宁市"}] }, { "n": "云浮", "a": [{"s": "云城区"}, {"s": "新兴县"}, {"s": "郁南县"}, {"s": "云安县"}, {"s": "罗定市"}] },{"n":"普宁"}] }, { "p": "广西", "c": [{ "n": "南宁", "a": [{"s": "兴宁区"}, {"s": "青秀区"}, {"s": "江南区"}, {"s": "西乡塘区"}, {"s": "良庆区"}, {"s": "邕宁区"}, {"s": "武鸣县"}, {"s": "隆安县"}, {"s": "马山县"}, {"s": "上林县"}, {"s": "宾阳县"}, {"s": "横县"}] }, { "n": "柳州", "a": [{"s": "城中区"}, {"s": "鱼峰区"}, {"s": "柳南区"}, {"s": "柳北区"}, {"s": 
"柳江县"}, {"s": "柳城县"}, {"s": "鹿寨县"}, {"s": "融安县"}, {"s": "融水苗族自治县"}, {"s": "三江侗族自治县"}] }, { "n": "桂林", "a": [{"s": "秀峰区"}, {"s": "叠彩区"}, {"s": "象山区"}, {"s": "七星区"}, {"s": "雁山区"}, {"s": "阳朔县"}, {"s": "临桂县"}, {"s": "灵川县"}, {"s": "全州县"}, {"s": "兴安县"}, {"s": "永福县"}, {"s": "灌阳县"}, {"s": "龙胜各族自治县"}, {"s": "资源县"}, {"s": "平乐县"}, {"s": "荔蒲县"}, {"s": "恭城瑶族自治县"}] }, { "n": "梧州", "a": [{"s": "万秀区"}, {"s": "蝶山区"}, {"s": "长洲区"}, {"s": "苍梧县"}, {"s": "藤县"}, {"s": "蒙山县"}, {"s": "岑溪市"}] }, {"n": "北海", "a": [{"s": "海城区"}, {"s": "银海区"}, {"s": "铁山港区"}, {"s": "合浦县"}]}, { "n": "防城港", "a": [{"s": "港口区"}, {"s": "防城区"}, {"s": "上思县"}, {"s": "东兴市"}] }, {"n": "钦州", "a": [{"s": "钦南区"}, {"s": "钦北区"}, {"s": "灵山县"}, {"s": "浦北县"}]}, { "n": "贵港", "a": [{"s": "港北区"}, {"s": "港南区"}, {"s": "覃塘区"}, {"s": "平南县"}, {"s": "桂平市"}] }, { "n": "玉林", "a": [{"s": "玉州区"}, {"s": "容县"}, {"s": "陆川县"}, {"s": "博白县"}, {"s": "兴业县"}, {"s": "北流市"}] }, { "n": "百色", "a": [{"s": "右江区"}, {"s": "田阳县"}, {"s": "田东县"}, {"s": "平果县"}, {"s": "德保县"}, {"s": "靖西县"}, {"s": "那坡县"}, {"s": "凌云县"}, {"s": "乐业县"}, {"s": "田林县"}, {"s": "西林县"}, {"s": "隆林各族自治县"}] }, {"n": "贺州", "a": [{"s": "八步区"}, {"s": "昭平县"}, {"s": "钟山县"}, {"s": "富川瑶族自治县"}]}, { "n": "河池", "a": [{"s": "金城江区"}, {"s": "南丹县"}, {"s": "天峨县"}, {"s": "凤山县"}, {"s": "东兰县"}, {"s": "罗城仫佬族自治县"}, {"s": "环江毛南族自治县"}, {"s": "巴马瑶族自治县"}, {"s": "都安瑶族自治县"}, {"s": "大化瑶族自治县"}, {"s": "宜州市"}] }, { "n": "来宾", "a": [{"s": "兴宾区"}, {"s": "忻城县"}, {"s": "象州县"}, {"s": "武宣县"}, {"s": "金秀瑶族自治县"}, {"s": "合山市"}] }, { "n": "崇左", "a": [{"s": "江洲区"}, {"s": "扶绥县"}, {"s": "宁明县"}, {"s": "龙州县"}, {"s": "大新县"}, {"s": "天等县"}, {"s": "凭祥市"}] }] }, { "p": "海南", "c": [{"n": "海口", "a": [{"s": "秀英区"}, {"s": "龙华区"}, {"s": "琼山区"}, {"s": "美兰区"}]}, { "n": "三亚", "a": [{"s": "三亚市"}] }, {"n": "五指山", "a": [{"s": "五指山"}]}, {"n": "琼海", "a": [{"s": "琼海"}]}, { "n": "儋州", "a": [{"s": "儋州"}] }, {"n": "文昌", "a": [{"s": "文昌"}]}, {"n": "万宁", "a": [{"s": "万宁"}]}, {"n": "东方", "a": [{"s": "东方"}]}] }, { "p": "重庆", "c": [{"n": "万州区"}, {"n": "涪陵区"}, {"n": "渝中区"}, {"n": "大渡口区"}, {"n": "江北区"}, {"n": "沙坪坝区"}, {"n": "九龙坡区"}, {"n": "南岸区"}, {"n": "北碚区"}, {"n": "万盛区"}, {"n": "双挢区"}, {"n": "渝北区"}, {"n": "巴南区"}, {"n": "长寿区"}, {"n": "綦江县"}, {"n": "潼南县"}, {"n": "铜梁县"}, {"n": "大足县"}, {"n": "荣昌县"}, {"n": "壁山县"}, {"n": "梁平县"}, {"n": "城口县"}, {"n": "丰都县"}, {"n": "垫江县"}, {"n": "武隆县"}, {"n": "忠县"}, {"n": "开县"}, {"n": "云阳县"}, {"n": "奉节县"}, {"n": "巫山县"}, {"n": "巫溪县"}, {"n": "黔江区"}, {"n": "石柱土家族自治县"}, {"n": "秀山土家族苗族自治县"}, {"n": "酉阳土家族苗族自治县"}, {"n": "彭水苗族土家族自治县"}, {"n": "江津区"}, {"n": "合川区"}, {"n": "永川区"}, {"n": "南川区"}] }, { "p": "四川", "c": [{ "n": "成都", "a": [{"s": "锦江区"}, {"s": "青羊区"}, {"s": "金牛区"}, {"s": "武侯区"}, {"s": "成华区"}, {"s": "龙泉驿区"}, {"s": "青白江区"}, {"s": "新都区"}, {"s": "温江区"}, {"s": "金堂县"}, {"s": "双流县"}, {"s": "郫县"}, {"s": "大邑县"}, {"s": "蒲江县"}, {"s": "新津县"}, {"s": "都江堰市"}, {"s": "彭州市"}, {"s": "邛崃市"}, {"s": "崇州市"}] }, { "n": "自贡", "a": [{"s": "自流井区"}, {"s": "贡井区"}, {"s": "大安区"}, {"s": "沿滩区"}, {"s": "荣县"}, {"s": "富顺县"}] }, {"n": "攀枝花", "a": [{"s": "东区"}, {"s": "西区"}, {"s": "仁和区"}, {"s": "米易县"}, {"s": "盐边县"}]}, { "n": "泸州", "a": [{"s": "江阳区"}, {"s": "纳溪区"}, {"s": "龙马潭区"}, {"s": "泸县"}, {"s": "合江县"}, {"s": "叙永县"}, {"s": "古蔺县"}] }, { "n": "德阳", "a": [{"s": "旌阳区"}, {"s": "中江县"}, {"s": "罗江县"}, {"s": "广汉市"}, {"s": "什邡市"}, {"s": "绵竹市"}] }, { "n": "绵阳", "a": [{"s": "涪城区"}, {"s": "游仙区"}, {"s": "三台县"}, {"s": "盐亭县"}, {"s": "安县"}, {"s": "梓潼县"}, {"s": "北川羌族自治县"}, {"s": "平武县"}, {"s": "江油市"}] }, { "n": "广元", "a": [{"s": "利州区"}, {"s": "元坝区"}, {"s": "朝天区"}, {"s": "旺苍县"}, {"s": "青川县"}, {"s": "剑阁县"}, {"s": "苍溪县"}] 
}, {"n": "遂宁", "a": [{"s": "船山区"}, {"s": ">安居区"}, {"s": ">蓬溪县"}, {"s": ">射洪县"}, {"s": ">大英县"}]}, { "n": "内江", "a": [{"s": "市中区"}, {"s": "东兴区"}, {"s": "威远县"}, {"s": "资中县"}, {"s": "隆昌县"}] }, { "n": "乐山", "a": [{"s": "市中区"}, {"s": "沙湾区"}, {"s": "五通桥区"}, {"s": "金口河区"}, {"s": "犍为县"}, {"s": "井研县"}, {"s": "夹江县"}, {"s": "沐川县"}, {"s": "峨边彝族自治县"}, {"s": "马边彝族自治县"}, {"s": "峨眉山市"}] }, { "n": "南充", "a": [{"s": "顺庆区"}, {"s": "高坪区"}, {"s": "嘉陵区"}, {"s": "南部县"}, {"s": "营山县"}, {"s": "蓬安县"}, {"s": "仪陇县"}, {"s": "西充县"}, {"s": "阆中市"}] }, { "n": "眉山", "a": [{"s": "东坡区"}, {"s": "仁寿县"}, {"s": "彭山县"}, {"s": "洪雅县"}, {"s": "丹棱县"}, {"s": "青神县"}] }, { "n": "宜宾", "a": [{"s": "翠屏区"}, {"s": "宜宾县"}, {"s": "南溪县"}, {"s": "江安县"}, {"s": "长宁县"}, {"s": "高县"}, {"s": "珙县"}, {"s": "筠连县"}, {"s": "兴文县"}, {"s": "屏山县"}] }, {"n": "广安", "a": [{"s": "广安区"}, {"s": "岳池县"}, {"s": "武胜县"}, {"s": "邻水县"}, {"s": "华蓥市"}]}, { "n": "达川", "a": [{"s": "通川区"}, {"s": "达县"}, {"s": "宣汉县"}, {"s": "开江县"}, {"s": "大竹县"}, {"s": "渠县"}, {"s": "万源市"}] }, { "n": "雅安", "a": [{"s": "雨城区"}, {"s": "名山县"}, {"s": "荥经县"}, {"s": "汉源县"}, {"s": "石棉县"}, {"s": "天全县"}, {"s": "芦山县"}, {"s": "宝兴县"}] }, {"n": "巴中", "a": [{"s": "巴州区"}, {"s": "通江县"}, {"s": "南江县"}, {"s": "平昌县"}]}, { "n": "资阳", "a": [{"s": "雁江区"}, {"s": "安岳县"}, {"s": "乐至县"}, {"s": "简阳市"}] }, { "n": "阿坝", "a": [{"s": "汶川县"}, {"s": "理县"}, {"s": "茂县"}, {"s": "松潘县"}, {"s": "九寨沟县"}, {"s": "金川县"}, {"s": "小金县"}, {"s": "黑水县"}, {"s": "马尔康县"}, {"s": "壤塘县"}, {"s": "阿坝县"}, {"s": "若尔盖县"}, {"s": "红原县"}] }, { "n": "甘孜", "a": [{"s": "康定县"}, {"s": "泸定县"}, {"s": "丹巴县"}, {"s": "九龙县"}, {"s": "雅江县"}, {"s": "道孚县"}, {"s": "炉霍县"}, {"s": "甘孜县"}, {"s": "新龙县"}, {"s": "德格县"}, {"s": "白玉县"}, {"s": "石渠县"}, {"s": "色达县"}, {"s": "理塘县"}, {"s": "巴塘县"}, {"s": "乡城县"}, {"s": "稻城县"}, {"s": "得荣县"}] }, { "n": "凉山", "a": [{"s": "西昌市"}, {"s": "木里藏族自治县"}, {"s": "盐源县"}, {"s": "德昌县"}, {"s": "会理县"}, {"s": "会东县"}, {"s": "宁南县"}, {"s": "普格县"}, {"s": "布拖县"}, {"s": "金阳县"}, {"s": "昭觉县"}, {"s": "喜德县"}, {"s": "冕宁县"}, {"s": "越西县"}, {"s": "甘洛县"}, {"s": "美姑县"}, {"s": "雷波县"}] }] }, { "p": "贵州", "c": [{ "n": "贵阳", "a": [{"s": "南明区"}, {"s": "云岩区"}, {"s": "花溪区"}, {"s": "乌当区"}, {"s": "白云区"}, {"s": "小河区"}, {"s": "开阳县"}, {"s": "息烽县"}, {"s": "修文县"}, {"s": "清镇市"}] }, {"n": "六盘水", "a": [{"s": "钟山区"}, {"s": "六枝特区"}, {"s": "水城县"}, {"s": "盘县"}]}, { "n": "遵义", "a": [{"s": "红花岗区"}, {"s": "汇川区"}, {"s": "遵义县"}, {"s": "桐梓县"}, {"s": "绥阳县"}, {"s": "正安县"}, {"s": "道真仡佬族苗族自治县"}, {"s": "务川仡佬族苗族自治县"}, {"s": "凤冈县"}, {"s": "湄潭县"}, {"s": "余庆县"}, {"s": "习水县"}, {"s": "赤水市"}, {"s": "仁怀市"}] }, { "n": "安顺", "a": [{"s": "西秀区"}, {"s": "平坝县"}, {"s": "普定县"}, {"s": "镇宁布依族苗族自治县"}, {"s": "关岭布依族苗族自治县"}, {"s": "紫云苗族布依族自治县"}] }, { "n": "铜仁", "a": [{"s": "铜仁市"}, {"s": "江口县"}, {"s": "玉屏侗族自治县"}, {"s": "石阡县"}, {"s": "思南县"}, {"s": "印江土家族苗族自治县"}, {"s": "德江县"}, {"s": "沿河土家族自治县"}, {"s": "松桃苗族自治县"}, {"s": "万山特区"}] }, { "n": "黔西南", "a": [{"s": "兴义市"}, {"s": "兴仁县"}, {"s": "普安县"}, {"s": "晴隆县"}, {"s": "贞丰县"}, {"s": "望谟县"}, {"s": "册亨县"}, {"s": "安龙县"}] }, { "n": "毕节", "a": [{"s": "毕节市"}, {"s": "大方县"}, {"s": "黔西县"}, {"s": "金沙县"}, {"s": "织金县"}, {"s": "纳雍县"}, {"s": "威宁彝族回族苗族自治县"}, {"s": "赫章县"}] }, { "n": "黔东南", "a": [{"s": "凯里市"}, {"s": "黄平县"}, {"s": "施秉县"}, {"s": "三穗县"}, {"s": "镇远县"}, {"s": "岑巩县"}, {"s": "天柱县"}, {"s": "锦屏县"}, {"s": "剑河县"}, {"s": "台江县"}, {"s": "黎平县"}, {"s": "榕江县"}, {"s": "从江县"}, {"s": "雷山县"}, {"s": "麻江县"}, {"s": "丹寨县"}] }, { "n": "黔南", "a": [{"s": "都匀市"}, {"s": "福泉市"}, {"s": "荔波县"}, {"s": "贵定县"}, {"s": "瓮安县"}, {"s": "独山县"}, {"s": "平塘县"}, {"s": "罗甸县"}, {"s": "长顺县"}, {"s": "龙里县"}, {"s": "惠水县"}, {"s": "三都水族自治县"}] }] }, 
{ "p": "云南", "c": [{ "n": "昆明", "a": [{"s": "五华区"}, {"s": "盘龙区"}, {"s": "官渡区"}, {"s": "西山区"}, {"s": "东川区"}, {"s": "呈贡县"}, {"s": "晋宁县"}, {"s": "富民县"}, {"s": "宜良县"}, {"s": "石林彝族自治县"}, {"s": "嵩明县"}, {"s": "禄劝彝族苗族自治县"}, {"s": "寻甸回族彝族自治县"}, {"s": "安宁市"}] }, { "n": "曲靖", "a": [{"s": "麒麟区"}, {"s": "马龙县"}, {"s": "陆良县"}, {"s": "师宗县"}, {"s": "罗平县"}, {"s": "富源县"}, {"s": "会泽县"}, {"s": "沾益县"}, {"s": "宣威市"}] }, { "n": "玉溪", "a": [{"s": "红塔区"}, {"s": "江川县"}, {"s": "澄江县"}, {"s": "通海县"}, {"s": "华宁县"}, {"s": "易门县"}, {"s": "峨山彝族自治县"}, {"s": "新平彝族傣族自治县"}, {"s": "元江哈尼族彝族傣族自治县"}] }, {"n": "保山", "a": [{"s": "隆阳区"}, {"s": "施甸县"}, {"s": "腾冲县"}, {"s": "龙陵县"}, {"s": "昌宁县"}]}, { "n": "昭通", "a": [{"s": "昭阳区"}, {"s": "鲁甸县"}, {"s": "巧家县"}, {"s": "盐津县"}, {"s": "大关县"}, {"s": "永善县"}, {"s": "绥江县"}, {"s": "镇雄县"}, {"s": "彝良县"}, {"s": "威信县"}, {"s": "水富县"}] }, { "n": "丽江", "a": [{"s": "古城区"}, {"s": "玉龙纳西族自治县"}, {"s": "永胜县"}, {"s": "华坪县"}, {"s": "宁蒗彝族自治县"}] }, { "n": "普洱", "a": [{"s": "思茅区"}, {"s": "宁洱镇"}, {"s": "墨江哈尼族自治县"}, {"s": "景东彝族自治县"}, {"s": "景谷傣族彝族自治县"}, {"s": "镇沅彝族哈尼族拉祜族自治县"}, {"s": "江城哈尼族彝族自治县"}, {"s": "孟连傣族拉祜族佤族自治县"}, {"s": "澜沧拉祜族自治县"}, {"s": "西盟佤族自治县"}] }, { "n": "临沧", "a": [{"s": "临翔区"}, {"s": "凤庆县"}, {"s": "云县"}, {"s": "永德县"}, {"s": "镇康县"}, {"s": "双江拉祜族佤族布朗族傣族自治县"}, {"s": "耿马傣族佤族自治县"}, {"s": "沧源佤族自治县"}] }, { "n": "楚雄", "a": [{"s": "楚雄市"}, {"s": "双柏县"}, {"s": "牟定县"}, {"s": "南华县"}, {"s": "姚安县"}, {"s": "大姚县"}, {"s": "永仁县"}, {"s": "元谋县"}, {"s": "武定县"}, {"s": "禄丰县"}] }, { "n": "红河", "a": [{"s": "个旧市"}, {"s": "开远市"}, {"s": "蒙自县"}, {"s": "屏边苗族自治县"}, {"s": "建水县"}, {"s": "石屏县"}, {"s": "弥勒县"}, {"s": "泸西县"}, {"s": "元阳县"}, {"s": "红河县"}, {"s": "金平苗族瑶族傣族自治县"}, {"s": "绿春县"}, {"s": "河口瑶族自治县"}] }, { "n": "文山", "a": [{"s": "文山县"}, {"s": "砚山县"}, {"s": "西畴县"}, {"s": "麻栗坡县"}, {"s": "马关县"}, {"s": "丘北县"}, {"s": "广南县"}, {"s": "富宁县"}] }, {"n": "西双版纳", "a": [{"s": "景洪市"}, {"s": "勐海县"}, {"s": "勐腊县"}]}, { "n": "大理", "a": [{"s": "大理市"}, {"s": "漾濞彝族自治县"}, {"s": "祥云县"}, {"s": "宾川县"}, {"s": "弥渡县"}, {"s": "南涧彝族自治县"}, {"s": "巍山彝族回族自治县"}, {"s": "永平县"}, {"s": "云龙县"}, {"s": "洱源县"}, {"s": "剑川县"}, {"s": "鹤庆县"}] }, {"n": "德宏", "a": [{"s": "瑞丽市"}, {"s": "潞西市"}, {"s": "梁河县"}, {"s": "盈江县"}, {"s": "陇川县"}]}, { "n": "怒江傈", "a": [{"s": "泸水县"}, {"s": "福贡县"}, {"s": "贡山独龙族怒族自治县"}, {"s": "兰坪白族普米族自治县"}] }, {"n": "迪庆", "a": [{"s": "香格里拉县"}, {"s": "德钦县"}, {"s": "维西傈僳族自治县"}] },{"n": "安宁"} ] }, { "p": "西藏", "c": [{ "n": "拉萨", "a": [{"s": "城关区"}, {"s": "林周县"}, {"s": "当雄县"}, {"s": "尼木县"}, {"s": "曲水县"}, {"s": "堆龙德庆县"}, {"s": "达孜县"}, {"s": "墨竹工卡县"}] }, { "n": "昌都", "a": [{"s": "昌都县"}, {"s": "江达县"}, {"s": "贡觉县"}, {"s": "类乌齐县"}, {"s": "丁青县"}, {"s": "察雅县"}, {"s": "八宿县"}, {"s": "左贡县"}, {"s": "芒康县"}, {"s": "洛隆县"}, {"s": "边坝县"}] }, { "n": "山南", "a": [{"s": "乃东县"}, {"s": "扎囊县"}, {"s": "贡嘎县"}, {"s": "桑日县"}, {"s": "琼结县"}, {"s": "曲松县"}, {"s": "措美县"}, {"s": "洛扎县"}, {"s": "加查县"}, {"s": "隆子县"}, {"s": "错那县"}, {"s": "浪卡子县"}] }, { "n": "日喀则", "a": [{"s": "日喀则市"}, {"s": "南木林县"}, {"s": "江孜县"}, {"s": "定日县"}, {"s": "萨迦县"}, {"s": "拉孜县"}, {"s": "昂仁县"}, {"s": "谢通门县"}, {"s": "白朗县"}, {"s": "仁布县"}, {"s": "康马县"}, {"s": "定结县"}, {"s": "仲巴县"}, {"s": "亚东县"}, {"s": "吉隆县"}, {"s": "聂拉木县"}, {"s": "萨嘎县"}, {"s": "岗巴县"}] }, { "n": "那曲", "a": [{"s": "那曲县"}, {"s": "嘉黎县"}, {"s": "比如县"}, {"s": "聂荣县"}, {"s": "安多县"}, {"s": "申扎县"}, {"s": "索县"}, {"s": "班戈县"}, {"s": "巴青县"}, {"s": "尼玛县"}] }, { "n": "阿里", "a": [{"s": "普兰县"}, {"s": "札达县"}, {"s": "噶尔县"}, {"s": "日土县"}, {"s": "革吉县"}, {"s": "改则县"}, {"s": "措勤县"}] }, { "n": "林芝", "a": [{"s": "林芝县"}, {"s": "工布江达县"}, {"s": "米林县"}, {"s": "墨脱县"}, {"s": "波密县"}, {"s": "察隅县"}, {"s": 
"朗县"}] }] }, { "p": "陕西", "c": [{ "n": "西安", "a": [{"s": "新城区"}, {"s": "碑林区"}, {"s": "莲湖区"}, {"s": "灞桥区"}, {"s": "未央区"}, {"s": "雁塔区"}, {"s": "阎良区"}, {"s": "临潼区"}, {"s": "长安区"}, {"s": "蓝田县"}, {"s": "周至县"}, {"s": "户县"}, {"s": "高陵县"}] }, {"n": "铜川", "a": [{"s": "王益区"}, {"s": "印台区"}, {"s": "耀州区"}, {"s": "宜君县"}]}, { "n": "宝鸡", "a": [{"s": "渭滨区"}, {"s": "金台区"}, {"s": "陈仓区"}, {"s": "凤翔县"}, {"s": "岐山县"}, {"s": "扶风县"}, {"s": "眉县"}, {"s": "陇县"}, {"s": "千阳县"}, {"s": "麟游县"}, {"s": "凤县"}, {"s": "太白县"}] }, { "n": "咸阳", "a": [{"s": "秦都区"}, {"s": "杨凌区"}, {"s": "渭城区"}, {"s": "三原县"}, {"s": "泾阳县"}, {"s": "乾县"}, {"s": "礼泉县"}, {"s": "永寿县"}, {"s": "彬县"}, {"s": "长武县"}, {"s": "旬邑县"}, {"s": "淳化县"}, {"s": "武功县"}, {"s": "兴平市"}] }, { "n": "渭南", "a": [{"s": "临渭区"}, {"s": "华县"}, {"s": "潼关县"}, {"s": "大荔县"}, {"s": "合阳县"}, {"s": "澄城县"}, {"s": "蒲城县"}, {"s": "白水县"}, {"s": "富平县"}, {"s": "韩城市"}, {"s": "华阴市"}] }, { "n": "延安", "a": [{"s": "宝塔区"}, {"s": "延长县"}, {"s": "延川县"}, {"s": "子长县"}, {"s": "安塞县"}, {"s": "志丹县"}, {"s": "吴起县"}, {"s": "甘泉县"}, {"s": "富县"}, {"s": "洛川县"}, {"s": "宜川县"}, {"s": "黄龙县"}, {"s": "黄陵县"}] }, { "n": "汉中", "a": [{"s": "汉台区"}, {"s": "南郑县"}, {"s": "城固县"}, {"s": "洋县"}, {"s": "西乡县"}, {"s": "勉县"}, {"s": "宁强县"}, {"s": "略阳县"}, {"s": "镇巴县"}, {"s": "留坝县"}, {"s": "佛坪县"}] }, { "n": "榆林", "a": [{"s": "榆阳区"}, {"s": "神木县"}, {"s": "府谷县"}, {"s": "横山县"}, {"s": "靖边县"}, {"s": "定边县"}, {"s": "绥德县"}, {"s": "米脂县"}, {"s": "佳县"}, {"s": "吴堡县"}, {"s": "清涧县"}, {"s": "子洲县"}] }, { "n": "安康", "a": [{"s": "汉滨区"}, {"s": "汉阴县"}, {"s": "石泉县"}, {"s": "宁陕县"}, {"s": "紫阳县"}, {"s": "岚皋县"}, {"s": "平利县"}, {"s": "镇坪县"}, {"s": "旬阳县"}, {"s": "白河县"}] }, { "n": "商洛", "a": [{"s": "商州区"}, {"s": "洛南县"}, {"s": "丹凤县"}, {"s": "商南县"}, {"s": "山阳县"}, {"s": "镇安县"}, {"s": "柞水县"}] }] }, { "p": "甘肃", "c": [{ "n": "兰州", "a": [{"s": "区(县)"}, {"s": "城关区"}, {"s": "七里河区"}, {"s": "西固区"}, {"s": "安宁区"}, {"s": "红古区"}, {"s": "永登县"}, {"s": "皋兰县"}, {"s": "榆中县"}] }, {"n": "嘉峪关", "a": [{"s": "嘉峪关市"}]}, {"n": "金昌", "a": [{"s": "金川区"}, {"s": "永昌县"}]}, { "n": "白银", "a": [{"s": "白银区"}, {"s": "平川区"}, {"s": "靖远县"}, {"s": "会宁县"}, {"s": "景泰县"}] }, { "n": "天水", "a": [{"s": "秦城区"}, {"s": "麦积区"}, {"s": "清水县"}, {"s": "秦安县"}, {"s": "甘谷县"}, {"s": "武山县"}, {"s": "张家川回族自治县"}] }, {"n": "武威", "a": [{"s": "凉州区"}, {"s": "民勤县"}, {"s": "古浪县"}, {"s": "天祝藏族自治县"}]}, { "n": "张掖", "a": [{"s": "甘州区"}, {"s": "肃南裕固族自治县"}, {"s": "民乐县"}, {"s": "临泽县"}, {"s": "高台县"}, {"s": "山丹县"}] }, { "n": "平凉", "a": [{"s": "崆峒区"}, {"s": "泾川县"}, {"s": "灵台县"}, {"s": "崇信县"}, {"s": "华亭县"}, {"s": "庄浪县"}, {"s": "静宁县"}] }, { "n": "酒泉", "a": [{"s": "肃州区"}, {"s": "金塔县"}, {"s": "瓜州县"}, {"s": "肃北蒙古族自治县"}, {"s": "阿克塞哈萨克族自治县"}, {"s": "玉门市"}, {"s": "敦煌市"}] }, { "n": "庆阳", "a": [{"s": "西峰区"}, {"s": "庆城县"}, {"s": "环县"}, {"s": "华池县"}, {"s": "合水县"}, {"s": "正宁县"}, {"s": "宁县"}, {"s": "镇原县"}] }, { "n": "定西", "a": [{"s": "安定区"}, {"s": "通渭县"}, {"s": "陇西县"}, {"s": "渭源县"}, {"s": "临洮县"}, {"s": "漳县"}, {"s": "岷县"}] }, { "n": "陇南", "a": [{"s": "武都区"}, {"s": "成县"}, {"s": "文县"}, {"s": "宕昌县"}, {"s": "康县"}, {"s": "西和县"}, {"s": "礼县"}, {"s": "徽县"}, {"s": "两当县"}] }, { "n": "临夏", "a": [{"s": "临夏市"}, {"s": "临夏县"}, {"s": "康乐县"}, {"s": "永靖县"}, {"s": "广河县"}, {"s": "和政县"}, {"s": "东乡族自治县"}, {"s": "积石山保安族东乡族撒拉族自治县"}] }, { "n": "甘南", "a": [{"s": "合作市"}, {"s": "临潭县"}, {"s": "卓尼县"}, {"s": "舟曲县"}, {"s": "迭部县"}, {"s": "玛曲县"}, {"s": "碌曲县"}, {"s": "夏河县"}] }] }, { "p": "青海", "c": [{ "n": "西宁", "a": [{"s": "城东区"}, {"s": "城中区"}, {"s": "城西区"}, {"s": "城北区"}, {"s": "大通回族土族自治县"}, {"s": "湟中县"}, {"s": "湟源县"}] }, { "n": "海东", "a": [{"s": "平安县"}, {"s": "民和回族土族自治县"}, {"s": "乐都县"}, {"s": 
"互助土族自治县"}, {"s": "化隆回族自治县"}, {"s": "循化撒拉族自治县"}] }, {"n": "海北", "a": [{"s": "门源回族自治县"}, {"s": "祁连县"}, {"s": "海晏县"}, {"s": "刚察县"}]}, { "n": "黄南", "a": [{"s": "同仁县"}, {"s": "尖扎县"}, {"s": "泽库县"}, {"s": "河南蒙古族自治县"}] }, {"n": "海南", "a": [{"s": "共和县"}, {"s": "同德县"}, {"s": "贵德县"}, {"s": "兴海县"}, {"s": "贵南县"}]}, { "n": "果洛", "a": [{"s": "玛沁县"}, {"s": "班玛县"}, {"s": "甘德县"}, {"s": "达日县"}, {"s": "久治县"}, {"s": "玛多县"}] }, { "n": "玉树", "a": [{"s": "玉树县"}, {"s": "杂多县"}, {"s": "称多县"}, {"s": "治多县"}, {"s": "囊谦县"}, {"s": "曲麻莱县"}] }, {"n": "梅西", "a": [{"s": "格尔木市"}, {"s": "德令哈市"}, {"s": "乌兰县"}, {"s": "都兰县"}, {"s": "天峻县"}]}] }, { "p": "宁夏", "c": [{ "n": "银川", "a": [{"s": "兴庆区"}, {"s": "西夏区"}, {"s": "金凤区"}, {"s": "永宁县"}, {"s": "贺兰县"}, {"s": "灵武市"}] }, {"n": "石嘴山", "a": [{"s": "大武口区"}, {"s": "惠农区"}, {"s": "平罗县"}]}, { "n": "吴忠", "a": [{"s": "利通区"}, {"s": "红寺堡区"}, {"s": "盐池县"}, {"s": "同心县"}, {"s": "青铜峡市"}] }, {"n": "固原", "a": [{"s": "原州区"}, {"s": "西吉县"}, {"s": "隆德县"}, {"s": "泾源县"}, {"s": "彭阳县"}]}, { "n": "中卫", "a": [{"s": "沙坡头区"}, {"s": "中宁县"}, {"s": "海原县"}] }] }, { "p": "新疆", "c": [{ "n": "乌鲁木齐", "a": [{"s": "天山区"}, {"s": "沙依巴克区"}, {"s": "新市区"}, {"s": "水磨沟区"}, {"s": "头屯河区"}, {"s": "达坂城区"}, {"s": "米东区"}, {"s": "乌鲁木齐县"}] }, {"n": "克拉玛依", "a": [{"s": "独山子区"}, {"s": "克拉玛依区"}, {"s": "白碱滩区"}, {"s": "乌尔禾区"}]}, { "n": "吐鲁番", "a": [{"s": "吐鲁番市"}, {"s": "鄯善县"}, {"s": "托克逊县"}] }, {"n": "哈密", "a": [{"s": "哈密市"}, {"s": "巴里坤哈萨克自治县"}, {"s": "伊吾县"}]}, { "n": "昌吉", "a": [{"s": "昌吉市"}, {"s": "阜康市"}, {"s": "呼图壁县"}, {"s": "玛纳斯县"}, {"s": "奇台县"}, {"s": "吉木萨尔县"}, {"s": "木垒哈萨克自治县"}] }, {"n": "博尔塔拉", "a": [{"s": "博乐市"}, {"s": "精河县"}, {"s": "温泉县"}]}, { "n": "巴音郭楞", "a": [{"s": "库尔勒市"}, {"s": "轮台县"}, {"s": "尉犁县"}, {"s": "若羌县"}, {"s": "且末县"}, {"s": "焉耆回族自治县"}, {"s": "和静县"}, {"s": "和硕县"}, {"s": "博湖县"}] }, { "n": "阿克苏", "a": [{"s": "阿克苏市"}, {"s": "温宿县"}, {"s": "库车县"}, {"s": "沙雅县"}, {"s": "新和县"}, {"s": "拜城县"}, {"s": "乌什县"}, {"s": "阿瓦提县"}, {"s": "柯坪县"}] }, {"n": "克孜勒苏", "a": [{"s": "阿图什市"}, {"s": "阿克陶县"}, {"s": "阿合奇县"}, {"s": "乌恰县"}]}, { "n": "喀什", "a": [{"s": "喀什市"}, {"s": "疏附县"}, {"s": "疏勒县"}, {"s": "英吉沙县"}, {"s": "泽普县"}, {"s": "莎车县"}, {"s": "叶城县"}, {"s": "麦盖提县"}, {"s": "岳普湖县"}, {"s": "伽师县"}, {"s": "巴楚县"}, {"s": "塔什库尔干县塔吉克自治"}] }, { "n": "和田", "a": [{"s": "和田市"}, {"s": "和田县"}, {"s": "墨玉县"}, {"s": "皮山县"}, {"s": "洛浦县"}, {"s": "策勒县"}, {"s": "于田县"}, {"s": "民丰县"}] }, { "n": "伊犁", "a": [{"s": "伊宁市"}, {"s": "奎屯市"}, {"s": "伊宁县"}, {"s": "察布查尔锡伯自治县"}, {"s": "霍城县"}, {"s": "巩留县"}, {"s": "新源县"}, {"s": "昭苏县"}, {"s": "特克斯县"}, {"s": "尼勒克县"}] }, { "n": "塔城", "a": [{"s": "塔城市"}, {"s": "乌苏市"}, {"s": "额敏县"}, {"s": "沙湾县"}, {"s": "托里县"}, {"s": "裕民县"}, {"s": "和布克赛尔蒙古自治县"}] }, { "n": "阿勒泰", "a": [{"s": "阿勒泰市"}, {"s": "布尔津县"}, {"s": "富蕴县"}, {"s": "福海县"}, {"s": "哈巴河县"}, {"s": "青河县"}, {"s": "吉木乃县"}] }, {"n": "石河子", "a": [{"s": "石河子"}]}, {"n": "阿拉尔", "a": [{"s": "阿拉尔"}]}, { "n": "图木舒克", "a": [{"s": "图木舒克"}] }, {"n": "五家渠", "a": [{"s": "五家渠"}] }, {"n": "轮台"}] }, { "p": "香港", "c": [{"n": "中西区"}, {"n": "东区"}, {"n": "九龙城区"}, {"n": "观塘区"}, {"n": "南区"}, {"n": "深水区"}, {"n": "湾仔区"}, {"n": "黄大仙区"}, {"n": "油尖旺区"}, {"n": "离岛区"}, {"n": "葵青区"}, {"n": "北区"}, {"n": "西贡区"}, {"n": "沙田区"}, {"n": "屯门区"}, {"n": "大埔区"}, {"n": "荃湾区"}, {"n": "元朗区"}] }, { "p": "澳门", "c": [{"n": "花地玛堂区"}, {"n": "圣安多尼堂区"}, {"n": "大堂区"}, {"n": "望德堂区"}, {"n": "风顺堂区"}, {"n": "嘉模堂区"}, {"n": "圣方济各堂区"}] }, { "p": "台湾", "c": [{"n": "台北市"}, {"n": "高雄市"}, {"n": "基隆市"}, {"n": "台中市"}, {"n": "台南市"}, {"n": "新竹市"}, {"n": "嘉义市"}, {"n": "台北县"}, {"n": "宜兰县"}, {"n": "新竹县"}, {"n": "桃园县"}, {"n": "苗栗县"}, {"n": "台中县"}, {"n": "彰化县"}, 
{"n": "南投县"}, {"n": "嘉义县"}, {"n": "云林县"}, {"n": "台南县"}, {"n": "高雄县"}, {"n": "屏东县"}, {"n": "台东县"}, {"n": "花莲县"}, {"n": "澎湖县"}] }, {"p": "国外","c":[{"n":"其他"}]}] }
PypiClean
/questions-three-3.14.2.0.tar.gz/questions-three-3.14.2.0/questions_three/reporters/result_compiler/result_compiler.py
from datetime import datetime from questions_three.constants import TestEvent, TestStatus from questions_three.event_broker import EventBroker, subscribe_event_handlers from twin_sister import dependency from .suite_results import SuiteResults, TestResult def current_time(): return dependency(datetime).now() class ResultCompiler: def __init__(self): self.results = SuiteResults() def _find_or_create_test(self, test_name): for candidate in self.results.tests: if candidate.name == test_name: return candidate test = TestResult(test_name=test_name) test.start_time = current_time() test.status = TestStatus.running self.results.tests.append(test) return test def activate(self): subscribe_event_handlers(self) def on_artifact_created(self, artifact, test_name=None, **kwargs): if test_name: self._find_or_create_test(test_name).artifacts.append(artifact) else: self.results.artifacts.append(artifact) def on_suite_started(self, suite_name, **kwargs): self.results.suite_start_time = current_time() self.results.suite_name = suite_name def on_suite_ended(self, **kwargs): self.results.suite_end_time = current_time() EventBroker.publish(event=TestEvent.suite_results_compiled, suite_results=self.results) def on_suite_erred(self, exception=None, **kwargs): self.results.suite_exception = exception or Exception("Suite erred. No exception was provided.") def on_test_ended(self, test_name, **kwargs): test = self._find_or_create_test(test_name) test.end_time = current_time() if TestStatus.running == test.status: test.status = TestStatus.passed def on_test_erred(self, test_name, exception=None, **kwargs): test = self._find_or_create_test(test_name) test.status = TestStatus.erred test.exception = exception def on_test_failed(self, test_name, exception=None, **kwargs): test = self._find_or_create_test(test_name) test.status = TestStatus.failed test.exception = exception def on_test_skipped(self, test_name, exception=None, **kwargs): test = self._find_or_create_test(test_name) test.status = TestStatus.skipped test.exception = exception def on_test_started(self, test_name, **kwargs): self._find_or_create_test(test_name)
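A minimal usage sketch for the compiler above, driving its event handlers directly instead of through EventBroker subscription; only names visible in the file (the handler signatures, results.tests, and TestResult's name/status attributes) are relied on.

from questions_three.reporters.result_compiler.result_compiler import ResultCompiler

# Sketch: exercise the compiler by calling its handlers directly,
# so no EventBroker wiring (activate()) is needed here.
compiler = ResultCompiler()
compiler.on_suite_started(suite_name="smoke_suite")
compiler.on_test_started(test_name="test_login")
compiler.on_test_failed(test_name="test_login", exception=AssertionError("bad status"))
compiler.on_test_started(test_name="test_logout")
compiler.on_test_ended(test_name="test_logout")  # still "running", so it is marked passed

for test in compiler.results.tests:
    print(test.name, test.status)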
PypiClean
/peacockprint-0.1.0.tar.gz/peacockprint-0.1.0/CONTRIBUTING.rst
.. highlight:: shell ============ Contributing ============ Contributions are welcome, and they are greatly appreciated! Every little bit helps, and credit will always be given. You can contribute in many ways: Types of Contributions ---------------------- Report Bugs ~~~~~~~~~~~ Report bugs at https://github.com/gvido-berzins/peacockprint/issues. If you are reporting a bug, please include: * Your operating system name and version. * Any details about your local setup that might be helpful in troubleshooting. * Detailed steps to reproduce the bug. Fix Bugs ~~~~~~~~ Look through the GitHub issues for bugs. Anything tagged with "bug" and "help wanted" is open to whoever wants to implement it. Implement Features ~~~~~~~~~~~~~~~~~~ Look through the GitHub issues for features. Anything tagged with "enhancement" and "help wanted" is open to whoever wants to implement it. Write Documentation ~~~~~~~~~~~~~~~~~~~ PeacockPrint could always use more documentation, whether as part of the official PeacockPrint docs, in docstrings, or even on the web in blog posts, articles, and such. Submit Feedback ~~~~~~~~~~~~~~~ The best way to send feedback is to file an issue at https://github.com/gvido-berzins/peacockprint/issues. If you are proposing a feature: * Explain in detail how it would work. * Keep the scope as narrow as possible, to make it easier to implement. * Remember that this is a volunteer-driven project, and that contributions are welcome :) Get Started! ------------ Ready to contribute? Here's how to set up `peacockprint` for local development. 1. Fork the `peacockprint` repo on GitHub. 2. Clone your fork locally:: $ git clone [email protected]:your_name_here/peacockprint.git 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: $ mkvirtualenv peacockprint $ cd peacockprint/ $ python setup.py develop 4. Create a branch for local development:: $ git checkout -b name-of-your-bugfix-or-feature Now you can make your changes locally. 5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox:: $ flake8 peacockprint tests $ python setup.py test or pytest $ tox To get flake8 and tox, just pip install them into your virtualenv. 6. Commit your changes and push your branch to GitHub:: $ git add . $ git commit -m "Your detailed description of your changes." $ git push origin name-of-your-bugfix-or-feature 7. Submit a pull request through the GitHub website. Pull Request Guidelines ----------------------- Before you submit a pull request, check that it meets these guidelines: 1. The pull request should include tests. 2. If the pull request adds functionality, the docs should be updated. Put your new functionality into a function with a docstring, and add the feature to the list in README.rst. 3. The pull request should work for Python 3.5, 3.6, 3.7 and 3.8, and for PyPy. Check https://travis-ci.com/gvido-berzins/peacockprint/pull_requests and make sure that the tests pass for all supported Python versions. Tips ---- To run a subset of tests:: $ python -m unittest tests.test_peacockprint Deploying --------- A reminder for the maintainers on how to deploy. Make sure all your changes are committed (including an entry in HISTORY.rst). Then run:: $ bump2version patch # possible: major / minor / patch $ git push $ git push --tags Travis will then deploy to PyPI if tests pass.
PypiClean
/keyfactor_v_1_client-1.0.3-py3-none-any.whl/keyfactor_v_1_client/api/user/user_get_user.py
from typing import Any, Dict, Optional, Union import httpx from ...client import Client from ...models.models_ssh_users_ssh_user_response import ModelsSSHUsersSshUserResponse from ...types import Response, Unset def _get_kwargs( id: int, *, client: Client, x_keyfactor_api_version: Union[Unset, str] = "1", x_keyfactor_requested_with: str = "APIClient", ) -> Dict[str, Any]: url = "{}/SSH/Users/{id}".format(client.base_url, id=id) headers: Dict[str, str] = client.get_headers() cookies: Dict[str, Any] = client.get_cookies() if not isinstance(x_keyfactor_api_version, Unset): headers["x-keyfactor-api-version"] = x_keyfactor_api_version headers["x-keyfactor-requested-with"] = x_keyfactor_requested_with return { "method": "get", "url": url, "headers": headers, "cookies": cookies, "timeout": client.get_timeout(), } def _parse_response(*, response: httpx.Response) -> Optional[ModelsSSHUsersSshUserResponse]: if response.status_code == 200: response_200 = ModelsSSHUsersSshUserResponse.from_dict(response.json()) return response_200 return None def _build_response(*, response: httpx.Response) -> Response[ModelsSSHUsersSshUserResponse]: return Response( status_code=response.status_code, content=response.content, headers=response.headers, parsed=_parse_response(response=response), ) def sync_detailed( id: int, *, client: Client, x_keyfactor_api_version: Union[Unset, str] = "1", x_keyfactor_requested_with: str = "APIClient", ) -> Response[ModelsSSHUsersSshUserResponse]: """Looks up information about an existing SSH user. Args: id (int): x_keyfactor_api_version (Union[Unset, str]): Default: '1'. x_keyfactor_requested_with (str): Default: 'APIClient'. Returns: Response[ModelsSSHUsersSshUserResponse] """ kwargs = _get_kwargs( id=id, client=client, x_keyfactor_api_version=x_keyfactor_api_version, x_keyfactor_requested_with=x_keyfactor_requested_with, ) response = httpx.request( verify=client.verify_ssl, **kwargs, ) return _build_response(response=response) def sync( id: int, *, client: Client, x_keyfactor_api_version: Union[Unset, str] = "1", x_keyfactor_requested_with: str = "APIClient", ) -> Optional[ModelsSSHUsersSshUserResponse]: """Looks up information about an existing SSH user. Args: id (int): x_keyfactor_api_version (Union[Unset, str]): Default: '1'. x_keyfactor_requested_with (str): Default: 'APIClient'. Returns: Response[ModelsSSHUsersSshUserResponse] """ return sync_detailed( id=id, client=client, x_keyfactor_api_version=x_keyfactor_api_version, x_keyfactor_requested_with=x_keyfactor_requested_with, ).parsed async def asyncio_detailed( id: int, *, client: Client, x_keyfactor_api_version: Union[Unset, str] = "1", x_keyfactor_requested_with: str = "APIClient", ) -> Response[ModelsSSHUsersSshUserResponse]: """Looks up information about an existing SSH user. Args: id (int): x_keyfactor_api_version (Union[Unset, str]): Default: '1'. x_keyfactor_requested_with (str): Default: 'APIClient'. Returns: Response[ModelsSSHUsersSshUserResponse] """ kwargs = _get_kwargs( id=id, client=client, x_keyfactor_api_version=x_keyfactor_api_version, x_keyfactor_requested_with=x_keyfactor_requested_with, ) async with httpx.AsyncClient(verify=client.verify_ssl) as _client: response = await _client.request(**kwargs) return _build_response(response=response) async def asyncio( id: int, *, client: Client, x_keyfactor_api_version: Union[Unset, str] = "1", x_keyfactor_requested_with: str = "APIClient", ) -> Optional[ModelsSSHUsersSshUserResponse]: """Looks up information about an existing SSH user. 
Args: id (int): x_keyfactor_api_version (Union[Unset, str]): Default: '1'. x_keyfactor_requested_with (str): Default: 'APIClient'. Returns: Response[ModelsSSHUsersSshUserResponse] """ return ( await asyncio_detailed( id=id, client=client, x_keyfactor_api_version=x_keyfactor_api_version, x_keyfactor_requested_with=x_keyfactor_requested_with, ) ).parsed
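A hypothetical call sketch for this generated endpoint; the Client constructor arguments and the host URL are assumptions (the client class itself is defined elsewhere in the package), so adjust them to your deployment.

# Sketch only: base_url and headers are placeholders, and the Client signature is
# assumed from the generated client module imported by the file above.
from keyfactor_v_1_client.client import Client
from keyfactor_v_1_client.api.user import user_get_user

client = Client(
    base_url="https://keyfactor.example.com/KeyfactorAPI",  # hypothetical host
    headers={"Authorization": "Basic <credentials>"},       # hypothetical auth header
)

user = user_get_user.sync(id=42, client=client)  # blocking variant; asyncio() also exists
if user is not None:
    print(user)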
PypiClean
/odoo14_addon_account_cutoff_start_end_dates-14.0.1.1.0-py3-none-any.whl/odoo/addons/account_cutoff_start_end_dates/readme/DESCRIPTION.rst
This module allows you to easily compute the prepaid revenue/expenses and also the revenue/expense accruals by using the **Start Date** and **End Date** fields of invoice lines/journal items. For example, if you have an insurance contract invoiced in April 2020 that runs from April 1st 2020 to March 31st 2021, you will enter these dates as start and end dates on the supplier invoice line. If your fiscal year ends on December 31st 2020, 3 months of that expense belong to the 2021 fiscal year and should not be charged to the 2020 fiscal year. So, thanks to this module, you will create a *Prepaid Expense* on December 31st 2020 and Odoo will identify the 3 months of this expense that fall after the cut-off date and propose to generate the appropriate cut-off journal entry. Another example: you have a UPS invoice dated January 5th 2021 that covers the shipments of December 2020. When you encode this vendor bill, set the start date to December 1st 2020 and the end date to December 31st 2020. Then, thanks to this module, you will create an *Expense Accrual* dated December 31st 2020 that will generate a cut-off journal entry that "moves" the UPS expense from 2021 to 2020.
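The proration described above can be illustrated with plain date arithmetic; the sketch below only illustrates the idea and is not the module's actual computation.

# Illustration of the cut-off idea (not the module's code): how much of an
# expense with start/end dates falls after the cut-off date.
from datetime import date

def prepaid_portion(amount, start, end, cutoff):
    """Amount attributable to the days after the cut-off date (prorata temporis)."""
    total_days = (end - start).days + 1
    days_after = max(0, (end - cutoff).days)
    return amount * days_after / total_days

# Insurance covering 2020-04-01 .. 2021-03-31, fiscal year closing 2020-12-31:
# roughly the last 3 months (~25%) of the cost is a prepaid expense for 2021.
print(round(prepaid_portion(1200.0, date(2020, 4, 1), date(2021, 3, 31), date(2020, 12, 31)), 2))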
PypiClean
/Gene_POCKET-0.0.4-py3-none-any.whl/pocket/pocket.py
import pandas as pd import numpy as np import os import re import ipdb from pandas_plink import read_plink #from genes_function_svm import gene_function_svm from imp import reload from pocket.genes_function_svm import gene_function_svm from pocket.eqtl import limix_gwas from os.path import expanduser class pocket(): """ """ def __init__(self, leadSNP, chrom, region_start, region_end, gwas_res, pheno, bed_f, gene_annotation_df, vars_annotation_df, kinship, positive_sets_dict, feature_df, expression_df_dict=None, twas_df_dict=None, repeat_time=10, n_jobs=10, save_path = None, save_details = False): self.leadSNP = leadSNP self.chrom = chrom self.region_start = region_start self.region_end = region_end gwas_res = gwas_res.query("(Chr == '%s') & (ChrPos <= %s) & (ChrPos >= %s)" %(chrom, region_end, region_start)) gwas_res = gwas_res.sort_values(['PValue']) #gwas_res.index = gwas_res.SNP self.gwas_res = gwas_res anno_df = gene_annotation_df.query("chrom == '%s' & start >= %s & end <= %s" %(chrom, region_start, region_end)) self.anno_df = anno_df self.region_genes = list(anno_df.index) self.vars_anno = vars_annotation_df.query("Var_chrom == '%s' & Var_pos >= %s & Var_pos <= %s" %(chrom, region_start, region_end)) self.pheno = pheno self.bed_f = bed_f self.kinship = kinship self.positive_sets_dict = positive_sets_dict self.feature_df = feature_df self.expression_df_dict = expression_df_dict self.repeat_time = repeat_time self.n_jobs = n_jobs self.twas_df_dict = twas_df_dict self.hap_res = None self.expression_effect =None self.region_geno_mat = None self.region_snp_info_df = None self.snp_info_df = None self.ld_res = None self.gene_effect_res = None self.gf_score = None self.save_path = save_path self.save_details = save_details def make_geno_df_region(self, chrom, start, end): from limix.io import plink (bim, fam, bed) = plink.read(self.bed_f, verbose=False) bim.columns = ['chrom', 'rsid', 'cm', 'position', 'a0', 'a1', 'i'] d = bim.query(" chrom == '{0}' & position >={1} & position <= {2}".format(chrom, start, end)) geno_mat = bed[d.i.values, :].compute() geno_mat = pd.DataFrame(geno_mat, index = d.rsid, columns = fam.index ) self.region_geno_mat = geno_mat self.region_snp_info_df = d def make_geno_df_vars(self, var_ids): from limix.io import plink (bim, fam, bed) = plink.read(self.bed_f, verbose=False) bim.columns = ['chrom', 'rsid', 'cm', 'position', 'a0', 'a1', 'i'] d = bim.query("rsid in @var_ids") geno_mat = bed[d.i.values, :].compute() geno_mat = pd.DataFrame(geno_mat, index = d.rsid, columns = fam.index ) self.geno_mat = geno_mat self.snp_info_df = d def ld_caculation(self, plink_path=None, temp_path=None, interval =1000): """ """ import subprocess import random import glob import os SNP =self.leadSNP if temp_path is None: temp_path = expanduser("~")+'/temp' if not os.path.isdir(temp_path): os.mkdir(temp_path) rand_n = str(random.random()*1e7)[0:6] if plink_path is None: subprocess.call("plink --bfile %s --r2 --ld-snp %s --allow-extra-chr --ld-window-kb %d --ld-window 100000 --ld-window-r2 0 --out %s/%s_ld_res" %(self.bed_f, SNP, interval,temp_path, rand_n), shell=True) else: subprocess.call("%s --bfile %s --r2 --ld-snp %s --allow-extra-chr --ld-window-kb %d --ld-window 100000 --ld-window-r2 0 --out %s/%s_ld_res" %(plink_path, self.bed_f, SNP, interval,temp_path, rand_n), shell=True) ld_res = pd.read_table('%s/%s_ld_res.ld' %(temp_path,rand_n),sep='\s+') ld_res.index = ld_res.loc[:,'SNP_B'] ld_res = ld_res[~ld_res.index.duplicated(keep='first')] files = glob.glob('%s/%s_ld_res*' 
%(temp_path,rand_n)) for f in files: try: os.remove(f) except OSError: pass self.ld_res = ld_res def ev_caculation(self, ld_res=None, plink_path=None, temp_path=None, region_geno=None, ld_thresh= 0.4, effect_w_dict={'high':5,'moderate':3,'modifier':1,'low':1}): lmm_df = self.gwas_res gene_df = self.anno_df effect_anno = self.vars_anno i = (effect_anno.Annotation == 'upstream_gene_variant')|(effect_anno.Annotation == 'intragenic_variant')| (effect_anno.Annotation == 'intergenic_region')|(effect_anno.Annotation == 'downstream_gene_variant')|(effect_anno.Annotation == 'intron_variant')|(effect_anno.Annotation == 'N') ##remove non cds variant effect_anno = effect_anno[~i] if 'Imapct' in effect_anno.columns: effect_anno.loc[:,'Impact'] = effect_anno.Imapct if region_geno is None: self.make_geno_df_region(self.chrom, self.region_start, self.region_end) region_geno = self.region_snp_info_df geno_used = region_geno geno_used.index = region_geno.rsid effect_anno = effect_anno[effect_anno.index.isin(geno_used.index)] snps = [set(g.loc[['Ref_geno','Alt_geno']]) == set(geno_used.loc[i, ['a0','a1']]) for i,g in effect_anno.iterrows()] effect_anno = effect_anno.loc[snps,:] if ld_res is None: self.ld_caculation() ld_res = self.ld_res lmm_df.index = lmm_df.SNP lmm_df.index.name = None lmm_df.loc[:,'r2'] = ld_res.R2 #print(lmm_df.head()) #print(effect_anno.head()) #ipdb.sset_trace() anno_res = pd.merge(lmm_df, effect_anno, on='SNP',how='inner') anno_res = anno_res.query("r2 > {}".format(ld_thresh)) if anno_res.shape[0] == 0: gene_effect_res = pd.Series(0,index = gene_df.gene) else: anno_res.loc[:,'scaled_p'] = (-np.log10(anno_res.PValue) -min(-np.log10(anno_res.PValue)))/(max(-np.log10(anno_res.PValue)) - min(-np.log10(anno_res.PValue))) score_list = [] g_list = [] for g, res in anno_res.groupby('Gene'): score = max([x.scaled_p*effect_w_dict[x.Impact.lower()] for i,x in res.iterrows()]) score_list.append(score) g_list.append(g) gene_effect_res = pd.Series(score_list,index=g_list) gene_effect_res = (gene_effect_res - min(gene_effect_res))/(max(gene_effect_res) - min(gene_effect_res)) self.gene_effect_res = gene_effect_res if self.save_details and (self.save_path is not None): anno_res.to_csv('{}/{}_{}_{}_{}_{}.csv'.format(self.save_path, self.leadSNP, self.chrom, self.region_start, self.region_end, 'high_effect_vars_res') ) def gf_caculation(self): prob_list = [] repeat_time = self.repeat_time n_jobs = self.n_jobs feature_df = self.feature_df positive_sets_dict = self.positive_sets_dict r2_list = [] for k, posi_genes in positive_sets_dict.items(): feature_df_used = feature_df.loc[:,~feature_df.columns.str.lower().str.contains(k)] svm_res = gene_function_svm(feature_df_used, posi_genes, repeat_time, n_jobs) svm_res.mutiple_svm(self.region_genes) c, prob, score = svm_res.cat_res, svm_res.prob_res, svm_res.r2_list prob_list.append(prob) r2_list.append(score) prob_df = pd.DataFrame(prob_list, index = positive_sets_dict.keys()).T mean_score = prob_df.mean(axis=1) self.gf_score = mean_score if self.save_details and (self.save_path is not None): prob_df.loc['R2',:] = r2_list prob_df.loc[:,'Mean_socre'] = prob_df.mean(axis=1) prob_df = prob_df.sort_values(['Mean_socre'], ascending=False) prob_df.to_csv('{}/{}_{}_{}_{}_{}.csv'.format(self.save_path, self.leadSNP, self.chrom, self.region_start, self.region_end, 'gene_function_predicted_SVM_score') ) def topN_eQTL_asso(self, geno_mat, gene_express, snp_info_df): res = limix_gwas(geno_mat, gene_express, snp_info_df, kinship=self.kinship) #ipdb.sset_trace() 
res.data_check() res.maf_filter() res.mean_impute() g = res.do_gwas() g = g.sort_values(['pv20']) return g.iloc[0,:] def ee_caculation(self, ld_res =None, p_thresh=1e-5, ld_thresh=0.4, combine='mean'): from joblib import Parallel, delayed lmm_df = self.gwas_res twas_res_dict = self.twas_df_dict lmm_df.index = lmm_df.SNP n_job = self.n_jobs if ld_res is None: ld_res = self.ld_res lmm_df.loc[:,'r2'] = ld_res.R2 lmm_df_eqtl = lmm_df.query("r2 > {}".format(ld_thresh)) i = lmm_df_eqtl.PValue < p_thresh if sum(i) <=10: top_SNPs = lmm_df_eqtl.SNP[i] else: top_SNPs = lmm_df_eqtl.SNP[:10] if len(top_SNPs) < 2: top_SNPs = lmm_df_eqtl.SNP[:3] self.make_geno_df_vars(top_SNPs) #ipdb.sset_trace() twas_eqtl_res_df = pd.DataFrame() twas_eqtl_res_df.loc[:,'Gene'] = self.region_genes twas_eqtl_res_df.index = self.region_genes for k,express_df in self.expression_df_dict.items(): express_df = express_df[express_df.index.isin(self.region_genes)] #for i,g in express_df.iterrows(): # self.topN_eQTL_asso(self.geno_mat, g, self.snp_info_df) eqtl_res = Parallel(n_jobs= n_job)(delayed(self.topN_eQTL_asso)(self.geno_mat, g, self.snp_info_df) for i,g in express_df.iterrows()) eqtl_res = pd.DataFrame(eqtl_res) eqtl_res.index = express_df.index eqtl_res.loc[:,'Gene'] = express_df.index if twas_res_dict is not None: #eqtl_res = pd.merge(eqtl_res, self.anno_df, left_on='Gene',right_on='gene',how = 'inner') twas_eqtl_res = pd.merge(twas_res_dict[k], eqtl_res, on='Gene', how='inner') twas_eqtl_res.loc[:,'twas_p'] = -np.log10(twas_eqtl_res.lmm_p) twas_eqtl_res.loc[:,'eqtl_p'] = -np.log10(twas_eqtl_res.pv20) if self.save_details and (self.save_path is not None): twas_eqtl_res.loc[:,['Gene', 'twas_p', 'eqtl_p']].to_csv('{}/{}_{}_{}_{}_{}_expression_effect_res.csv'.format(self.save_path, self.leadSNP,self.chrom, self.region_start, self.region_end, k) ) region_genes = twas_eqtl_res.Gene twas_eqtl_res = twas_eqtl_res.loc[:,'twas_p']*twas_eqtl_res.loc[:,'eqtl_p'] twas_eqtl_res.index = region_genes twas_eqtl_res_df.loc[:,k] = twas_eqtl_res twas_eqtl_res.index = region_genes else: eqtl_res.loc[:,'eqtl_p'] = -np.log10(eqtl_res.pv20) if self.save_details and (self.save_path is not None): eqtl_res.loc[:,'eqtl_p'].to_csv('{}/{}_{}_{}_{}_{}_expression_effect_res.csv'.format(self.save_path, self.leadSNP,self.chrom, self.region_start, self.region_end, k)) twas_eqtl_res = eqtl_res.loc[:,'eqtl_p'] twas_eqtl_res_df.loc[:,k] = twas_eqtl_res twas_eqtl_res_df = twas_eqtl_res_df.fillna(0) if combine == 'max': expression_effect = twas_eqtl_res_df.loc[:, list(self.expression_df_dict.keys())] expression_effect = expression_effect.max(axis=1) else: expression_effect = twas_eqtl_res_df.loc[:, list(self.expression_df_dict.keys())] expression_effect = expression_effect.mean(axis=1) self.expression_effect = (expression_effect - min(expression_effect))/(max(expression_effect) - min(expression_effect)) def make_gene_hap_pos(self, promoter_len=2000): gene_df = self.anno_df position_list = [] for i,g in gene_df.iterrows(): if g.strand == '-': position_list.append([g.start -100, g.end + promoter_len]) else: position_list.append([g.start- promoter_len, g.end + 100]) return position_list def make_gene_hap_df(self, geno_df, start, end, return_min=True, n_clusters=[2,3,4,5,6], min_num=10): from scipy import cluster from limix.qc import mean_impute d = self.region_snp_info_df.query("position >= @start & position <= @end") snps = d.rsid geno_df = geno_df[geno_df.index.isin(snps)] geno = mean_impute(geno_df.values) geno_df.index.name =None if geno.shape[0] < 5: 
return 1 Z = cluster.hierarchy.linkage(geno.T) cutree = pd.DataFrame(cluster.hierarchy.cut_tree(Z, n_clusters=n_clusters),index= geno_df.columns, columns=n_clusters) hap_count = cutree.apply(pd.value_counts).fillna(0) hap_list =[] i_list = [] for i in cutree.columns: used_h = np.where(hap_count.loc[:,i] >= min_num)[0] #ipdb.sset_trace() if len(used_h) >1: h = np.array(cutree.loc[:,i]).astype('float') #ipdb.sset_trace() h[~np.isin(h, used_h)] = np.nan hap_list.append(h) i_list.append(i) else: continue hap_df = pd.DataFrame(hap_list,index=i_list, columns=geno_df.columns) if len(hap_list) < 1: return 1 else: if len(hap_df.shape) ==1: hap_df = pd.DataFrame([hap_df,hap_df], index=['h1','h2']) elif hap_df.shape[0] ==1: hap_df = hap_df.append(hap_df) hap_df.index = ['h1','h2'] snp_info_df = pd.DataFrame() snp_info_df.loc[:,'rsid'] = range(hap_df.shape[0]) snp_info_df.loc[:,'chrom'] = 1 snp_info_df.loc[:,'position'] = 1 #print(hap_df) res = limix_gwas(hap_df, self.pheno, snp_info_df, kinship=self.kinship) res.data_check() res.mean_impute() g = res.do_gwas() g = g.sort_values(['pv20']) if return_min: return min(g.pv20) else: return res def eh_caculation(self, region_geno=None, promoter_len=2000, n_clusters=[2,3,4,5,6]): from joblib import Parallel, delayed n_job= self.n_jobs if region_geno is None: self.make_geno_df_region(self.chrom, self.region_start, self.region_end) geno_df = self.region_geno_mat else: geno_df = region_geno position_list = self.make_gene_hap_pos(promoter_len = promoter_len) ##pos = position_list[0] ##p_list = [] ##for gene, pos in zip(self.anno_df.gene, position_list): ## print(gene) ## p_list.append(self.make_gene_hap_df(geno_df, pos[0], pos[1], n_clusters= n_clusters)) ##xx = self.make_gene_hap_df(geno_df, pos[0], pos[1], n_clusters= n_clusters) #ipdb.sset_trace() #return [geno_df, position_list] p_list = Parallel(n_jobs= n_job)(delayed(self.make_gene_hap_df)(geno_df, pos[0], pos[1], n_clusters= n_clusters) for pos in position_list) hap_p = pd.Series(p_list, index = self.anno_df.gene) if self.save_details and (self.save_path is not None): hap_p.sort_values().to_csv('{}/{}_{}_{}_{}_hap_association_res.csv'.format(self.save_path, self.leadSNP, self.chrom, self.region_start, self.region_end)) self.hap_res = (-np.log10(hap_p) - min(-np.log10(hap_p)))/(max(-np.log10(hap_p))- min(-np.log10(hap_p))) def pocket_summary(self): res_df = pd.DataFrame() res_df.loc[:,"Gene"] = self.anno_df.gene res_df.loc[:,'symbol'] = self.anno_df.loc[:,'symbol'] if self.gene_effect_res is None: self.ev_caculation() if self.gf_score is None: self.gf_caculation() if self.expression_df_dict is not None: if self.expression_effect is None: self.ee_caculation() if self.hap_res is None: self.eh_caculation() res_df.loc[:,'variation_effect'] = self.gene_effect_res res_df.loc[:,'expression_effect'] = self.expression_effect res_df.loc[:,'haplotype_effect'] = self.hap_res res_df.loc[:,'gene_function'] = self.gf_score res_df.loc[:,'variation_effect'] = res_df.loc[:,'variation_effect'].fillna(0) if self.expression_effect is None: res = res_df.gene_function*(res_df.variation_effect + 2*res_df.haplotype_effect) else: res = res_df.gene_function*(res_df.variation_effect + res_df.expression_effect + 2*res_df.haplotype_effect) res_df.loc[:,'summary_score'] = res res_df = res_df.sort_values(['summary_score'],ascending=False) res_df.loc[:,'Description'] = self.anno_df.loc[:,'description'] res_df.loc[:,'Description'] = ['' if pd.isnull(x) else x.split('[')[0] for x in res_df.loc[:,'Description']] self.summary_score = res_df 
if self.save_path is not None: res_df.to_csv('{0}/{1}_{2}_{3}_{4}_gene_prori_res.csv'.format(self.save_path, self.leadSNP, self.chrom, self.region_start, self.region_end))
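A toy recomputation of the scoring rule applied in pocket_summary() above, with made-up component scores, to make the weighting explicit.

# Toy example of the summary-score combination used in pocket_summary():
# gene_function * (variation_effect + expression_effect + 2 * haplotype_effect).
import pandas as pd

scores = pd.DataFrame({
    "gene_function":     [0.9, 0.4, 0.7],
    "variation_effect":  [1.0, 0.0, 0.2],
    "expression_effect": [0.3, 0.8, 0.0],
    "haplotype_effect":  [0.6, 0.1, 1.0],
}, index=["GeneA", "GeneB", "GeneC"])

scores["summary_score"] = scores["gene_function"] * (
    scores["variation_effect"] + scores["expression_effect"] + 2 * scores["haplotype_effect"]
)
print(scores.sort_values("summary_score", ascending=False))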
PypiClean
/python_antenna_pattern-0.0.1.tar.gz/python_antenna_pattern-0.0.1/docs/installation.rst
.. highlight:: shell ============ Installation ============ Stable release -------------- To install python-antenna-pattern, run this command in your terminal: .. code-block:: console $ pip install python_antenna_pattern This is the preferred method to install python-antenna-pattern, as it will always install the most recent stable release. If you don't have `pip`_ installed, this `Python installation guide`_ can guide you through the process. .. _pip: https://pip.pypa.io .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ From sources ------------ The sources for python-antenna-pattern can be downloaded from the `Github repo`_. You can either clone the public repository: .. code-block:: console $ git clone git://github.com/tyc85/python_antenna_pattern Or download the `tarball`_: .. code-block:: console $ curl -OJL https://github.com/tyc85/python_antenna_pattern/tarball/master Once you have a copy of the source, you can install it with: .. code-block:: console $ python setup.py install .. _Github repo: https://github.com/tyc85/python_antenna_pattern .. _tarball: https://github.com/tyc85/python_antenna_pattern/tarball/master
PypiClean
/nodejs_bin-18.4.0a3-py3-none-manylinux_2_12_x86_64.manylinux2010_x86_64.whl/nodejs/lib/node_modules/npm/node_modules/qrcode-terminal/vendor/QRCode/QRUtil.js
var QRMode = require('./QRMode'); var QRPolynomial = require('./QRPolynomial'); var QRMath = require('./QRMath'); var QRMaskPattern = require('./QRMaskPattern'); var QRUtil = { PATTERN_POSITION_TABLE : [ [], [6, 18], [6, 22], [6, 26], [6, 30], [6, 34], [6, 22, 38], [6, 24, 42], [6, 26, 46], [6, 28, 50], [6, 30, 54], [6, 32, 58], [6, 34, 62], [6, 26, 46, 66], [6, 26, 48, 70], [6, 26, 50, 74], [6, 30, 54, 78], [6, 30, 56, 82], [6, 30, 58, 86], [6, 34, 62, 90], [6, 28, 50, 72, 94], [6, 26, 50, 74, 98], [6, 30, 54, 78, 102], [6, 28, 54, 80, 106], [6, 32, 58, 84, 110], [6, 30, 58, 86, 114], [6, 34, 62, 90, 118], [6, 26, 50, 74, 98, 122], [6, 30, 54, 78, 102, 126], [6, 26, 52, 78, 104, 130], [6, 30, 56, 82, 108, 134], [6, 34, 60, 86, 112, 138], [6, 30, 58, 86, 114, 142], [6, 34, 62, 90, 118, 146], [6, 30, 54, 78, 102, 126, 150], [6, 24, 50, 76, 102, 128, 154], [6, 28, 54, 80, 106, 132, 158], [6, 32, 58, 84, 110, 136, 162], [6, 26, 54, 82, 110, 138, 166], [6, 30, 58, 86, 114, 142, 170] ], G15 : (1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) | (1 << 0), G18 : (1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8) | (1 << 5) | (1 << 2) | (1 << 0), G15_MASK : (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1), getBCHTypeInfo : function(data) { var d = data << 10; while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) >= 0) { d ^= (QRUtil.G15 << (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G15) ) ); } return ( (data << 10) | d) ^ QRUtil.G15_MASK; }, getBCHTypeNumber : function(data) { var d = data << 12; while (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) >= 0) { d ^= (QRUtil.G18 << (QRUtil.getBCHDigit(d) - QRUtil.getBCHDigit(QRUtil.G18) ) ); } return (data << 12) | d; }, getBCHDigit : function(data) { var digit = 0; while (data !== 0) { digit++; data >>>= 1; } return digit; }, getPatternPosition : function(typeNumber) { return QRUtil.PATTERN_POSITION_TABLE[typeNumber - 1]; }, getMask : function(maskPattern, i, j) { switch (maskPattern) { case QRMaskPattern.PATTERN000 : return (i + j) % 2 === 0; case QRMaskPattern.PATTERN001 : return i % 2 === 0; case QRMaskPattern.PATTERN010 : return j % 3 === 0; case QRMaskPattern.PATTERN011 : return (i + j) % 3 === 0; case QRMaskPattern.PATTERN100 : return (Math.floor(i / 2) + Math.floor(j / 3) ) % 2 === 0; case QRMaskPattern.PATTERN101 : return (i * j) % 2 + (i * j) % 3 === 0; case QRMaskPattern.PATTERN110 : return ( (i * j) % 2 + (i * j) % 3) % 2 === 0; case QRMaskPattern.PATTERN111 : return ( (i * j) % 3 + (i + j) % 2) % 2 === 0; default : throw new Error("bad maskPattern:" + maskPattern); } }, getErrorCorrectPolynomial : function(errorCorrectLength) { var a = new QRPolynomial([1], 0); for (var i = 0; i < errorCorrectLength; i++) { a = a.multiply(new QRPolynomial([1, QRMath.gexp(i)], 0) ); } return a; }, getLengthInBits : function(mode, type) { if (1 <= type && type < 10) { // 1 - 9 switch(mode) { case QRMode.MODE_NUMBER : return 10; case QRMode.MODE_ALPHA_NUM : return 9; case QRMode.MODE_8BIT_BYTE : return 8; case QRMode.MODE_KANJI : return 8; default : throw new Error("mode:" + mode); } } else if (type < 27) { // 10 - 26 switch(mode) { case QRMode.MODE_NUMBER : return 12; case QRMode.MODE_ALPHA_NUM : return 11; case QRMode.MODE_8BIT_BYTE : return 16; case QRMode.MODE_KANJI : return 10; default : throw new Error("mode:" + mode); } } else if (type < 41) { // 27 - 40 switch(mode) { case QRMode.MODE_NUMBER : return 14; case QRMode.MODE_ALPHA_NUM : return 13; case QRMode.MODE_8BIT_BYTE : return 16; case 
QRMode.MODE_KANJI : return 12; default : throw new Error("mode:" + mode); } } else { throw new Error("type:" + type); } }, getLostPoint : function(qrCode) { var moduleCount = qrCode.getModuleCount(); var lostPoint = 0; var row = 0; var col = 0; // LEVEL1 for (row = 0; row < moduleCount; row++) { for (col = 0; col < moduleCount; col++) { var sameCount = 0; var dark = qrCode.isDark(row, col); for (var r = -1; r <= 1; r++) { if (row + r < 0 || moduleCount <= row + r) { continue; } for (var c = -1; c <= 1; c++) { if (col + c < 0 || moduleCount <= col + c) { continue; } if (r === 0 && c === 0) { continue; } if (dark === qrCode.isDark(row + r, col + c) ) { sameCount++; } } } if (sameCount > 5) { lostPoint += (3 + sameCount - 5); } } } // LEVEL2 for (row = 0; row < moduleCount - 1; row++) { for (col = 0; col < moduleCount - 1; col++) { var count = 0; if (qrCode.isDark(row, col ) ) count++; if (qrCode.isDark(row + 1, col ) ) count++; if (qrCode.isDark(row, col + 1) ) count++; if (qrCode.isDark(row + 1, col + 1) ) count++; if (count === 0 || count === 4) { lostPoint += 3; } } } // LEVEL3 for (row = 0; row < moduleCount; row++) { for (col = 0; col < moduleCount - 6; col++) { if (qrCode.isDark(row, col) && !qrCode.isDark(row, col + 1) && qrCode.isDark(row, col + 2) && qrCode.isDark(row, col + 3) && qrCode.isDark(row, col + 4) && !qrCode.isDark(row, col + 5) && qrCode.isDark(row, col + 6) ) { lostPoint += 40; } } } for (col = 0; col < moduleCount; col++) { for (row = 0; row < moduleCount - 6; row++) { if (qrCode.isDark(row, col) && !qrCode.isDark(row + 1, col) && qrCode.isDark(row + 2, col) && qrCode.isDark(row + 3, col) && qrCode.isDark(row + 4, col) && !qrCode.isDark(row + 5, col) && qrCode.isDark(row + 6, col) ) { lostPoint += 40; } } } // LEVEL4 var darkCount = 0; for (col = 0; col < moduleCount; col++) { for (row = 0; row < moduleCount; row++) { if (qrCode.isDark(row, col) ) { darkCount++; } } } var ratio = Math.abs(100 * darkCount / moduleCount / moduleCount - 50) / 5; lostPoint += ratio * 10; return lostPoint; } }; module.exports = QRUtil;
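For readers following the bit arithmetic in getBCHTypeInfo, the same computation transcribed to Python (constants copied from the file above); the example input is an arbitrary 5-bit format value.

# Python transcription of QRUtil.getBCHDigit / getBCHTypeInfo above.
G15 = (1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) | (1 << 0)
G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1)

def bch_digit(data):
    # Number of significant bits, like QRUtil.getBCHDigit.
    digit = 0
    while data != 0:
        digit += 1
        data >>= 1
    return digit

def bch_type_info(data):
    # Append the 10-bit BCH remainder of data, then XOR with the fixed mask.
    d = data << 10
    while bch_digit(d) - bch_digit(G15) >= 0:
        d ^= G15 << (bch_digit(d) - bch_digit(G15))
    return ((data << 10) | d) ^ G15_MASK

# 5-bit input = 2 error-correction-level bits followed by 3 mask-pattern bits.
print(format(bch_type_info(0b00101), "015b"))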
PypiClean
/igot-0.1.0.tar.gz/igot-0.1.0/src/i_got/extractors/google.py
__all__ = ['google_download'] from ..common import * import re # YouTube media encoding options, in descending quality order. # taken from http://en.wikipedia.org/wiki/YouTube#Quality_and_codecs, 3/22/2013. youtube_codecs = [ {'itag': 38, 'container': 'MP4', 'video_resolution': '3072p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3.5-5', 'audio_encoding': 'AAC', 'audio_bitrate': '192'}, {'itag': 46, 'container': 'WebM', 'video_resolution': '1080p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'}, {'itag': 37, 'container': 'MP4', 'video_resolution': '1080p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '3-4.3', 'audio_encoding': 'AAC', 'audio_bitrate': '192'}, {'itag': 102, 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '2', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'}, {'itag': 45, 'container': 'WebM', 'video_resolution': '720p', 'video_encoding': '', 'video_profile': '', 'video_bitrate': '', 'audio_encoding': '', 'audio_bitrate': ''}, {'itag': 22, 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': 'High', 'video_bitrate': '2-2.9', 'audio_encoding': 'AAC', 'audio_bitrate': '192'}, {'itag': 84, 'container': 'MP4', 'video_resolution': '720p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '2-2.9', 'audio_encoding': 'AAC', 'audio_bitrate': '152'}, {'itag': 120, 'container': 'FLV', 'video_resolution': '720p', 'video_encoding': 'AVC', 'video_profile': '[email protected]', 'video_bitrate': '2', 'audio_encoding': 'AAC', 'audio_bitrate': '128'}, {'itag': 85, 'container': 'MP4', 'video_resolution': '520p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '2-2.9', 'audio_encoding': 'AAC', 'audio_bitrate': '152'}, {'itag': 44, 'container': 'WebM', 'video_resolution': '480p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '1', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'}, {'itag': 35, 'container': 'FLV', 'video_resolution': '480p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.8-1', 'audio_encoding': 'AAC', 'audio_bitrate': '128'}, {'itag': 101, 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '192'}, {'itag': 100, 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '3D', 'video_bitrate': '', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'}, {'itag': 43, 'container': 'WebM', 'video_resolution': '360p', 'video_encoding': 'VP8', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'Vorbis', 'audio_bitrate': '128'}, {'itag': 34, 'container': 'FLV', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': 'Main', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '128'}, {'itag': 82, 'container': 'MP4', 'video_resolution': '360p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'}, {'itag': 18, 'container': 'MP4', 'video_resolution': '270p/360p', 'video_encoding': 'H.264', 'video_profile': 'Baseline', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'}, {'itag': 6, 'container': 'FLV', 'video_resolution': '270p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.8', 
'audio_encoding': 'MP3', 'audio_bitrate': '64'}, {'itag': 83, 'container': 'MP4', 'video_resolution': '240p', 'video_encoding': 'H.264', 'video_profile': '3D', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': '96'}, {'itag': 13, 'container': '3GP', 'video_resolution': '', 'video_encoding': 'MPEG-4 Visual', 'video_profile': '', 'video_bitrate': '0.5', 'audio_encoding': 'AAC', 'audio_bitrate': ''}, {'itag': 5, 'container': 'FLV', 'video_resolution': '240p', 'video_encoding': 'Sorenson H.263', 'video_profile': '', 'video_bitrate': '0.25', 'audio_encoding': 'MP3', 'audio_bitrate': '64'}, {'itag': 36, 'container': '3GP', 'video_resolution': '240p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.17', 'audio_encoding': 'AAC', 'audio_bitrate': '38'}, {'itag': 17, 'container': '3GP', 'video_resolution': '144p', 'video_encoding': 'MPEG-4 Visual', 'video_profile': 'Simple', 'video_bitrate': '0.05', 'audio_encoding': 'AAC', 'audio_bitrate': '24'}, ] fmt_level = dict( zip( [str(codec['itag']) for codec in youtube_codecs], range(len(youtube_codecs)))) def google_download(url, output_dir = '.', merge = True, info_only = False, **kwargs): # Percent-encoding Unicode URL url = parse.quote(url, safe = ':/+%?=') service = url.split('/')[2].split('.')[0] if service == 'plus': # Google Plus # attempt to extract images first # TBD: posts with > 4 images # TBD: album links html = get_html(parse.unquote(url), faker=True) real_urls = [] for src in re.findall(r'src="([^"]+)"[^>]*itemprop="image"', html): t = src.split('/') t[0], t[-2] = t[0] or 'https:', 's0-d' u = '/'.join(t) real_urls.append(u) if not real_urls: real_urls = re.findall(r'<meta property="og:image" content="([^"]+)', html) real_urls = [re.sub(r'w\d+-h\d+-p', 's0', u) for u in real_urls] post_date = r1(r'"?(20\d\d[-/]?[01]\d[-/]?[0123]\d)"?', html) post_id = r1(r'/posts/([^"]+)', html) title = post_date + "_" + post_id try: url = "https://plus.google.com/" + r1(r'(photos/\d+/albums/\d+/\d+)\?authkey', html) html = get_html(url, faker=True) temp = re.findall(r'\[(\d+),\d+,\d+,"([^"]+)"\]', html) temp = sorted(temp, key = lambda x : fmt_level[x[0]]) urls = [unicodize(i[1]) for i in temp if i[0] == temp[0][0]] assert urls real_urls = urls # Look ma, there's really a video! 
post_url = r1(r'"(https://plus.google.com/[^/]+/posts/[^"]*)"', html) post_author = r1(r'/\+([^/]+)/posts', post_url) if post_author: post_url = "https://plus.google.com/+%s/posts/%s" % (parse.quote(post_author), r1(r'posts/(.+)', post_url)) post_html = get_html(post_url, faker=True) title = r1(r'<title[^>]*>([^<\n]+)', post_html) if title is None: response = request.urlopen(request.Request(real_url)) if response.headers['content-disposition']: filename = parse.unquote(r1(r'filename="?(.+)"?', response.headers['content-disposition'])).split('.') title = ''.join(filename[:-1]) except: pass for (i, real_url) in enumerate(real_urls): title_i = "%s[%s]" % (title, i) if len(real_urls) > 1 else title type, ext, size = url_info(real_url) if ext is None: ext = 'mp4' print_info(site_info, title_i, ext, size) if not info_only: download_urls([real_url], title_i, ext, size, output_dir, merge = merge) elif service in ['docs', 'drive'] : # Google Docs html = get_content(url, headers=fake_headers) title = r1(r'"title":"([^"]*)"', html) or r1(r'<meta itemprop="name" content="([^"]*)"', html) if len(title.split('.')) > 1: title = ".".join(title.split('.')[:-1]) docid = r1('/file/d/([^/]+)', url) request.install_opener(request.build_opener(request.HTTPCookieProcessor())) real_url = "https://docs.google.com/uc?export=download&confirm=no_antivirus&id=%s" % docid redirected_url = get_location(real_url) if real_url != redirected_url: # tiny file - get real url here type, ext, size = url_info(redirected_url) real_url = redirected_url else: # huge file - the real_url is a confirm page and real url is in it confirm_page = get_content(real_url) hrefs = re.findall(r'href="(.+?)"', confirm_page) for u in hrefs: if u.startswith('/uc?export=download'): rel = unescape_html(u) confirm_url = 'https://docs.google.com' + rel real_url = get_location(confirm_url) _, ext, size = url_info(real_url, headers=fake_headers) if size is None: size = 0 print_info(site_info, title, ext, size) if not info_only: download_urls([real_url], title, ext, size, output_dir, merge = merge) site_info = "Google.com" download = google_download download_playlist = playlist_not_supported('google')
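A hypothetical invocation of the extractor above; the URL is a placeholder and the import path is assumed from the src/ layout shown in the file header.

from i_got.extractors.google import google_download  # import path assumed from the source layout

# info_only=True prints site/title/size information without downloading anything.
google_download(
    "https://docs.google.com/file/d/SOME_DOC_ID/view",  # placeholder URL
    output_dir=".",
    merge=True,
    info_only=True,
)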
PypiClean
/mata_echo_discord.py-1.0.6-py3-none-any.whl/discord/types/interactions.py
from __future__ import annotations from typing import Optional, TYPE_CHECKING, Dict, TypedDict, Union, List, Literal from .snowflake import Snowflake from .components import Component, ComponentType from .embed import Embed from .channel import ChannelType from .member import Member from .role import Role from .user import User if TYPE_CHECKING: from .message import AllowedMentions, Message ApplicationCommandType = Literal[1, 2, 3] class _ApplicationCommandOptional(TypedDict, total=False): options: List[ApplicationCommandOption] type: ApplicationCommandType class ApplicationCommand(_ApplicationCommandOptional): id: Snowflake application_id: Snowflake name: str description: str class _ApplicationCommandOptionOptional(TypedDict, total=False): choices: List[ApplicationCommandOptionChoice] options: List[ApplicationCommandOption] ApplicationCommandOptionType = Literal[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] class ApplicationCommandOption(_ApplicationCommandOptionOptional): type: ApplicationCommandOptionType name: str description: str required: bool class ApplicationCommandOptionChoice(TypedDict): name: str value: Union[str, int] ApplicationCommandPermissionType = Literal[1, 2] class ApplicationCommandPermissions(TypedDict): id: Snowflake type: ApplicationCommandPermissionType permission: bool class BaseGuildApplicationCommandPermissions(TypedDict): permissions: List[ApplicationCommandPermissions] class PartialGuildApplicationCommandPermissions(BaseGuildApplicationCommandPermissions): id: Snowflake class GuildApplicationCommandPermissions(PartialGuildApplicationCommandPermissions): application_id: Snowflake guild_id: Snowflake InteractionType = Literal[1, 2, 3] class _ApplicationCommandInteractionDataOption(TypedDict): name: str class _ApplicationCommandInteractionDataOptionSubcommand(_ApplicationCommandInteractionDataOption): type: Literal[1, 2] options: List[ApplicationCommandInteractionDataOption] class _ApplicationCommandInteractionDataOptionString(_ApplicationCommandInteractionDataOption): type: Literal[3] value: str class _ApplicationCommandInteractionDataOptionInteger(_ApplicationCommandInteractionDataOption): type: Literal[4] value: int class _ApplicationCommandInteractionDataOptionBoolean(_ApplicationCommandInteractionDataOption): type: Literal[5] value: bool class _ApplicationCommandInteractionDataOptionSnowflake(_ApplicationCommandInteractionDataOption): type: Literal[6, 7, 8, 9] value: Snowflake class _ApplicationCommandInteractionDataOptionNumber(_ApplicationCommandInteractionDataOption): type: Literal[10] value: float ApplicationCommandInteractionDataOption = Union[ _ApplicationCommandInteractionDataOptionString, _ApplicationCommandInteractionDataOptionInteger, _ApplicationCommandInteractionDataOptionSubcommand, _ApplicationCommandInteractionDataOptionBoolean, _ApplicationCommandInteractionDataOptionSnowflake, _ApplicationCommandInteractionDataOptionNumber, ] class ApplicationCommandResolvedPartialChannel(TypedDict): id: Snowflake type: ChannelType permissions: str name: str class ApplicationCommandInteractionDataResolved(TypedDict, total=False): users: Dict[Snowflake, User] members: Dict[Snowflake, Member] roles: Dict[Snowflake, Role] channels: Dict[Snowflake, ApplicationCommandResolvedPartialChannel] class _ApplicationCommandInteractionDataOptional(TypedDict, total=False): options: List[ApplicationCommandInteractionDataOption] resolved: ApplicationCommandInteractionDataResolved target_id: Snowflake type: ApplicationCommandType class 
ApplicationCommandInteractionData(_ApplicationCommandInteractionDataOptional): id: Snowflake name: str class _ComponentInteractionDataOptional(TypedDict, total=False): values: List[str] class ComponentInteractionData(_ComponentInteractionDataOptional): custom_id: str component_type: ComponentType InteractionData = Union[ApplicationCommandInteractionData, ComponentInteractionData] class _InteractionOptional(TypedDict, total=False): data: InteractionData guild_id: Snowflake channel_id: Snowflake member: Member user: User message: Message class Interaction(_InteractionOptional): id: Snowflake application_id: Snowflake type: InteractionType token: str version: int class InteractionApplicationCommandCallbackData(TypedDict, total=False): tts: bool content: str embeds: List[Embed] allowed_mentions: AllowedMentions flags: int components: List[Component] InteractionResponseType = Literal[1, 4, 5, 6, 7] class _InteractionResponseOptional(TypedDict, total=False): data: InteractionApplicationCommandCallbackData class InteractionResponse(_InteractionResponseOptional): type: InteractionResponseType class MessageInteraction(TypedDict): id: Snowflake type: InteractionType name: str user: User class _EditApplicationCommandOptional(TypedDict, total=False): description: str options: Optional[List[ApplicationCommandOption]] type: ApplicationCommandType class EditApplicationCommand(_EditApplicationCommandOptional): name: str default_permission: bool
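An example payload shaped by the TypedDicts above: a type-4 interaction response (a channel message, per the Discord API) carrying an ephemeral text reply.

# Example value matching the TypedDicts above; 4 is a valid InteractionResponseType
# literal, and 1 << 6 is Discord's ephemeral-message flag.
response: InteractionResponse = {
    "type": 4,                      # respond with a channel message
    "data": {
        "content": "Command received.",
        "flags": 1 << 6,            # ephemeral: only the invoking user sees it
    },
}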
PypiClean
/boris-behav-obs-8.20.6.tar.gz/boris-behav-obs-8.20.6/boris/map_creator.py
import binascii import json import os from PyQt5.QtCore import ( Qt, pyqtSignal, QPoint, QByteArray, QBuffer, QIODevice, QLineF, ) from PyQt5.QtGui import ( QColor, QBrush, QMouseEvent, QPixmap, QIcon, QPen, QPolygon, QPolygonF, ) from PyQt5.QtWidgets import ( QGraphicsPolygonItem, QGraphicsEllipseItem, QGraphicsPixmapItem, QGraphicsLineItem, QAction, QMainWindow, QGraphicsView, QPushButton, QLabel, QHBoxLayout, QLineEdit, QSlider, QGraphicsScene, QWidget, QColorDialog, QVBoxLayout, QMessageBox, QInputDialog, QFileDialog, QApplication, ) from . import config as cfg from . import dialog from . import utilities as util designColor = QColor(255, 0, 0, 128) # red opacity: 50% penWidth = 0 penStyle = Qt.NoPen selectedBrush = QBrush() selectedBrush.setStyle(Qt.SolidPattern) selectedBrush.setColor(QColor(255, 255, 0, 255)) class ModifiersMapCreatorWindow(QMainWindow): closed = pyqtSignal() class View(QGraphicsView): """ class for handling mousepress event in QGraphicsView """ mousePress = pyqtSignal(QMouseEvent) def mousePressEvent(self, event): self.mousePress.emit(event) _start = 0 elList, points = [], [] def __init__(self, parent): QGraphicsView.__init__(self, parent) self.setBackgroundBrush(QColor(128, 128, 128)) self.setScene(QGraphicsScene(self)) self.scene().update() bitmapFileName, mapName, fileName = "", "", "" flagNewArea, flagMapChanged = False, False areasList, polygonsList2 = {}, {} areaColor = QColor("lime") def __init__(self): super(ModifiersMapCreatorWindow, self).__init__() self.pixmap = QPixmap() self.closedPolygon = None self.selectedPolygon = None self.setWindowTitle("BORIS - Modifiers map creator") self.newMapAction = QAction(QIcon(), "&New modifiers map", self) self.newMapAction.setShortcut("Ctrl+N") self.newMapAction.setStatusTip("Create a new modifiers map") self.newMapAction.triggered.connect(self.newMap) self.openMapAction = QAction(QIcon(), "&Open modifiers map", self) self.openMapAction.setShortcut("Ctrl+O") self.openMapAction.setStatusTip("Open a modifiers map") self.openMapAction.triggered.connect(self.openMap) self.saveMapAction = QAction(QIcon(), "&Save modifiers map", self) self.saveMapAction.setShortcut("Ctrl+S") self.saveMapAction.setStatusTip("Save modifiers map") self.saveMapAction.setEnabled(False) self.saveMapAction.triggered.connect(self.saveMap_clicked) self.saveAsMapAction = QAction(QIcon(), "Save modifiers map as", self) self.saveAsMapAction.setStatusTip("Save modifiers map as") self.saveAsMapAction.setEnabled(False) self.saveAsMapAction.triggered.connect(self.saveAsMap_clicked) self.mapNameAction = QAction(QIcon(), "&Modifiers map name", self) self.mapNameAction.setShortcut("Ctrl+M") self.mapNameAction.setStatusTip("Change modifiers map name") self.mapNameAction.setEnabled(False) self.mapNameAction.triggered.connect(self.mapName_clicked) self.exitAction = QAction(QIcon(), "&Close", self) self.exitAction.setStatusTip("Close modifiers map creator") self.exitAction.triggered.connect(self.close) menubar = self.menuBar() fileMenu = menubar.addMenu("&Modifiers Map creator") fileMenu.addAction(self.newMapAction) fileMenu.addAction(self.openMapAction) fileMenu.addAction(self.saveMapAction) fileMenu.addAction(self.saveAsMapAction) fileMenu.addSeparator() fileMenu.addAction(self.mapNameAction) fileMenu.addSeparator() fileMenu.addAction(self.exitAction) self.view = self.View(self) self.view.mousePress.connect(self.viewMousePressEvent) self.btLoad = QPushButton("Load bitmap", self) self.btLoad.clicked.connect(self.loadBitmap) self.btLoad.setVisible(False) 
self.btNewArea = QPushButton("New modifier", self) self.btNewArea.clicked.connect(self.newArea) self.btNewArea.setVisible(False) self.hlayout = QHBoxLayout() self.lb = QLabel("Modifier") self.lb.setVisible(False) self.hlayout.addWidget(self.lb) self.leAreaCode = QLineEdit(self) self.leAreaCode.setVisible(False) self.hlayout.addWidget(self.leAreaCode) self.btColor = QPushButton() self.btColor.clicked.connect(self.chooseColor) self.btColor.setVisible(False) self.btColor.setStyleSheet( "QWidget {{background-color:{}}}".format(self.areaColor.name()) ) self.hlayout.addWidget(self.btColor) self.slAlpha = QSlider(Qt.Horizontal) self.slAlpha.setRange(20, 100) self.slAlpha.setValue(50) self.slAlpha.valueChanged.connect(self.slAlpha_changed) self.slAlpha.setVisible(False) self.hlayout.addWidget(self.slAlpha) self.slAlpha_changed(50) """ self.btCancelMap = QPushButton("Cancel modifiers map", self) self.btCancelMap.clicked.connect(self.cancelMap) self.btCancelMap.setVisible(False) """ layout = QVBoxLayout() layout.addWidget(self.view) layout.addWidget(self.btLoad) hlayout2 = QHBoxLayout() hlayout2.addWidget(self.btNewArea) self.btSaveArea = QPushButton("Save modifier", self) self.btSaveArea.clicked.connect(self.saveArea) self.btSaveArea.setVisible(False) hlayout2.addWidget(self.btSaveArea) self.btCancelAreaCreation = QPushButton("Cancel new modifier", self) self.btCancelAreaCreation.clicked.connect(self.cancelAreaCreation) self.btCancelAreaCreation.setVisible(False) hlayout2.addWidget(self.btCancelAreaCreation) self.btDeleteArea = QPushButton("Delete selected modifier", self) self.btDeleteArea.clicked.connect(self.deleteArea) self.btDeleteArea.setVisible(True) self.btDeleteArea.setEnabled(False) hlayout2.addWidget(self.btDeleteArea) layout.addLayout(hlayout2) layout.addLayout(self.hlayout) """layout.addWidget(self.btCancelMap)""" main_widget = QWidget(self) main_widget.setLayout(layout) self.setCentralWidget(main_widget) self.statusBar().showMessage("") def slAlpha_changed(self, val): """ opacity slider value changed """ self.btColor.setText("Opacity: {} %".format(val)) self.areaColor.setAlpha(int(val / 100 * 255)) if self.selectedPolygon: self.selectedPolygon.setBrush(self.areaColor) self.areasList[self.leAreaCode.text()]["color"] = self.areaColor.rgba() if self.closedPolygon: self.closedPolygon.setBrush(self.areaColor) def chooseColor(self): """ area color button clicked """ cd = QColorDialog() cd.setWindowFlags(Qt.WindowStaysOnTopHint) cd.setOptions(QColorDialog.ShowAlphaChannel | QColorDialog.DontUseNativeDialog) if cd.exec_(): self.areaColor = cd.currentColor() self.btColor.setStyleSheet( f"QWidget {{background-color:{self.areaColor.name()}}}" ) if self.selectedPolygon: self.selectedPolygon.setBrush(self.areaColor) self.areasList[self.leAreaCode.text()]["color"] = self.areaColor.rgba() if self.closedPolygon: self.closedPolygon.setBrush(self.areaColor) def closeEvent(self, event): if self.flagMapChanged: response = dialog.MessageDialog( "BORIS - Modifiers map creator", "What to do about the current unsaved modifiers coding map?", ["Save", "Discard", "Cancel"], ) if response == "Save": if not self.saveMap_clicked(): event.ignore() if response == "Cancel": event.ignore() return self.closed.emit() event.accept() def viewMousePressEvent(self, event): """ check if area selected with mouse """ if not self.bitmapFileName: return self.btDeleteArea.setEnabled(False) test = self.view.mapToScene(event.pos()).toPoint() if ( test.x() < 0 or test.y() < 0 or test.x() > self.pixmap.size().width() or test.y() > 
self.pixmap.size().height() ): return if not self.flagNewArea: # test clicked point for areas txt = "" # reset selected polygon to default pen if self.selectedPolygon: self.selectedPolygon.setPen( QPen(designColor, penWidth, penStyle, Qt.RoundCap, Qt.RoundJoin) ) self.selectedPolygon = None self.selectedPolygonMemBrush = None for areaCode in self.polygonsList2: if self.polygonsList2[areaCode].contains(test): if txt: txt += "," txt += areaCode self.selectedPolygon = self.polygonsList2[areaCode] self.selectedPolygonAreaCode = areaCode self.selectedPolygonMemBrush = self.selectedPolygon.brush() self.selectedPolygon.setPen( QPen( QColor(255, 0, 0, 255), 2, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin, ) ) self.leAreaCode.setText(areaCode) self.leAreaCode.setVisible(True) self.btDeleteArea.setEnabled(True) self.areaColor = self.selectedPolygon.brush().color() self.btColor.setStyleSheet( "QWidget {{background-color:{}}}".format( self.selectedPolygon.brush().color().name() ) ) self.btColor.setVisible(True) self.slAlpha.setValue( int(self.selectedPolygon.brush().color().alpha() / 255 * 100) ) self.slAlpha.setVisible(True) break if txt: self.statusBar().showMessage( "Modifier{}: {}".format("s" if "," in txt else "", txt) ) else: self.statusBar().showMessage("") if not self.selectedPolygon: self.leAreaCode.setVisible(False) self.btColor.setVisible(False) self.slAlpha.setVisible(False) return # delete last line item if (event.buttons() & Qt.RightButton) and not self.closedPolygon: if self.view.points: self.view.points = self.view.points[0:-1] if self.view.points: self.view._start = QPoint( self.view.points[-1][0], self.view.points[-1][1] ) else: self.view._start = None # remove graphical elements if self.view.elList: self.view.scene().removeItem(self.view.elList[-1]) self.view.elList = self.view.elList[0:-1] # add line item if event.buttons() == Qt.LeftButton and not self.closedPolygon: if self.view._start: end = test # test is polygon is crossed if len(self.view.points) >= 3: for idx, point in enumerate(self.view.points[:-2]): if util.intersection( self.view.points[idx], self.view.points[idx + 1], self.view.points[-1], (int(end.x()), int(end.y())), ): QMessageBox.critical( self, "", "The polygon edges can not be intersected" ) return # test if polygon closed (dist min 10 px) if ( abs(end.x() - self.view.points[0][0]) < 10 and abs(end.y() - self.view.points[0][1]) < 10 ): line = QGraphicsLineItem( QLineF( self.view._start, QPoint(self.view.points[0][0], self.view.points[0][1]), ) ) line.setPen( QPen( designColor, penWidth, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin, ) ) self.view.scene().addItem(line) self.view.elList.append(line) self.statusBar().showMessage("Area completed") # create polygon newPolygon = QPolygonF() for p in self.view.points: newPolygon.append(QPoint(p[0], p[1])) # draw polygon a red polygon self.closedPolygon = QGraphicsPolygonItem(newPolygon) self.closedPolygon.setPen( QPen(designColor, penWidth, penStyle, Qt.RoundCap, Qt.RoundJoin) ) self.closedPolygon.setBrush(self.areaColor) self.view.scene().addItem(self.closedPolygon) return self.view.points.append((int(end.x()), int(end.y()))) line = QGraphicsLineItem(QLineF(self.view._start, end)) line.setPen( QPen(designColor, 2, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin) ) self.view.scene().addItem(line) self.view.elList.append(line) self.view._start = test else: # first point self.view._start = test ellipse = QGraphicsEllipseItem( self.view._start.x(), self.view._start.y(), 3, 3 ) ellipse.setPen( QPen(designColor, 1, Qt.SolidLine, Qt.RoundCap, 
Qt.RoundJoin) ) brush = QBrush() brush.setStyle(Qt.SolidPattern) brush.setColor(designColor) ellipse.setBrush(brush) self.view.scene().addItem(ellipse) self.view.elList.append(ellipse) self.view.points.append((self.view._start.x(), self.view._start.y())) def mapName_clicked(self): """ change map name """ text, ok = QInputDialog.getText( self, "Modifiers map name", "Enter a name for the modifiers map", QLineEdit.Normal, self.mapName, ) if ok: self.mapName = text self.setWindowTitle( "{} - Modifiers map creator tool - {}".format( cfg.programName, self.mapName ) ) def newMap(self): """ create a new map """ if self.flagMapChanged: response = dialog.MessageDialog( cfg.programName + " - Modifiers map creator", "What to do about the current unsaved coding map?", ["Save", "Discard", "Cancel"], ) if response == "Save": if not self.saveMap_clicked(): return if response == "Cancel": return self.cancelMap() text, ok = QInputDialog.getText( self, "Map name", "Enter a name for the new map" ) if ok: self.mapName = text else: return if self.mapName == "": QMessageBox.critical(self, "", "You must define a name for the new map") return if self.mapName in ["areas", "bitmap"]: QMessageBox.critical(self, "", "This name is not allowed") return self.setWindowTitle(cfg.programName + " - Map creator tool - " + self.mapName) self.btLoad.setVisible(True) """self.btCancelMap.setVisible(True)""" self.statusBar().showMessage( 'Click "Load bitmap" button to select and load a bitmap into the viewer' ) def openMap(self): """ load bitmap from data show it in view scene """ if self.flagMapChanged: response = dialog.MessageDialog( cfg.programName + " - Map creator", "What to do about the current unsaved coding map?", ["Save", "Discard", "Cancel"], ) if response == "Save": if not self.saveMap_clicked(): return if response == "Cancel": return fn = QFileDialog().getOpenFileName( self, "Open a coding map", "", "BORIS coding map (*.boris_map);;All files (*)", ) fileName = fn[0] if type(fn) is tuple else fn if fileName: try: self.codingMap = json.loads(open(fileName, "r").read()) except Exception: QMessageBox.critical( self, cfg.programName, "The file {} seems not a behaviors coding map...".format(fileName), ) return self.cancelMap() self.mapName = self.codingMap["name"] self.setWindowTitle( cfg.programName + " - Map creator tool - " + self.mapName ) self.bitmapFileName = True self.fileName = fileName self.areasList = self.codingMap["areas"] # dictionary of dictionaries bitmapContent = binascii.a2b_base64(self.codingMap["bitmap"]) self.pixmap.loadFromData(bitmapContent) self.btDeleteArea.setEnabled(False) self.view.setSceneRect( 0, 0, self.pixmap.size().width(), self.pixmap.size().height() ) pixItem = QGraphicsPixmapItem(self.pixmap) pixItem.setPos(0, 0) self.view.scene().addItem(pixItem) for areaCode in self.areasList: points = self.areasList[areaCode]["geometry"] newPolygon = QPolygonF() for p in points: newPolygon.append(QPoint(p[0], p[1])) clr = QColor() clr.setRgba(self.areasList[areaCode]["color"]) # draw polygon polygon = QGraphicsPolygonItem() polygon.setPolygon(newPolygon) polygon.setPen(QPen(clr, penWidth, penStyle, Qt.RoundCap, Qt.RoundJoin)) polygon.setBrush(QBrush(clr, Qt.SolidPattern)) self.view.scene().addItem(polygon) self.polygonsList2[areaCode] = polygon self.btNewArea.setVisible(True) self.btLoad.setVisible(False) self.saveMapAction.setEnabled(True) self.saveAsMapAction.setEnabled(True) self.mapNameAction.setEnabled(True) self.statusBar().showMessage('Click "New area" to create a new area') else: 
self.statusBar().showMessage("No file", 5000) def saveMap(self): if self.fileName: # create dict with map name key mapDict = {"name": self.mapName} # add areas mapDict["areas"] = self.areasList import io # Save QPixmap to QByteArray via QBuffer. byte_array = QByteArray() buffer = QBuffer(byte_array) buffer.open(QIODevice.WriteOnly) self.pixmap.save(buffer, "PNG") string_io = io.BytesIO(byte_array) string_io.seek(0) # add bitmap mapDict["bitmap"] = binascii.b2a_base64(string_io.read()).decode("utf-8") with open(self.fileName, "w") as outfile: outfile.write(json.dumps(mapDict)) self.flagMapChanged = False return True else: return False def saveAsMap_clicked(self): filters = "Modifiers map (*.boris_map);;All files (*)" fn = QFileDialog(self).getSaveFileName( self, "Save modifiers map as", "", filters ) if type(fn) is tuple: self.fileName, _ = fn else: self.fileName = fn if self.fileName: if os.path.splitext(self.fileName)[1] != ".boris_map": self.fileName += ".boris_map" self.saveMap() def saveMap_clicked(self): if not self.fileName: fn = QFileDialog(self).getSaveFileName( self, "Save modifiers map", self.mapName + ".boris_map", "BORIS MAP (*.boris_map);;All files (*)", ) if type(fn) is tuple: self.fileName, _ = fn else: self.fileName = fn if self.fileName and os.path.splitext(self.fileName)[1] != ".boris_map": self.fileName += ".boris_map" if self.fileName: return self.saveMap() return False def newArea(self): if not self.bitmapFileName: QMessageBox.critical( self, cfg.programName, "A bitmap must be loaded before to define areas" ) return if self.selectedPolygon: self.selectedPolygon.setPen( QPen(designColor, penWidth, penStyle, Qt.RoundCap, Qt.RoundJoin) ) self.selectedPolygon = None self.flagNewArea = True self.btSaveArea.setVisible(True) self.btCancelAreaCreation.setVisible(True) self.btNewArea.setVisible(False) self.lb.setVisible(True) self.leAreaCode.clear() self.leAreaCode.setVisible(True) self.btColor.setVisible(True) self.slAlpha.setVisible(True) self.btDeleteArea.setVisible(False) self.statusBar().showMessage( "Select the vertices of the area for this modifier with the mouse (right click will cancel the last point)" ) def saveArea(self): if not self.closedPolygon: QMessageBox.critical( self, cfg.programName, "You must close your area before saving it.\nThe last vertex must correspond to the first one.", ) if len(self.view.points) < 3: QMessageBox.critical(self, cfg.programName, "You must define a closed area") return # check if no area code if not self.leAreaCode.text(): QMessageBox.critical( self, cfg.programName, "You must define a code for the new modifier" ) return # check if not allowed character for c in "|,()": if c in self.leAreaCode.text(): QMessageBox.critical( self, cfg.programName, "The modifier contains a character that is not allowed <b>()|,</b>.", ) return # check if area code already used if self.leAreaCode.text() in self.areasList: QMessageBox.critical( self, cfg.programName, "The modifier is already in use" ) return # create polygon newPolygon = QPolygon() for p in self.view.points: newPolygon.append(QPoint(p[0], p[1])) self.areasList[self.leAreaCode.text()] = { "geometry": self.view.points, "color": self.areaColor.rgba(), } # remove all lines for line in self.view.elList: self.view.scene().removeItem(line) # draw polygon self.closedPolygon.setBrush(QBrush(self.areaColor, Qt.SolidPattern)) self.polygonsList2[self.leAreaCode.text()] = self.closedPolygon self.closedPolygon = None self.view._start = 0 self.view.points = [] self.view.elList = [] self.flagNewArea = 
False self.closedPolygon = None self.btSaveArea.setVisible(False) self.btCancelAreaCreation.setVisible(False) self.lb.setVisible(False) self.leAreaCode.setVisible(False) self.btColor.setVisible(False) self.slAlpha.setVisible(False) self.btDeleteArea.setVisible(True) self.btNewArea.setVisible(True) self.leAreaCode.setText("") self.flagMapChanged = True self.statusBar().showMessage("New modifier saved", 5000) def cancelAreaCreation(self): if self.closedPolygon: self.view.scene().removeItem(self.closedPolygon) self.closedPolygon = None # remove all lines for line in self.view.elList: self.view.scene().removeItem(line) self.view.elList = [] self.view._start = 0 self.view.points = [] self.flagNewArea = False self.btCancelAreaCreation.setVisible(False) self.btDeleteArea.setVisible(True) self.btSaveArea.setVisible(False) self.lb.setVisible(False) self.btColor.setVisible(False) self.slAlpha.setVisible(False) self.btNewArea.setVisible(True) self.leAreaCode.setVisible(False) self.leAreaCode.setText("") def deleteArea(self): """ remove selected area from map """ if self.selectedPolygon: self.view.scene().removeItem(self.selectedPolygon) self.view.scene().removeItem( self.polygonsList2[self.selectedPolygonAreaCode] ) del self.polygonsList2[self.selectedPolygonAreaCode] del self.areasList[self.selectedPolygonAreaCode] self.flagMapChanged = True self.view.elList = [] self.view._start = 0 self.view.points = [] self.flagNewArea = False self.btSaveArea.setVisible(False) self.lb.setVisible(False) self.btColor.setVisible(False) self.slAlpha.setVisible(False) self.btNewArea.setVisible(True) self.leAreaCode.setVisible(False) self.leAreaCode.setText("") def cancelMap(self): """ remove current map """ self.flagNewArea = False self.areasList = {} self.polygonsList2 = {} self.view.scene().clear() self.btLoad.setVisible(False) self.btDeleteArea.setVisible(False) self.btNewArea.setVisible(False) self.saveMapAction.setEnabled(False) self.saveAsMapAction.setEnabled(False) self.mapNameAction.setEnabled(False) self.statusBar().showMessage("") self.flagMapChanged = False def loadBitmap(self): """ load bitmap as background for coding map resize bitmap to 512 px if bigger """ maxSize = 512 fn = QFileDialog().getOpenFileName( self, "Load bitmap", "", "bitmap files (*.png *.jpg);;All files (*)" ) fileName = fn[0] if type(fn) is tuple else fn if fileName: self.bitmapFileName = fileName self.pixmap.load(self.bitmapFileName) if ( self.pixmap.size().width() > maxSize or self.pixmap.size().height() > maxSize ): self.pixmap = self.pixmap.scaled(maxSize, maxSize, Qt.KeepAspectRatio) QMessageBox.information( self, cfg.programName, "The bitmap was resized to %d x %d pixels\nThe original file was not modified" % (self.pixmap.size().width(), self.pixmap.size().height()), ) # scale image # pixmap = pixmap.scaled (256, 256, Qt.KeepAspectRatio) self.view.setSceneRect( 0, 0, self.pixmap.size().width(), self.pixmap.size().height() ) pixitem = QGraphicsPixmapItem(self.pixmap) pixitem.setPos(0, 0) self.view.scene().addItem(pixitem) self.btNewArea.setVisible(True) self.btLoad.setVisible(False) self.saveMapAction.setEnabled(True) self.saveAsMapAction.setEnabled(True) self.mapNameAction.setEnabled(True) self.statusBar().showMessage( """Click "New modifier" to create a new modifier""" ) self.flagMapChanged = True if __name__ == "__main__": import sys app = QApplication(sys.argv) window = ModifiersMapCreatorWindow() window.resize(640, 640) window.show() sys.exit(app.exec_())
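# --- Illustrative sketch of the saved .boris_map structure (added for clarity;
# --- not part of the module).  Keys follow saveMap()/openMap() above; the
# --- concrete values are made up.
example_boris_map = {
    "name": "arena",                                                 # map name
    "areas": {
        "left zone": {
            "geometry": [[10, 10], [120, 10], [120, 90], [10, 90]],  # polygon vertices in pixels
            "color": 4294901760,                                     # QColor.rgba() of the area brush
        },
    },
    "bitmap": "iVBORw0KGgo...",                                      # base64-encoded PNG background
}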
PypiClean
/django-cruds-adminlte-0.0.16.tar.gz/django-cruds-adminlte-0.0.16/cruds_adminlte/static/ckeditor/plugins/tabletools/dialogs/tableCell.js
/* Copyright (c) 2003-2016, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.md or http://ckeditor.com/license */ CKEDITOR.dialog.add("cellProperties",function(g){function d(a){return function(b){for(var c=a(b[0]),d=1;d<b.length;d++)if(a(b[d])!==c){c=null;break}"undefined"!=typeof c&&(this.setValue(c),CKEDITOR.env.gecko&&"select"==this.type&&!c&&(this.getInputElement().$.selectedIndex=-1))}}function l(a){if(a=n.exec(a.getStyle("width")||a.getAttribute("width")))return a[2]}var h=g.lang.table,c=h.cell,e=g.lang.common,k=CKEDITOR.dialog.validate,n=/^(\d+(?:\.\d+)?)(px|%)$/,f={type:"html",html:"\x26nbsp;"},p="rtl"== g.lang.dir,m=g.plugins.colordialog;return{title:c.title,minWidth:CKEDITOR.env.ie&&CKEDITOR.env.quirks?450:410,minHeight:CKEDITOR.env.ie&&(CKEDITOR.env.ie7Compat||CKEDITOR.env.quirks)?230:220,contents:[{id:"info",label:c.title,accessKey:"I",elements:[{type:"hbox",widths:["40%","5%","40%"],children:[{type:"vbox",padding:0,children:[{type:"hbox",widths:["70%","30%"],children:[{type:"text",id:"width",width:"100px",label:e.width,validate:k.number(c.invalidWidth),onLoad:function(){var a=this.getDialog().getContentElement("info", "widthType").getElement(),b=this.getInputElement(),c=b.getAttribute("aria-labelledby");b.setAttribute("aria-labelledby",[c,a.$.id].join(" "))},setup:d(function(a){var b=parseInt(a.getAttribute("width"),10);a=parseInt(a.getStyle("width"),10);return isNaN(a)?isNaN(b)?"":b:a}),commit:function(a){var b=parseInt(this.getValue(),10),c=this.getDialog().getValueOf("info","widthType")||l(a);isNaN(b)?a.removeStyle("width"):a.setStyle("width",b+c);a.removeAttribute("width")},"default":""},{type:"select",id:"widthType", label:g.lang.table.widthUnit,labelStyle:"visibility:hidden","default":"px",items:[[h.widthPx,"px"],[h.widthPc,"%"]],setup:d(l)}]},{type:"hbox",widths:["70%","30%"],children:[{type:"text",id:"height",label:e.height,width:"100px","default":"",validate:k.number(c.invalidHeight),onLoad:function(){var a=this.getDialog().getContentElement("info","htmlHeightType").getElement(),b=this.getInputElement(),c=b.getAttribute("aria-labelledby");b.setAttribute("aria-labelledby",[c,a.$.id].join(" "))},setup:d(function(a){var b= parseInt(a.getAttribute("height"),10);a=parseInt(a.getStyle("height"),10);return isNaN(a)?isNaN(b)?"":b:a}),commit:function(a){var b=parseInt(this.getValue(),10);isNaN(b)?a.removeStyle("height"):a.setStyle("height",CKEDITOR.tools.cssLength(b));a.removeAttribute("height")}},{id:"htmlHeightType",type:"html",html:"\x3cbr /\x3e"+h.widthPx}]},f,{type:"select",id:"wordWrap",label:c.wordWrap,"default":"yes",items:[[c.yes,"yes"],[c.no,"no"]],setup:d(function(a){var b=a.getAttribute("noWrap");if("nowrap"== a.getStyle("white-space")||b)return"no"}),commit:function(a){"no"==this.getValue()?a.setStyle("white-space","nowrap"):a.removeStyle("white-space");a.removeAttribute("noWrap")}},f,{type:"select",id:"hAlign",label:c.hAlign,"default":"",items:[[e.notSet,""],[e.alignLeft,"left"],[e.alignCenter,"center"],[e.alignRight,"right"],[e.alignJustify,"justify"]],setup:d(function(a){var b=a.getAttribute("align");return a.getStyle("text-align")||b||""}),commit:function(a){var b=this.getValue();b?a.setStyle("text-align", b):a.removeStyle("text-align");a.removeAttribute("align")}},{type:"select",id:"vAlign",label:c.vAlign,"default":"",items:[[e.notSet,""],[e.alignTop,"top"],[e.alignMiddle,"middle"],[e.alignBottom,"bottom"],[c.alignBaseline,"baseline"]],setup:d(function(a){var b=a.getAttribute("vAlign");a=a.getStyle("vertical-align");switch(a){case 
"top":case "middle":case "bottom":case "baseline":break;default:a=""}return a||b||""}),commit:function(a){var b=this.getValue();b?a.setStyle("vertical-align",b):a.removeStyle("vertical-align"); a.removeAttribute("vAlign")}}]},f,{type:"vbox",padding:0,children:[{type:"select",id:"cellType",label:c.cellType,"default":"td",items:[[c.data,"td"],[c.header,"th"]],setup:d(function(a){return a.getName()}),commit:function(a){a.renameNode(this.getValue())}},f,{type:"text",id:"rowSpan",label:c.rowSpan,"default":"",validate:k.integer(c.invalidRowSpan),setup:d(function(a){if((a=parseInt(a.getAttribute("rowSpan"),10))&&1!=a)return a}),commit:function(a){var b=parseInt(this.getValue(),10);b&&1!=b?a.setAttribute("rowSpan", this.getValue()):a.removeAttribute("rowSpan")}},{type:"text",id:"colSpan",label:c.colSpan,"default":"",validate:k.integer(c.invalidColSpan),setup:d(function(a){if((a=parseInt(a.getAttribute("colSpan"),10))&&1!=a)return a}),commit:function(a){var b=parseInt(this.getValue(),10);b&&1!=b?a.setAttribute("colSpan",this.getValue()):a.removeAttribute("colSpan")}},f,{type:"hbox",padding:0,widths:["60%","40%"],children:[{type:"text",id:"bgColor",label:c.bgColor,"default":"",setup:d(function(a){var b=a.getAttribute("bgColor"); return a.getStyle("background-color")||b}),commit:function(a){this.getValue()?a.setStyle("background-color",this.getValue()):a.removeStyle("background-color");a.removeAttribute("bgColor")}},m?{type:"button",id:"bgColorChoose","class":"colorChooser",label:c.chooseColor,onLoad:function(){this.getElement().getParent().setStyle("vertical-align","bottom")},onClick:function(){g.getColorFromDialog(function(a){a&&this.getDialog().getContentElement("info","bgColor").setValue(a);this.focus()},this)}}:f]},f, {type:"hbox",padding:0,widths:["60%","40%"],children:[{type:"text",id:"borderColor",label:c.borderColor,"default":"",setup:d(function(a){var b=a.getAttribute("borderColor");return a.getStyle("border-color")||b}),commit:function(a){this.getValue()?a.setStyle("border-color",this.getValue()):a.removeStyle("border-color");a.removeAttribute("borderColor")}},m?{type:"button",id:"borderColorChoose","class":"colorChooser",label:c.chooseColor,style:(p?"margin-right":"margin-left")+": 10px",onLoad:function(){this.getElement().getParent().setStyle("vertical-align", "bottom")},onClick:function(){g.getColorFromDialog(function(a){a&&this.getDialog().getContentElement("info","borderColor").setValue(a);this.focus()},this)}}:f]}]}]}]}],onShow:function(){this.cells=CKEDITOR.plugins.tabletools.getSelectedCells(this._.editor.getSelection());this.setupContent(this.cells)},onOk:function(){for(var a=this._.editor.getSelection(),b=a.createBookmarks(),c=this.cells,d=0;d<c.length;d++)this.commitContent(c[d]);this._.editor.forceNextSelectionCheck();a.selectBookmarks(b);this._.editor.selectionChange()}, onLoad:function(){var a={};this.foreach(function(b){b.setup&&b.commit&&(b.setup=CKEDITOR.tools.override(b.setup,function(c){return function(){c.apply(this,arguments);a[b.id]=b.getValue()}}),b.commit=CKEDITOR.tools.override(b.commit,function(c){return function(){a[b.id]!==b.getValue()&&c.apply(this,arguments)}}))})}}});
PypiClean
/nh_currency-1.0.1-py3-none-any.whl/currency/currency.py
import time import requests from currency import cache from currency.data import _currencies, _suffix, _no_space from currency.exceptions import CurrencyException ccache = None def get_cache(): """ return empty dict if cache not exists """ global ccache ccache = cache.read() if ccache is None: ccache = {} def validate_currency(*currencies): """ some validation checks before doing anything """ validated_currency = [] if not currencies: raise CurrencyException('My function need something to run, duh') for currency in currencies: currency = currency.upper() if not isinstance(currency, str): raise TypeError('Currency code should be a string: ' + repr(currency)) if currency not in _currencies: raise CurrencyException('Currency code not found: ' + repr(currency)) validated_currency.append(currency) return validated_currency[0] if len(validated_currency) == 1 else validated_currency def validate_price(price): """ validation checks for price argument """ if isinstance(price, str): try: price = int(price) except ValueError: # fallback if convert to int failed price = float(price) if not isinstance(price, (int, float)): raise TypeError('Price should be a number: ' + repr(price)) return price def info(currency): """ return all info about currency """ currency = validate_currency(currency) return _currencies[currency] def code(currency): """ return symbol of currency """ currency = validate_currency(currency) return _currencies[currency]['code'] def name(currency, *, plural=False): """ return name of currency """ currency = validate_currency(currency) if plural: return _currencies[currency]['name_plural'] return _currencies[currency]['name'] def symbol(currency, *, native=True): """ return symbol of currency """ currency = validate_currency(currency) if native: return _currencies[currency]['symbol_native'] return _currencies[currency]['symbol'] def decimals(currency): """ return maximum decimal digits of currency """ currency = validate_currency(currency) return _currencies[currency]['decimal_digits'] def roundto(currency): """ return currency increment used for rounding """ currency = validate_currency(currency) return _currencies[currency]['rounding'] def rounding(price, currency): """ rounding currency value based on its max decimal digits """ currency = validate_currency(currency) price = validate_price(price) if decimals(currency) == 0: return round(int(price), decimals(currency)) return round(price, decimals(currency)) def issuffix(currency): """ check if currency symbol is after price number unlike the majority """ return currency in _suffix def nospace(currency): """ check if currency do not need a space between symbol and price number """ return currency in _no_space def pretty(price, currency, *, abbrev=True, trim=True): """ return format price with symbol. Example format(100, 'USD') return '$100' pretty(price, currency, abbrev=True, trim=False) abbrev: True: print value + symbol. Symbol can either be placed before or after value False: print value + currency code. 
currency code is placed behind value trim: True: trim float value to the maximum digit numbers of that currency False: keep number of decimal in initial argument """ currency = validate_currency(currency) price = validate_price(price) space = '' if nospace(currency) else ' ' fmtstr = '' if trim: fmtstr = '{:0,.{x}f}'.format(price, x=decimals(currency)).rstrip('0').rstrip('.') else: fmtstr = '{:0,}'.format(price).rstrip('0').rstrip('.') if abbrev: # use currency symbol if issuffix(currency): return fmtstr + space + symbol(currency) return symbol(currency, native=False) + space + fmtstr return fmtstr + ' ' + code(currency) # use currency code def check_update(from_currency, to_currency): """ check if last update is over 30 mins ago. if so return True to update, else False """ if from_currency not in ccache: # if currency never get converted before ccache[from_currency] = {} if ccache[from_currency].get(to_currency) is None: ccache[from_currency][to_currency] = {'last_update': 0} last_update = float(ccache[from_currency][to_currency]['last_update']) if time.time() - last_update >= 30 * 60: # if last update is more than 30 min ago return True return False def update_cache(from_currency, to_currency): """ update from_currency to_currency pair in cache if last update for that pair is over 30 minutes ago by request API info """ if check_update(from_currency, to_currency) is True: ccache[from_currency][to_currency]['value'] = convert_using_api(from_currency, to_currency) ccache[from_currency][to_currency]['last_update'] = time.time() cache.write(ccache) def convert_using_api(from_currency, to_currency): """ convert from from_currency to to_currency by requesting API """ convert_str = from_currency + '_' + to_currency options = {'compact': 'ultra', 'q': convert_str} api_url = 'https://free.currencyconverterapi.com/api/v5/convert' result = requests.get(api_url, params=options).json() return result[convert_str] def convert(from_currency, to_currency, from_currency_price=1): """ convert from from_currency to to_currency using cached info """ get_cache() from_currency, to_currency = validate_currency(from_currency, to_currency) update_cache(from_currency, to_currency) return ccache[from_currency][to_currency]['value'] * from_currency_price # TODO: # upload to pip # vim: nofoldenable
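# --- Illustrative usage sketch (added for clarity; not part of the module). ---
# Assumes the package re-exports these helpers at the top level (otherwise
# import them from currency.currency).  Note that convert() performs a live
# request to the free.currencyconverterapi.com endpoint and caches the rate
# for 30 minutes, so it needs network access.
import currency

print(currency.code('usd'))                  # 'USD' -- lookups are case-insensitive
print(currency.decimals('JPY'))              # 0 -> yen amounts round to whole numbers
print(currency.pretty(1234.5, 'USD'))        # symbol placement/spacing follow the currency's rules
print(currency.convert('USD', 'EUR', 100))   # 100 USD expressed in EUR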
PypiClean
/matlab_wrapper-1.tar.gz/matlab_wrapper-1/CONTRIBUTING.rst
Contributing Guidelines
=======================

Reporting Issues
----------------

- Make sure that the issue is not addressed in the FAQ.rst_ file.
- Provide information about your setup:

  - Python (version, architecture, distribution)
  - MATLAB (version, architecture)
  - OS (version, architecture)

- Describe how to reproduce the issue and copy-paste the error messages in the report.
- If possible, provide a small code snippet that runs by itself and illustrates the problem.
- Use the `issue tracker`_.

.. _FAQ.rst: FAQ.rst
.. _issue tracker: https://github.com/mrkrd/matlab_wrapper/issues


Contributing new Code
---------------------

- Source code is located at: https://github.com/mrkrd/matlab_wrapper
- Your patches and pull requests are very welcome!
- A good place to start is the TODO.org_ file (best viewed in Emacs org-mode).  At the moment, all new features and ideas go through this file.  An item's status can be DONE (already implemented), TODO (some work has been started), or no status (not started).  The items often contain intended implementation details and remarks.  You can also request more details through our issue tracker.
- Coding style is mostly PEP8 compliant and The Zen of Python is your friend::

      >>> import this

- Module installation in developer mode can be useful::

      python setup.py develop --user

- Each new feature should have a test case in the tests directory.  Make sure that the tests pass using py.test_ (see the sketch below).
- Add your name to the AUTHORS.rst_ file.

.. _TODO.org: TODO.org
.. _py.test: http://pytest.org
.. _AUTHORS.rst: AUTHORS.rst
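A minimal test case might look like the following sketch (illustrative only, added here for clarity; it assumes the package's ``MatlabSession`` interface and a local MATLAB installation, so adjust names to the actual API)::

    # tests/test_roundtrip.py -- hypothetical example, not part of the project
    import matlab_wrapper

    def test_put_get_roundtrip():
        matlab = matlab_wrapper.MatlabSession()
        matlab.put('x', 2.0)
        matlab.eval('y = x * 3')
        assert matlab.get('y') == 6.0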
PypiClean
/kaze_python-0.8.9-py3-none-any.whl/kaze/VM/ScriptBuilder.py
import struct import binascii from kaze.VM.OpCode import PUSHDATA1, PUSHDATA2, PUSHDATA4, PUSHF, PUSHT, PACK, PUSH0, PUSH1, PUSHM1, PUSHBYTES75, \ APPCALL, TAILCALL, SYSCALL from kaze.IO.MemoryStream import MemoryStream from kazecore.BigInteger import BigInteger class ScriptBuilder: """docstring for ScriptBuilder""" def __init__(self): super(ScriptBuilder, self).__init__() self.ms = MemoryStream() # MemoryStream def WriteUInt16(self, value, endian="<"): return self.pack('%sH' % endian, value) def WriteUInt32(self, value, endian="<"): return self.pack('%sI' % endian, value) def WriteUInt64(self, value, endian="<"): return self.pack('%sQ' % endian, value) def WriteVarInt(self, value, endian="<"): if not isinstance(value, int): raise TypeError('%s not int type.' % value) if value < 0: raise Exception('%d too small.' % value) elif value < 0xfd: return self.WriteByte(value) elif value <= 0xffff: self.WriteByte(0xfd) return self.WriteUInt16(value, endian) elif value <= 0xFFFFFFFF: self.WriteByte(0xfe) return self.WriteUInt32(value, endian) else: self.WriteByte(0xff) return self.WriteUInt64(value, endian) def WriteVarBytes(self, value, endian="<", unhexlify=True): length = len(value) self.WriteVarInt(length, endian) return self.WriteBytes(value) def WriteByte(self, value): if type(value) is bytes: self.ms.write(value) elif type(value) is str: self.ms.write(value.encode('utf-8')) elif type(value) is int: self.ms.write(bytes([value])) def WriteBytes(self, value): try: value = binascii.unhexlify(value) except TypeError: pass except binascii.Error: pass self.ms.write(value) def WriteBool(self, value, endian="<"): if value: self.add(PUSHT) else: self.add(PUSHF) def pack(self, fmt, data): return self.WriteBytes(struct.pack(fmt, data)) def add(self, op): if isinstance(op, int): self.ms.write(bytes([op])) else: self.ms.write(op) return def push(self, data): if data is None: return if type(data) is bool: return self.add(data) if type(data) is int or type(data) is BigInteger: if data == -1: return self.add(PUSHM1) elif data == 0: return self.add(PUSH0) elif data > 0 and data <= 16: return self.add(int.from_bytes(PUSH1, 'little') - 1 + data) else: return self.push(binascii.hexlify(data.ToByteArray())) else: if not type(data) == bytearray: try: buf = binascii.unhexlify(data) except binascii.Error: buf = data else: buf = bytes(data) if len(buf) <= int.from_bytes(PUSHBYTES75, 'big'): self.add(len(buf)) self.add(buf) elif len(buf) < 0x100: self.add(PUSHDATA1) self.add(len(buf)) self.add(buf) elif len(buf) < 0x10000: self.add(PUSHDATA2) self.add(len(buf) & 0xff) self.add(len(buf) >> 8) self.add(buf) elif len(buf) < 0x100000000: self.add(PUSHDATA4) self.add(len(buf) & 0xff) self.add((len(buf) >> 8) & 0xff) self.add((len(buf) >> 16) & 0xff) self.add(len(buf) >> 24) self.add(buf) return def WriteVarData(self, data): length = len(data) if length <= 75: self.WriteByte(length) elif length < 0x100: self.ms.write(PUSHDATA1) self.WriteByte(length) elif length < 0x1000: self.ms.write(PUSHDATA2) self.WriteBytes(length.to_bytes(2, 'little')) elif length < 0x10000: self.ms.write(PUSHDATA4) self.WriteBytes(length.to_bytes(4, 'little')) self.WriteBytes(data) def Emit(self, op, arg=None): self.ms.write(op) if arg is not None: self.ms.write(arg) def EmitPushBigInteger(self, number): if number == -1: return self.Emit(PUSHM1) if number == 0: return self.Emit(PUSH0) if number > 0 and number <= 16: return self.Emit(int.from_bytes(PUSH1, 'little') - 1 + number) return self.Emit(number) def EmitAppCall(self, scriptHash, 
useTailCall=False): if len(scriptHash) != 20: raise Exception("Invalid script") if useTailCall: return self.Emit(TAILCALL, scriptHash) return self.Emit(APPCALL, scriptHash) def EmitAppCallWithOperationAndData(self, script_hash, operation, data): self.push(data) self.push(operation.encode('utf-8').hex()) self.Emit(APPCALL, script_hash.Data) def EmitAppCallWithOperationAndArgs(self, script_hash, operation, args): args.reverse() for i in args: self.push(i) self.push(len(args)) self.Emit(PACK) self.push(operation.encode('utf-8').hex()) self.Emit(APPCALL, script_hash.Data) def EmitAppCallWithOperation(self, script_hash, operation): self.push(False) self.push(operation.encode('utf-8').hex()) self.Emit(APPCALL, script_hash.Data) def EmitAppCallWithJsonArgs(self, script_hash, args): args.reverse() for a in args: if isinstance(a.Value, list): a.Value.reverse() for item in a.Value: self.push(item.ToVM()) self.push(len(a.Value)) self.Emit(PACK) else: self.push(a.ToVM()) self.Emit(APPCALL, script_hash.Data) def EmitSysCall(self, api): if api is None: raise Exception("Please specify an api") api_bytes = bytearray(api.encode('utf-8')) length = len(api_bytes) length_bytes = bytearray(length.to_bytes(1, 'little')) out = length_bytes + api_bytes return self.Emit(SYSCALL, out) def EmitSysCallWithArguments(self, api, args): args.reverse() for argument in args: if type(argument) is bool: self.WriteBool(argument) elif type(argument) is bytes and len(argument) == 1: self.WriteByte(argument) else: self.push(binascii.hexlify(argument)) self.EmitSysCall(api) def ToArray(self, cleanup=True): retval = self.ms.ToArray() if cleanup: self.ms.Cleanup() self.ms = None return retval
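# --- Illustrative usage sketch (added for clarity; not part of the module). ---
# Builds a tiny script with the helpers defined above; the 20-byte script hash
# is a placeholder value, not a real contract hash.
import binascii

from kaze.VM.ScriptBuilder import ScriptBuilder

sb = ScriptBuilder()
sb.push(3)                     # small integers become PUSH1..PUSH16 opcodes
sb.push('abcd')                # hex strings are unhexlified and pushed as raw data
sb.EmitAppCall(b'\x00' * 20)   # APPCALL with a placeholder 20-byte contract hash
print(binascii.hexlify(sb.ToArray()))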
PypiClean
/alchemite_apiclient-0.61.0-py3-none-any.whl/alchemite_apiclient/model/get_suggest_historic_done_all_of.py
import re # noqa: F401 import sys # noqa: F401 from alchemite_apiclient.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from alchemite_apiclient.exceptions import ApiAttributeError def lazy_import(): from alchemite_apiclient.model.suggest_historic_result import SuggestHistoricResult globals()['SuggestHistoricResult'] = SuggestHistoricResult class GetSuggestHistoricDoneAllOf(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { ('status',): { 'DONE': "done", }, } validations = { } @cached_property def additional_properties_type(): # noqa """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): # noqa """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'status': (str,), # noqa: E501 'result': (SuggestHistoricResult,), # noqa: E501 } @cached_property def discriminator(): # noqa return None attribute_map = { 'status': 'status', # noqa: E501 'result': 'result', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, result, *args, **kwargs): # noqa: E501 """GetSuggestHistoricDoneAllOf - a model defined in OpenAPI Args: result (SuggestHistoricResult): Keyword Args: status (str): defaults to "done", must be one of ["done", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. 
_visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ status = kwargs.get('status', "done") _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.status = status self.result = result for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, result, *args, **kwargs): # noqa: E501 """GetSuggestHistoricDoneAllOf - a model defined in OpenAPI Args: result (SuggestHistoricResult): Keyword Args: status (str): defaults to "done", must be one of ["done", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. 
Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ status = kwargs.get('status', "done") _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.status = status self.result = result for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
PypiClean
/collective.css3buttons-0.1.tar.gz/collective.css3buttons-0.1/README.md
# CSS3 Buttons for Plone #

This package brings the functionality of [CSS3 buttons](http://css3buttons.michaelhenriksen.dk) into [Zope](http://zope.org) / [Plone](http://plone.org/).

## Buildout Installation ##

Add the egg and zcml references to your buildout.cfg:

    [buildout]
    ...
    eggs =
        ...
        collective.css3buttons
        ...

    [instance]
    ...
    zcml =
        ...
        collective.css3buttons
        ...

## Usage Instructions ##

### Enable the stylesheet ###

If you are using this directly (i.e. within Zope and/or without Plone), you will need to include the css resource file like so:

    <style type="text/css">
      @import url(/++resource++collective.css3buttons.resources/stylesheets/css3buttons.css);
    </style>

If this is used under Plone, just activate the product `CSS3 Buttons` under Site Setup > Add-ons.

### Add the classes ###

Just insert the HTML fragments as defined in the [documentation](http://css3buttons.michaelhenriksen.dk) for CSS3 Buttons into your template files.

## License ##

### The [Unlicense](http://unlicense.org): ###

This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means.

In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
PypiClean
/django-legal-1.0.tar.gz/django-legal-1.0/legal/models.py
from django.conf import settings
from django.db import models

from legal.exceptions import NoVersionException

auth_user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')


class Agreement(models.Model):
    name = models.CharField(max_length=50, unique=True, db_index=True)

    def __str__(self):
        return self.name

    @property
    def versions(self):
        """ Returns all versions of this agreement. """
        return AgreementVersion.objects.filter(agreement=self)

    @property
    def current_version(self):
        """
        Returns the latest AgreementVersion for this Agreement. If none exists, raises NoVersionException.
        """
        versions = self.versions
        if versions:
            return versions[0]
        raise NoVersionException

    def user_accepted(self, user):
        current = self.current_version
        if not current:
            return False
        return AgreementAcceptance.objects.filter(user=user, agreement_version=current).exists()

    def accept(self, user):
        acceptance = AgreementAcceptance(agreement_version=self.current_version, user=user)
        acceptance.save()

    @property
    def date(self):
        return self.current_version.date

    @property
    def content(self):
        return self.current_version.content


class AgreementVersion(models.Model):
    agreement = models.ForeignKey(Agreement, db_index=True)
    date = models.DateTimeField()
    content = models.TextField()

    def __str__(self):
        return '%s published on %s' % (self.agreement.name, self.date)

    class Meta:
        unique_together = ('agreement', 'date')
        ordering = ["-date"]


class AgreementAcceptance(models.Model):
    user = models.ForeignKey(auth_user_model)
    agreement_version = models.ForeignKey(AgreementVersion)
    date = models.DateTimeField(auto_now=True)

    def __str__(self):
        return '%s accepted %s (%s) on %s' % (
            self.user, self.agreement_version.agreement.name, self.agreement_version.date, self.date)

    class Meta:
        unique_together = ('user', 'agreement_version', 'date')
        ordering = ["-date"]
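# --- Illustrative usage sketch (added for clarity; not part of django-legal). ---
# Assumes a configured Django project with this app installed and a `user`
# instance at hand; the agreement name and content below are hypothetical.
from django.utils import timezone

from legal.models import Agreement, AgreementVersion


def demo_accept_terms(user):
    # Create an agreement and publish a first version.
    tos, _ = Agreement.objects.get_or_create(name='terms-of-service')
    AgreementVersion.objects.create(agreement=tos, date=timezone.now(), content='v1 of the terms')

    # Record acceptance of the current (latest) version for this user.
    if not tos.user_accepted(user):
        tos.accept(user)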
PypiClean
/nni-3.0rc1-py3-none-macosx_10_9_x86_64.whl/nni_node/node_modules/moment/src/locale/el.js
import moment from '../moment'; function isFunction(input) { return ( (typeof Function !== 'undefined' && input instanceof Function) || Object.prototype.toString.call(input) === '[object Function]' ); } export default moment.defineLocale('el', { monthsNominativeEl: 'Ιανουάριος_Φεβρουάριος_Μάρτιος_Απρίλιος_Μάιος_Ιούνιος_Ιούλιος_Αύγουστος_Σεπτέμβριος_Οκτώβριος_Νοέμβριος_Δεκέμβριος'.split( '_' ), monthsGenitiveEl: 'Ιανουαρίου_Φεβρουαρίου_Μαρτίου_Απριλίου_Μαΐου_Ιουνίου_Ιουλίου_Αυγούστου_Σεπτεμβρίου_Οκτωβρίου_Νοεμβρίου_Δεκεμβρίου'.split( '_' ), months: function (momentToFormat, format) { if (!momentToFormat) { return this._monthsNominativeEl; } else if ( typeof format === 'string' && /D/.test(format.substring(0, format.indexOf('MMMM'))) ) { // if there is a day number before 'MMMM' return this._monthsGenitiveEl[momentToFormat.month()]; } else { return this._monthsNominativeEl[momentToFormat.month()]; } }, monthsShort: 'Ιαν_Φεβ_Μαρ_Απρ_Μαϊ_Ιουν_Ιουλ_Αυγ_Σεπ_Οκτ_Νοε_Δεκ'.split('_'), weekdays: 'Κυριακή_Δευτέρα_Τρίτη_Τετάρτη_Πέμπτη_Παρασκευή_Σάββατο'.split( '_' ), weekdaysShort: 'Κυρ_Δευ_Τρι_Τετ_Πεμ_Παρ_Σαβ'.split('_'), weekdaysMin: 'Κυ_Δε_Τρ_Τε_Πε_Πα_Σα'.split('_'), meridiem: function (hours, minutes, isLower) { if (hours > 11) { return isLower ? 'μμ' : 'ΜΜ'; } else { return isLower ? 'πμ' : 'ΠΜ'; } }, isPM: function (input) { return (input + '').toLowerCase()[0] === 'μ'; }, meridiemParse: /[ΠΜ]\.?Μ?\.?/i, longDateFormat: { LT: 'h:mm A', LTS: 'h:mm:ss A', L: 'DD/MM/YYYY', LL: 'D MMMM YYYY', LLL: 'D MMMM YYYY h:mm A', LLLL: 'dddd, D MMMM YYYY h:mm A', }, calendarEl: { sameDay: '[Σήμερα {}] LT', nextDay: '[Αύριο {}] LT', nextWeek: 'dddd [{}] LT', lastDay: '[Χθες {}] LT', lastWeek: function () { switch (this.day()) { case 6: return '[το προηγούμενο] dddd [{}] LT'; default: return '[την προηγούμενη] dddd [{}] LT'; } }, sameElse: 'L', }, calendar: function (key, mom) { var output = this._calendarEl[key], hours = mom && mom.hours(); if (isFunction(output)) { output = output.apply(mom); } return output.replace('{}', hours % 12 === 1 ? 'στη' : 'στις'); }, relativeTime: { future: 'σε %s', past: '%s πριν', s: 'λίγα δευτερόλεπτα', ss: '%d δευτερόλεπτα', m: 'ένα λεπτό', mm: '%d λεπτά', h: 'μία ώρα', hh: '%d ώρες', d: 'μία μέρα', dd: '%d μέρες', M: 'ένας μήνας', MM: '%d μήνες', y: 'ένας χρόνος', yy: '%d χρόνια', }, dayOfMonthOrdinalParse: /\d{1,2}η/, ordinal: '%dη', week: { dow: 1, // Monday is the first day of the week. doy: 4, // The week that contains Jan 4st is the first week of the year. }, });
PypiClean
/ydk-models-cisco-ios-xe-16.9.3.post1.tar.gz/ydk-models-cisco-ios-xe-16.9.3.post1/ydk/models/cisco_ios_xe/Cisco_IOS_XE_process_memory_oper.py
import sys from collections import OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class MemoryUsageProcesses(Entity): """ Data nodes for System wide Process Memory Statistics. .. attribute:: memory_usage_process The list of software processes on the device **type**\: list of :py:class:`MemoryUsageProcess <ydk.models.cisco_ios_xe.Cisco_IOS_XE_process_memory_oper.MemoryUsageProcesses.MemoryUsageProcess>` **config**\: False """ _prefix = 'process-memory-ios-xe-oper' _revision = '2017-02-07' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(MemoryUsageProcesses, self).__init__() self._top_entity = None self.yang_name = "memory-usage-processes" self.yang_parent_name = "Cisco-IOS-XE-process-memory-oper" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("memory-usage-process", ("memory_usage_process", MemoryUsageProcesses.MemoryUsageProcess))]) self._leafs = OrderedDict() self.memory_usage_process = YList(self) self._segment_path = lambda: "Cisco-IOS-XE-process-memory-oper:memory-usage-processes" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(MemoryUsageProcesses, [], name, value) class MemoryUsageProcess(Entity): """ The list of software processes on the device. .. attribute:: pid (key) Process\-ID of the process **type**\: int **range:** 0..4294967295 **config**\: False .. attribute:: name (key) The name of the process **type**\: str **config**\: False .. attribute:: tty TTY bound to by the process **type**\: int **range:** 0..65535 **config**\: False .. attribute:: allocated_memory Total memory allocated to this process (bytes) **type**\: int **range:** 0..18446744073709551615 **config**\: False **units**\: bytes .. attribute:: freed_memory Total memory freed by this process (bytes) **type**\: int **range:** 0..18446744073709551615 **config**\: False **units**\: bytes .. attribute:: holding_memory Total memory currently held by this process (bytes) **type**\: int **range:** 0..18446744073709551615 **config**\: False **units**\: bytes .. attribute:: get_buffers Get Buffers of this process (bytes) **type**\: int **range:** 0..4294967295 **config**\: False .. 
attribute:: ret_buffers Return Buffers of this process (bytes) **type**\: int **range:** 0..4294967295 **config**\: False """ _prefix = 'process-memory-ios-xe-oper' _revision = '2017-02-07' def __init__(self): if sys.version_info > (3,): super().__init__() else: super(MemoryUsageProcesses.MemoryUsageProcess, self).__init__() self.yang_name = "memory-usage-process" self.yang_parent_name = "memory-usage-processes" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['pid','name'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('pid', (YLeaf(YType.uint32, 'pid'), ['int'])), ('name', (YLeaf(YType.str, 'name'), ['str'])), ('tty', (YLeaf(YType.uint16, 'tty'), ['int'])), ('allocated_memory', (YLeaf(YType.uint64, 'allocated-memory'), ['int'])), ('freed_memory', (YLeaf(YType.uint64, 'freed-memory'), ['int'])), ('holding_memory', (YLeaf(YType.uint64, 'holding-memory'), ['int'])), ('get_buffers', (YLeaf(YType.uint32, 'get-buffers'), ['int'])), ('ret_buffers', (YLeaf(YType.uint32, 'ret-buffers'), ['int'])), ]) self.pid = None self.name = None self.tty = None self.allocated_memory = None self.freed_memory = None self.holding_memory = None self.get_buffers = None self.ret_buffers = None self._segment_path = lambda: "memory-usage-process" + "[pid='" + str(self.pid) + "']" + "[name='" + str(self.name) + "']" self._absolute_path = lambda: "Cisco-IOS-XE-process-memory-oper:memory-usage-processes/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(MemoryUsageProcesses.MemoryUsageProcess, ['pid', 'name', 'tty', 'allocated_memory', 'freed_memory', 'holding_memory', 'get_buffers', 'ret_buffers'], name, value) def clone_ptr(self): self._top_entity = MemoryUsageProcesses() return self._top_entity
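# --- Illustrative usage sketch (added for clarity; not part of the generated bindings). ---
# Populates the generated classes locally; reading live data would additionally
# require a YDK service provider and CRUD service, which are not shown here.
from ydk.models.cisco_ios_xe.Cisco_IOS_XE_process_memory_oper import MemoryUsageProcesses

procs = MemoryUsageProcesses()
proc = MemoryUsageProcesses.MemoryUsageProcess()
proc.pid = 1                         # key leaf
proc.name = 'Chunk Manager'          # key leaf (example value)
proc.allocated_memory = 212928
proc.holding_memory = 155584
procs.memory_usage_process.append(proc)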
PypiClean
/hcpmqe-2.0.11.tar.gz/hcpmqe-2.0.11/hcpmqelib/db/csv.py
import os
import csv
import bz2
import gzip
import lzma
from logging import getLogger


# noinspection SqlNoDataSourceInspection
class CsvHandler():
    """
    Set up and write a CSV output file.
    """

    def __init__(self, filename, clearfile, compression='plain'):
        """
        :param filename: the name of the csv file
        :param compression: the compression method
        :param clearfile: clear the output file if True
        """
        self.filename = filename
        self.clearfile = clearfile
        self.log = getLogger('hcpmqe.' + __name__)

        # delete csv file if existent
        if self.clearfile:
            try:
                os.remove(filename)
                self.log.debug(f'deleted existing csv file "{filename}"')
            except (OSError) as e:
                self.log.debug(f'failed to delete csv file "{filename}" - {e}')

        compression = compression.strip("('").strip("',)")

        ## setup csv writer
        try:
            if compression == 'plain':
                self.filehdl = open(self.filename, 'a', newline='')
            elif compression == 'bz2':
                self.filehdl = bz2.open(self.filename, 'at', newline='', compresslevel=9)   # bzip2 -d -c test.csv.bz2
            elif compression == 'gzip':
                self.filehdl = gzip.open(self.filename, 'at', newline='', compresslevel=9)  # gunzip < test.csv.gzip
            elif compression == 'lzma':
                self.filehdl = lzma.open(self.filename, 'at', newline='')                   # zcat < test.csv.lzma
            else:
                self.log.debug(f'unable to create csv file - {compression} compression unavailable')
                raise AttributeError(f'unable to create csv file - {compression} compression unavailable')
            self.log.debug(f'created csv file "{filename}" ({compression})')
        except Exception as e:
            self.log.debug(f'failed to create csv file "{filename}" - {e}')
            raise (e)

        self.opscreated = False  # signals that the header is not yet created

    def init_db(self):
        """
        This initializes the csv file - a dummy method.
        """
        self.log.debug('initializing the csv file')

    def __init_ops(self, rec):
        """
        Create the ops table, depending on the records received
        """
        self.csv = csv.DictWriter(self.filehdl, sorted(rec.keys()))
        self.csv.writeheader()
        self.log.debug(f'wrote csv header -> {list(rec.keys())}')

    def writeops(self, resultset):
        """
        Ingest all records in resultset into DB.
        """
        if not self.opscreated:
            # create the ops table based on the first record
            self.__init_ops(resultset[0])
            self.opscreated = True

        self.csv.writerows(resultset)
        self.log.debug(f'wrote {len(resultset)} records into csv file')

    def close(self):
        self.filehdl.close()
        self.log.debug('closed csv file')
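# --- Illustrative usage sketch (added for clarity; not part of the module). ---
# Field names and records below are made up; writeops() expects a list of dicts
# with consistent keys, since the header is derived from the first record.
if __name__ == '__main__':
    handler = CsvHandler('ops.csv', clearfile=True, compression='plain')
    handler.init_db()
    handler.writeops([{'urlName': '/obj1', 'size': 123},
                      {'urlName': '/obj2', 'size': 456}])
    handler.close()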
PypiClean
/insulearner-0.1.6.tar.gz/insulearner-0.1.6/InsuLearner/insulearner.py
__author__ = "Cameron Summers" """ InsuLearner: Estimating Insulin Pump Settings via Machine Learning Code underlying this blog post: https://www.cameronsummers.com/how_I_calculate_my_sons_insulin_pump_settings_with_machine_learning """ import sys import os import argparse import datetime as dt import warnings import matplotlib.cbook warnings.filterwarnings("ignore",category=matplotlib.MatplotlibDeprecationWarning) import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import scipy from sklearn.linear_model import LinearRegression from InsuLearner.tidepool.tidepool_user_model import TidepoolUser from InsuLearner.tidepool.tidepool_api import TidepoolAPI from InsuLearner.util import get_logger from InsuLearner.carbohydrate_sensitivity_factor import estimate_csf logger = get_logger(__name__) def compute_aace_pump_settings(weight_kg, prepump_tdd): """ Get pump settings using the American Association of Clinical Endocrinologists/American College of Endocrinology. Other references: https://diabetesed.net/wp-content/uploads/2019/09/Insulin-Pump-Calculations-Sept-2019-slides.pdf Review of insulin dosing formulas https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4960276/ Args: weight_kg (float): weight in kg prepump_tdd (float): units of insulin per day, (CS: presuming this is a life-time average) Returns: (float, float, float): basal rate, carb insulin ratio, insulin sensitivity factor """ tdd_method1 = weight_kg * 0.5 tdd_method2 = prepump_tdd * 0.75 starting_pump_tdd = (tdd_method1 + tdd_method2) / 2 basal_rate = starting_pump_tdd * 0.5 / 24 cir = 450.0 / starting_pump_tdd isf = 1700.0 / starting_pump_tdd return basal_rate, cir, isf def pd_1d_series_to_X(series): """helper function""" return np.array(series.values.tolist())[:, np.newaxis] def estimate_therapy_settings_from_window_stats_lr(aggregated_df, K, period_window_size_hours, target_bg=110, x="total_carbs", y="total_insulin", do_plots=True, trained_model=None, weight_scheme=None): """ Fit the linear model and estimate settings """ X_carbs = pd_1d_series_to_X(aggregated_df[x]) y_insulin = aggregated_df[y] if weight_scheme is None: sample_weights = None else: if weight_scheme == "CGM Weighted": sample_weights = 1.0 / np.array(np.maximum(1.0, abs(target_bg - aggregated_df["cgm_geo_mean"])) / aggregated_df["cgm_percent_tir"]) * aggregated_df["cgm_percent_available"] elif weight_scheme == "Carb Uncertainty": sample_weights = scipy.stats.norm.pdf(aggregated_df["total_carbs"], aggregated_df["total_carbs"].mean(), aggregated_df["total_carbs"].std()) else: raise Exception("Unknown weight scheme {}.".format(weight_scheme)) nan_mask = np.isnan(sample_weights) X_carbs = X_carbs[~nan_mask] y_insulin = y_insulin[~nan_mask] sample_weights = sample_weights[~nan_mask] if do_plots: plt.figure() plt.title("Sample Weight Distribution") plt.hist(sample_weights) if trained_model is None: lm_carb_to_insulin = LinearRegression() lm_carb_to_insulin.fit(X_carbs, y_insulin, sample_weight=sample_weights) else: lm_carb_to_insulin = trained_model basal_insulin_estimate = lm_carb_to_insulin.intercept_ r2_fit = lm_carb_to_insulin.score(X_carbs, y_insulin) logger.info(f"Linear Model: Fit R^2 {np.round(r2_fit, 2)}. Intercept {np.round(lm_carb_to_insulin.intercept_, 2)}. Slope g/U {np.round(1.0 / lm_carb_to_insulin.coef_, 2)}") cir_estimate_slope = 1 / lm_carb_to_insulin.coef_[0] isf_estimate_slope = cir_estimate_slope * K basal_rate_estimate_hourly = basal_insulin_estimate / period_window_size_hours logger.info("Total Period Basal={:.2f}U. 
(Mean %Daily Total: {:.2f}%)".format(basal_insulin_estimate, np.nanmean(basal_insulin_estimate / aggregated_df[y]) * 100)) logger.info("\n\n\tSettings Estimates:\n") logger.info(f"\tEstimated CIR={round(cir_estimate_slope, 2)} g/U.") logger.info(f"\tEstimated Hourly Basal={round(basal_rate_estimate_hourly, 3)} U/hr") logger.info(f"\tCSF={round(K, 2)} mg/dL / g") logger.info(f"\tEstimated ISF={round(isf_estimate_slope, 2)} mg/dL/ U") settings = (cir_estimate_slope, isf_estimate_slope, basal_insulin_estimate, lm_carb_to_insulin, r2_fit, K) if do_plots: plot_aggregated_scatter(aggregated_df, period_window_size_hours, lr_model=lm_carb_to_insulin, settings=settings, plot_aace=True, weight_scheme=weight_scheme) return settings def plot_aggregated_scatter(aggregated_df, period_window_size_hours, lr_model=None, settings=None, plot_aace=True, weight_scheme=None): """ Plot the linear model on the data. """ period_window_size_days = period_window_size_hours / 24 fig, ax = plt.subplots(figsize=(8, 8)) win_start_dates_str = aggregated_df["start_date"].dt.strftime("%Y-%m-%d") win_end_dates_str = aggregated_df["end_date"].dt.strftime("%Y-%m-%d") plt.title(f"Insulin Prediction Modeling, Aggr Period={period_window_size_days} Days, {win_start_dates_str.values[0]} to {win_end_dates_str.values[-1]}, {len(aggregated_df)} data points") hue_col = "cgm_geo_mean" # hue_col = None vars_to_plot = ["total_carbs", "total_insulin", "cgm_geo_mean"] scatter_df = aggregated_df[vars_to_plot] sns.scatterplot(data=scatter_df, x="total_carbs", y="total_insulin", hue=hue_col, ax=ax) plt.figure() ax.set_ylim(0, aggregated_df["total_insulin"].max() * 1.1) ax.set_xlim(0, aggregated_df["total_carbs"].max() * 1.1) if settings: cir_estimate_slope, isf_estimate_slope, basal_insulin_estimate, lm_carb_to_insulin, r2_fit, K = settings basal_glucose_lr = -basal_insulin_estimate / lr_model.coef_[0] x1, y1 = basal_glucose_lr, 0 x2, y2 = aggregated_df["total_carbs"].max(), lr_model.predict([[aggregated_df["total_carbs"].max()]]) ax.plot([x1, x2], [y1, y2[0]], label="Insulin Prediction LR Model (Weights: {})".format(weight_scheme)) ax.set_xlabel("Total Exogenous Glucose in Period T") ax.set_ylabel("Total Insulin in Period T") # Equations and Settings ax.text(0.6, 0.25, "y={:.4f}*x + {:.2f}, (R^2={:.2f})".format(lr_model.coef_[0], lr_model.intercept_, r2_fit), ha="left", va="top", transform=ax.transAxes) ax.text(0.6, 0.2, "CIR={:.2f} g/U \nBasal Rate={:.2f}U/hr \nISF={:.2f} mg/dL/U (K={:.2f})".format(cir_estimate_slope, basal_insulin_estimate / period_window_size_hours, isf_estimate_slope, K), ha="left", va="top", transform=ax.transAxes) # Stars ax.plot(0, basal_insulin_estimate, marker="*", markersize=12, color="green", label="Basal Insulin LR Estimate") ax.plot(basal_glucose_lr, 0, marker="*", markersize=12, color="orange", label="Endogenous Glucose LR Estimate") mean_insulin = aggregated_df["total_insulin"].mean() mean_carbs = aggregated_df["total_carbs"].mean() ax.plot(mean_carbs, mean_insulin, marker="*", markersize=12, color="red", label="Mean Insulin/Carbs") # Shades ax.fill_between([0, x2], [basal_insulin_estimate, basal_insulin_estimate], color="blue", alpha=0.2, label="Endogenous") ax.fill_between([0, x2], [basal_insulin_estimate, basal_insulin_estimate], [basal_insulin_estimate, y2[0]], color="orange", alpha=0.2, label="Exogenous") # AACE line if using 1-day windows if plot_aace and period_window_size_hours == 1: tdd_mean = aggregated_df["total_insulin"].mean() aace_basal_insulin_estimate = tdd_mean / 2 cir_aace = 450 / tdd_mean 
aace_basal_glucose_estimate = -aace_basal_insulin_estimate / (1/cir_aace) x2 = aggregated_df["total_carbs"].max() x1, y1 = (aace_basal_glucose_estimate, 0) y2 = 1.0 / (cir_aace) * x2 + aace_basal_insulin_estimate star_description = "AACE Basal Estimate (mean(TDD) / 2)" line_description = "AACE (*mean(TDD) only)" ax.plot([x1, x2], [y1, y2], label=line_description, color="gray", linestyle="--") ax.plot(0, aace_basal_insulin_estimate, marker="*", markersize=12, color="gray", label=star_description) ax.legend() plt.show() def analyze_settings_lr(user, data_start_date, data_end_date, K=12.5, do_plots=True, use_circadian_hour_estimate=True, agg_period_window_size_hours=24, agg_period_hop_size_hours=24, weight_scheme=None): """ Aggregate the data and fit a linear model """ window_stats = user.compute_window_stats(data_start_date, data_end_date, use_circadian=use_circadian_hour_estimate, window_size_hours=agg_period_window_size_hours, hop_size_hours=agg_period_hop_size_hours, plot_hop_raw=False) window_df = pd.DataFrame(window_stats) logger.debug(f'Mean of CGM Mean, {np.round(window_df["cgm_mean"].mean(), 2)}') logger.debug(f'Mean of CGM Geo Mean, {np.round(window_df["cgm_geo_mean"].mean(), 2)}') logger.debug(f'Total Period Insulin Mean: {np.round(window_df["total_insulin"].mean(), 2)}') logger.debug(f"{len(window_df)} Data Rows") settings = estimate_therapy_settings_from_window_stats_lr(window_df, K, period_window_size_hours=agg_period_window_size_hours, target_bg=110, x="total_carbs", y="total_insulin", do_plots=do_plots, weight_scheme=weight_scheme) cir_estimate, isf_estimate, basal_insulin_estimate, lr_model, lr_score, K = settings return cir_estimate, basal_insulin_estimate, isf_estimate, lr_score def load_user_data(username, password, data_start_date, data_end_date, estimation_window_size_days): """ Load the Tidepool user_obj data for the given user_obj and parameters. Args: username: tidepool username password: tidepool password data_start_date: start date of analysis data_end_date: end date of analysis estimation_window_size_days: size of estimation window in days Returns: TidepoolUser object """ tp_api_obj = TidepoolAPI(username, password) user = TidepoolUser() user.load_from_api(tp_api_obj, data_start_date, data_end_date, user_id=None, # For a user_obj that is sharing their Tidepool account with this one save_data=False) total_basal_days = user.get_num_days_span(data_type="basal") total_bolus_days = user.get_num_days_span(data_type="bolus") total_cgm_days = user.get_num_days_span(data_type="cgm") total_food_days = user.get_num_days_span(data_type="food") if np.sum([total_basal_days, total_bolus_days, total_cgm_days, total_food_days]) != (estimation_window_size_days*4): logger.warning(f"*** Warning *** : Num data days span not the size of estimation window size of {estimation_window_size_days} days") logger.warning(f"Basal Days Span {total_basal_days}. Bolus Days Span {total_bolus_days}. CGM Days Span {total_cgm_days}. 
Food Days Span {total_food_days}") user.analyze_duplicates(time_diff_thresh_sec=60 * 60) return user def main(): parser = argparse.ArgumentParser("InsuLearner: Estimate Insulin Pump Settings with Linear Regression") parser.add_argument("tp_username", type=str, help="Email username for Tidepool Account") parser.add_argument("tp_password", type=str, help="Password for Tidepool Account") parser.add_argument("-ht", "--height_inches", type=float, help="Your height in inches") parser.add_argument("-wt", "--weight_lbs", type=float, help="Your weight in pounds") parser.add_argument("-g", "--gender", choices=["male", "female"]) parser.add_argument("--num_days", type=int, help="Number of days in the past to analyze data", default=60) parser.add_argument("--CSF", type=float, help="If entered, will use this CSF instead of estimating it from height and weight.") parser.add_argument("-eb", "--estimate_agg_boundaries", "-eb", action="store_true", default=True, help="Use an autocorrelation-like algorithm to estimate aggregation boundaries to denoise the fit.") parser.add_argument("-aw", "--agg_period_window_size_hours", type=int, help="The size in hours of each period to aggregate for fitting the model.", default=24) parser.add_argument("-ah", "--agg_period_hop_size_hours", "-ah", type=int, help="The size in hours to hop each period for aggregation.", default=24) args = parser.parse_args() tp_username = args.tp_username tp_password = args.tp_password estimation_window_size_days = args.num_days height_inches = args.height_inches weight_lbs = args.weight_lbs gender = args.gender CSF = args.CSF estimate_agg_boundaries = args.estimate_agg_boundaries agg_period_window_size_hours = args.agg_period_window_size_hours agg_period_hop_size_hours = args.agg_period_hop_size_hours logger.debug(f"Args:") logger.debug(f"estimation_window_size_days: {estimation_window_size_days}") logger.debug(f"height_inches: {height_inches}") logger.debug(f"weight_lbs: {weight_lbs}") logger.debug(f"gender: {gender}") logger.debug(f"CSF: {CSF}") logger.debug(f"estimate_agg_boundaries: {estimate_agg_boundaries}") logger.debug(f"agg_period_window_size_hours: {agg_period_window_size_hours}") logger.debug(f"agg_period_hop_size_hours: {agg_period_hop_size_hours}") K = CSF if CSF is None: # Estimate CSF from blood volume based on height and weight K = estimate_csf(height_inches, weight_lbs, gender, metabolism_efficiency_percentage=0.23) logger.info(f"CSF estimated to be {np.round(K, 2)} for height_inches {height_inches}, weight_lbs {weight_lbs}, and gender {gender}") else: logger.info(f"Provided CSF={K}") # Get date info today = dt.datetime.now() data_start_date = today - dt.timedelta(days=estimation_window_size_days + 1) data_end_date = today - dt.timedelta(days=1) # Uncomment if specific dates desired # data_start_date = dt.datetime(year=2022, month=12, day=1) # data_end_date = dt.datetime(year=2023, month=1, day=26) logger.info(f"Running for dates {data_start_date} to {data_end_date}") # Load user_obj data into an object user_obj = load_user_data(tp_username, tp_password, data_start_date, data_end_date, estimation_window_size_days) # Run settings analysis analyze_settings_lr(user_obj, data_start_date=data_start_date, data_end_date=data_end_date, K=K, do_plots=True, use_circadian_hour_estimate=estimate_agg_boundaries, agg_period_window_size_hours=agg_period_window_size_hours, agg_period_hop_size_hours=agg_period_hop_size_hours, ) if __name__ == "__main__": main()
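# --- Illustration only (hypothetical numbers, not wired into main()) ---------
# The AACE helper at the top of this module can be exercised on its own; the
# body weight and pre-pump TDD below are placeholders.
def _aace_settings_demo():
    basal_rate, cir, isf = compute_aace_pump_settings(weight_kg=45.0, prepump_tdd=30.0)
    logger.info(f"AACE demo: basal {basal_rate:.2f} U/hr, CIR {cir:.1f} g/U, ISF {isf:.1f} mg/dL/U")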
PypiClean
/RsCMPX_NrFr1Meas-4.0.185.tar.gz/RsCMPX_NrFr1Meas-4.0.185/RsCMPX_NrFr1Meas/Implementations/NrSubMeas/MultiEval/Trace/Cc/Layer/EvmSymbol/Current.py
from typing import List

from ........Internal.Core import Core
from ........Internal.CommandsGroup import CommandsGroup
from ........Internal.ArgSingleSuppressed import ArgSingleSuppressed
from ........Internal.Types import DataType
from ........ import repcap


# noinspection PyPep8Naming,PyAttributeOutsideInit,SpellCheckingInspection
class CurrentCls:
    """Current commands group definition. 2 total commands, 0 Subgroups, 2 group commands"""

    def __init__(self, core: Core, parent):
        self._core = core
        self._cmd_group = CommandsGroup("current", core, parent)

    def read(self, carrierComponent=repcap.CarrierComponent.Default, layer=repcap.Layer.Default) -> List[float]:
        """SCPI: READ:NRSub:MEASurement<Instance>:MEValuation:TRACe[:CC<no>][:LAYer<layer>]:EVMSymbol:CURRent \n
        Snippet: value: List[float] = driver.nrSubMeas.multiEval.trace.cc.layer.evmSymbol.current.read(carrierComponent = repcap.CarrierComponent.Default, layer = repcap.Layer.Default) \n
        Returns the values of the EVM vs modulation symbol trace for carrier <no>, layer/antenna <l>. See also 'Square EVM'. \n
        Suppressed linked return values: reliability \n
            :param carrierComponent: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Cc')
            :param layer: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Layer')
            :return: ratio: Comma-separated list of EVM values, one value per modulation symbol"""
        carrierComponent_cmd_val = self._cmd_group.get_repcap_cmd_value(carrierComponent, repcap.CarrierComponent)
        layer_cmd_val = self._cmd_group.get_repcap_cmd_value(layer, repcap.Layer)
        suppressed = ArgSingleSuppressed(0, DataType.Integer, False, 1, 'Reliability')
        response = self._core.io.query_bin_or_ascii_float_list_suppressed(f'READ:NRSub:MEASurement<Instance>:MEValuation:TRACe:CC{carrierComponent_cmd_val}:LAYer{layer_cmd_val}:EVMSymbol:CURRent?', suppressed)
        return response

    def fetch(self, carrierComponent=repcap.CarrierComponent.Default, layer=repcap.Layer.Default) -> List[float]:
        """SCPI: FETCh:NRSub:MEASurement<Instance>:MEValuation:TRACe[:CC<no>][:LAYer<layer>]:EVMSymbol:CURRent \n
        Snippet: value: List[float] = driver.nrSubMeas.multiEval.trace.cc.layer.evmSymbol.current.fetch(carrierComponent = repcap.CarrierComponent.Default, layer = repcap.Layer.Default) \n
        Returns the values of the EVM vs modulation symbol trace for carrier <no>, layer/antenna <l>. See also 'Square EVM'. \n
        Suppressed linked return values: reliability \n
            :param carrierComponent: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Cc')
            :param layer: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Layer')
            :return: ratio: Comma-separated list of EVM values, one value per modulation symbol"""
        carrierComponent_cmd_val = self._cmd_group.get_repcap_cmd_value(carrierComponent, repcap.CarrierComponent)
        layer_cmd_val = self._cmd_group.get_repcap_cmd_value(layer, repcap.Layer)
        suppressed = ArgSingleSuppressed(0, DataType.Integer, False, 1, 'Reliability')
        response = self._core.io.query_bin_or_ascii_float_list_suppressed(f'FETCh:NRSub:MEASurement<Instance>:MEValuation:TRACe:CC{carrierComponent_cmd_val}:LAYer{layer_cmd_val}:EVMSymbol:CURRent?', suppressed)
        return response
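# --- Usage sketch (not generated code) ----------------------------------------
# The 'Snippet' lines in the docstrings assume an already constructed driver.
# The top-level class name and the VISA resource string below are assumptions
# following the usual Rohde & Schwarz auto-generated package layout.
if __name__ == '__main__':
    from RsCMPX_NrFr1Meas import RsCMPX_NrFr1Meas  # assumed top-level driver class

    driver = RsCMPX_NrFr1Meas('TCPIP::192.168.0.10::hislip0')  # placeholder resource
    # EVM vs modulation symbol trace for carrier 1, layer 1 (repcap defaults)
    evm_per_symbol = driver.nrSubMeas.multiEval.trace.cc.layer.evmSymbol.current.fetch()
    print(len(evm_per_symbol), 'EVM values')
    driver.close()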
PypiClean
/diplomacy-research-1.0.0.tar.gz/diplomacy-research-1.0.0/diplomacy_research/players/rulesets/dumbbot_ruleset.py
""" A Python version of David Norman's DumbBot. """ import collections import logging import random from diplomacy_research.models.state_space import build_game_from_state_proto # --- Constants --- LOGGER = logging.getLogger(__name__) # Nb of proximity maps PROXIMITY_DEPTHS = 10 # Shape the power size by ax^2 + bx + c SIZE_SQUARE_COEFFICIENT = 1. SIZE_COEFFICIENT = 4. SIZE_CONSTANT = 16 # Importance of attack SC we don't own in spring/fall SPRING_PROXIMITY_ATTACK_WEIGHT = 700 FALL_PROXIMITY_ATTACK_WEIGHT = 600 # Importance of defending our own center in spring/fall SPRING_PROXIMITY_DEFENSE_WEIGHT = 300 FALL_PROXIMITY_DEFENSE_WEIGHT = 400 # Importance of proximity_map[n] in Spring/Fall/Building/Disbanding SPRING_PROXIMITY_WEIGHTS = [100, 1000, 30, 10, 6, 5, 4, 3, 2, 1] FALL_PROXIMITY_WEIGHTS = [1000, 100, 30, 10, 6, 5, 4, 3, 2, 1] BUILD_PROXIMITY_WEIGHTS = [1000, 100, 30, 10, 6, 5, 4, 3, 2, 1] REMOVE_PROXIMITY_WEIGHTS = [1000, 100, 30, 10, 6, 5, 4, 3, 2, 1] # Importance of attack strength in Spring/Fall SPRING_STRENGTH_WEIGHT = 1000 FALL_STRENGTH_WEIGHT = 1000 # Importance of lack of competition in Spring/Fall SPRING_COMPETITION_WEIGHT = 1000 FALL_COMPETITION_WEIGHT = 1000 # Importance of building in province we need to defend BUILD_DEFENSE_WEIGHT = 1000 # Importance of removing unit we don't need to defend REMOVE_DEFENSE_WEIGHT = 1000 # If not automatic, chance of playing best move if inferior move is nearly as good ALTERNATIVE_DIFF_MODIFIER = 5 # percentage chance of automatically playing the next move PLAY_ALTERNATIVE = 0.5 # --- Named tuples --- class Factors( collections.namedtuple('Factors', ('proximity_maps', # A list of maps, with unit as key 'competition_map', # A dict with province as key 'strength_map', # A dict with province as key 'defense_map'))): # A dict with province as key """ A class to hold all factors computed for the encoding """ class FactorWeights( collections.namedtuple('FactorWeights', ('proximity_weights', 'competition_weight', 'strength_weight', 'defense_weight'))): """ A class to hold all factor weights used for the encoding """ # --------------------------------- # MAIN FUNCTION # --------------------------------- def run_ruleset(state_proto, power_name): """ Gets the move for the given power according to the ruleset. :param state_proto: A `.proto.game.State` representation of the state of the game. :param power_name: The name of the power we are playing :return: A list of orders for that power. 
""" # Power has been eliminated if not state_proto.units[power_name].value and not state_proto.centers[power_name].value: return [] # Building the game object game = build_game_from_state_proto(state_proto) # Game is forming / completed if game.get_current_phase()[0] not in 'SFW': return [] # Encoding the board to factors dest_unit_value, factors = get_board_factors(game, power_name) # Decode orders return decode_orders(game, power_name, dest_unit_value, factors) # --------------------------------- # ENCODING # --------------------------------- def get_board_factors(game, power_name): """ Compute destination value by computing various factors :param game: An instance of `diplomacy.Game` :param power_name: The name of the power we are playing :return: A tuple consisting of 1) the dest_unit_value, 2) the factors :type game: diplomacy.Game """ season = game.get_current_phase()[0] power = game.get_power(power_name) # Compute factors if season in 'SW': factors = calculate_factors(game=game, power_name=power_name, proximity_attack_weight=SPRING_PROXIMITY_ATTACK_WEIGHT, proximity_defense_weight=SPRING_PROXIMITY_DEFENSE_WEIGHT) else: factors = calculate_factors(game=game, power_name=power_name, proximity_attack_weight=FALL_PROXIMITY_ATTACK_WEIGHT, proximity_defense_weight=FALL_PROXIMITY_DEFENSE_WEIGHT) # Computing factor weights if season == 'S': factor_weights = FactorWeights(proximity_weights=SPRING_PROXIMITY_WEIGHTS, competition_weight=SPRING_COMPETITION_WEIGHT, strength_weight=SPRING_STRENGTH_WEIGHT, defense_weight=0) elif season == 'F': factor_weights = FactorWeights(proximity_weights=FALL_PROXIMITY_WEIGHTS, competition_weight=FALL_COMPETITION_WEIGHT, strength_weight=FALL_STRENGTH_WEIGHT, defense_weight=0) else: nb_builds = len(power.centers) - len(power.units) # Build if nb_builds >= 0: factor_weights = FactorWeights(proximity_weights=BUILD_PROXIMITY_WEIGHTS, competition_weight=0, strength_weight=0, defense_weight=BUILD_DEFENSE_WEIGHT) # Disband else: factor_weights = FactorWeights(proximity_weights=REMOVE_PROXIMITY_WEIGHTS, competition_weight=0, strength_weight=0, defense_weight=REMOVE_DEFENSE_WEIGHT) # Computing destination value dest_unit_value = calculate_dest_unit_value(factors=factors, factor_weights=factor_weights, is_winter=(season == 'W')) # Returning board factors return dest_unit_value, factors def calculate_factors(game, power_name, proximity_attack_weight, proximity_defense_weight): """ Compute the proximity_maps, competition_map, and strength_map, as defined in the original C++ code. :param game: An instance of `diplomacy.Game` :param power_name: The name of the power we are playing :param proximity_attack_weight: The weight used to compute the importance of attacking. :param proximity_defense_weight: The weight used to compute the importance of defending. 
:return: The factors (proximity_maps, competition_map, strength_map, defense_map) :type game: diplomacy.Game :rtype: Factors """ # Get attack, defense values attack_map, defense_map = calculate_attack_defense(game, power_name) # List of all possible units all_units = ['{} {}'.format(unit_type, loc.upper()) for unit_type in 'AF' for loc in game.map.locs if game.map.is_valid_unit('{} {}'.format(unit_type, loc.upper()))] # Compute initial proximity value, for all non-dislodged units on the board init_proximity_map = {} for unit in all_units: init_proximity_map[unit] = (attack_map[unit[2:5]] * proximity_attack_weight + defense_map[unit[2:5]] * proximity_defense_weight) proximity_maps = [init_proximity_map] # Building deeper proximity maps # For deeper maps, the value of a location is equal to the (sum of adjacent units + self) / 5 for proximity_depth in range(1, PROXIMITY_DEPTHS): prev_proximity_map = proximity_maps[proximity_depth - 1] curr_proximity_map = {unit: 0 for unit in all_units} # Updating all units for unit in all_units: # Finding adjacent locations adj_locs = set() for dest_coast in game.map.find_coasts(unit[2:5]): adj_locs |= {loc.upper()[:3] for loc in game.map.abut_list(dest_coast, incl_no_coast=True)} # Finding potentially adjacent units adj_units = [adj_unit for adj_unit in all_units if adj_unit[2:5] in adj_locs] # Finding units that could in the current provice self_units = [self_unit for self_unit in all_units if self_unit[2:5] == unit[2:5]] # Computing self contributions self_contrib = 0 for self_unit in self_units: self_contrib = max(self_contrib, prev_proximity_map[self_unit]) # Computing other contributions other_contrib = 0. for adj_unit in adj_units: if game.map.abuts(adj_unit[0], adj_unit[2:], '-', unit[2:]) \ or game.map.abuts(adj_unit[0], adj_unit[2:], '-', unit[2:5]): other_contrib += prev_proximity_map[adj_unit] # Update score # Dividing by 5, since each location has on average 4 locations (+ itself) curr_proximity_map[unit] = (self_contrib + other_contrib) / 5. # Append proximity map to list proximity_maps += [curr_proximity_map] # Compute adjacent unit counts adjacent_unit_counts = calculate_adjacent_unit_counts(game) # Compute strength and competition map # Strength map: Number of adjacent units from same power # Competition map: Largest number of enemy adjacent units provinces = [loc.upper() for loc in game.map.locs if '/' not in loc] strength_map = {loc: 0 for loc in provinces} competition_map = {loc: 0 for loc in provinces} for loc in provinces: for adjacent_power, nb_adjacent_units in adjacent_unit_counts[loc].items(): if adjacent_power == power_name: strength_map[loc] = nb_adjacent_units else: competition_map[loc] = max(competition_map[loc], nb_adjacent_units) # Returning factors return Factors(proximity_maps=proximity_maps, competition_map=competition_map, strength_map=strength_map, defense_map=defense_map) def calculate_attack_defense(game, power_name): """ Compute the attack and defense maps for the current power. 
:param game: An instance of `diplomacy.Game` :param power_name: The name of the power we are playing :return: A tuple consisting of: 1) attack_map: Dictionary with province as key and attack weight as value 2) defense_map: Dictionary with province as key and defense weight as value :type game: diplomacy.Game """ # compute power size power_sizes = get_power_sizes(game) # Compute attack and defense value for each province provinces = [loc.upper() for loc in game.map.locs if '/' not in loc] attack_map = {loc: 0 for loc in provinces} defense_map = {loc: 0 for loc in provinces} for power in game.powers.values(): for loc in power.centers: # Not ours, updating attack value by the size of the owning power if power.name != power_name: attack_map[loc] = power_sizes[power.name] # It's ours, update defense value by the size of the largest enemy which has a unit that can move in else: defense_map[loc] = get_defense_value(game=game, power_name=power_name, loc=loc, power_sizes=power_sizes) # Returning the attack, defense map return attack_map, defense_map def get_power_sizes(game): """ Return a dict that with power_name as key and value of its supply center as value. :param game: An instance of `diplomacy.Game` :return: A dict for power name as key and A * (nb_sc) ^2 + B * nb_sc + C as value :type game: diplomacy.Game """ A, B, C = SIZE_SQUARE_COEFFICIENT, SIZE_COEFFICIENT, SIZE_CONSTANT # pylint: disable=invalid-name return {power.name: A * len(power.centers) ** 2 + B * len(power.centers) + C for power in game.powers.values()} def get_defense_value(game, power_name, loc, power_sizes): """ Compute the defense value of a location (i.e. the power size of the largest power of an adjacent unit) :param game: An instance of `diplomacy.Game` :param power_name: The name of the power we are playing :param loc: The location for which we want to compute the defense value. :param power_sizes: Dictionary with the name of a power as key and (A*nb_sc^2 + B*nb_sc + C) as value :return: The location defense value. i.e. the power_size of the largest power with a unit that can move in. :type game: diplomacy.Game """ largest_power_size = 0 loc_with_coasts = game.map.find_coasts(loc) # Finding the largest enemy unit that can move to loc for power in game.powers.values(): if power.name == power_name: continue for unit in power.units: for dest in loc_with_coasts: if game.map.abuts(unit[0], unit[2:], '-', dest): largest_power_size = max(power_sizes[power.name], largest_power_size) break return largest_power_size def calculate_adjacent_unit_counts(game): """ Compute the number of units from a power that are adjacent to each location :param game: An instance of `diplomacy.Game` :param units_on_board: A set containing all the units on the board {'A PAR', 'F BRE', ...} :return: A dict with - loc as key and a dictionary of power_name as key and nb adj units from power as value e.g. 
{'PAR': {'FRANCE': 2, 'ENGLAND': 0, ...}} if 2 units for FRANCE can move in Paris, but none from England :type game: diplomacy.Game """ provinces = [loc.upper() for loc in game.map.locs if '/' not in loc] adjacent_unit_counts = {loc: {power_name: set() for power_name in game.powers} for loc in provinces} for dest in provinces: # Building a list of src locs that could move to dest src_locs = set() for dest_coast in game.map.find_coasts(dest): src_locs |= {loc.upper() for loc in game.map.abut_list(dest_coast, incl_no_coast=True)} for src in src_locs: # Trying to check if we have an occupant occupant = game._occupant(src) # STP -> A STP - pylint: disable=protected-access if occupant is None: continue # Finding if the occupant can move for dest_coast in game.map.find_coasts(dest): if game.map.abuts(occupant[0], occupant[2:], '-', dest_coast): break else: continue # Increasing the count of the owner occupant_owner = game._unit_owner(occupant) # pylint: disable=protected-access adjacent_unit_counts[dest][occupant_owner.name].add(occupant) # Returning the adjacent_unit_counts return {loc: {power_name: len(adjacent_unit_counts[loc][power_name]) for power_name in adjacent_unit_counts[loc]} for loc in adjacent_unit_counts} def calculate_dest_unit_value(factors, factor_weights, is_winter=False): """ Compute the destination value for each loc :param factors: An instance of `Factors` :param factor_weights: An instance of `FactorWeights` :param is_winter: Whether or not it's in adjustment phase :return: dest_unit_value. A dict with unit as key, and the unit value as value :type factors: Factors :type factor_weights: FactorWeights """ assert len(factors.proximity_maps) == len(factor_weights.proximity_weights), 'Different proximity lengths.' assert len(factors.proximity_maps) == PROXIMITY_DEPTHS, 'Expected %d proximity maps.' % PROXIMITY_DEPTHS # Destination value is computed by two parts: # 1. weighted sum of proximity values # 2. balance between competition and strength if not winter # 3. add defense value if winter. 
dest_unit_value = {loc: 0 for loc in factors.proximity_maps[0]} for unit in dest_unit_value: for prox_ix in range(PROXIMITY_DEPTHS): dest_unit_value[unit] += factor_weights.proximity_weights[prox_ix] * factors.proximity_maps[prox_ix][unit] if is_winter: dest_unit_value[unit] += factor_weights.defense_weight * factors.defense_map[unit[2:5]] else: dest_unit_value[unit] += factor_weights.strength_weight * factors.strength_map[unit[2:5]] dest_unit_value[unit] -= factor_weights.competition_weight * factors.competition_map[unit[2:5]] return dest_unit_value # --------------------------------- # DECODING # --------------------------------- def decode_orders(game, power_name, dest_unit_value, factors): """ Decode orders from computed factors :param game: An instance of `diplomacy.Game` :param power_name: The name of the power we are playing :param dest_unit_value: A dict with unit as key, and unit value as value :param factors: An instance of `Factors` :return: A list of orders :type factors: Factors :type game: diplomacy.Game """ phase_type = game.get_current_phase()[-1] # Movement phase if phase_type == 'M': return generate_movement_orders(game, power_name, dest_unit_value, factors) # Retreat Phaes if phase_type == 'R': return generate_retreat_orders(game, power_name, dest_unit_value) # Adjustment if phase_type == 'A': power = game.get_power(power_name) nb_builds = len(power.centers) - len(power.units) # Building if nb_builds >= 0: return generate_build_orders(game, power_name, dest_unit_value) # Disbanding return generate_disband_orders(game, power_name, dest_unit_value) # Otherwise, invalid phase_type LOGGER.error('Invalid phase type. Got %s. Expected M, R, A', phase_type) return [] def generate_movement_orders(game, power_name, dest_unit_value, factors): """ Generate movement orders :param game: An instance of `diplomacy.Game` :param power_name: The name of the power we are playing :param dest_unit_value: A dict with unit as key, and unit value as value :param factors: An instance of `Factors` :return: A list of orders :type factors: Factors :type game: diplomacy.Game """ # Shuffling units power = game.get_power(power_name) unordered_units = power.units[:] units = power.units[:] all_units = [unit for unit in dest_unit_value] random.shuffle(unordered_units) # Moving units: {unit: dest} e.g. 'F STP/NC -> BAR' would have {'F STP/NC': 'BAR'} moving_units = {} # Dependencies: List of locations that depend of the key # e.g. {'PAR': ['MAR', 'BRE']} indicates that MAR and BRE are waiting for (depends on) the PAR order dependencies = {unit[2:5]: [] for unit in units} # List of final orders - {province: order} orders = {} # Generating orders while unordered_units: curr_unit = unordered_units.pop(0) # Finding adjacent locs adj_locs = set() for coast in game.map.find_coasts(curr_unit[2:5]): adj_locs |= {loc.upper() for loc in game.map.abut_list(coast, incl_no_coast=True)} # Building a list of destinations in reverse order (i.e. 
destination with highest value first) # Including itself, but excluding dependencies dest_units = sorted([unit for unit in all_units if unit[2:] in adj_locs and game.map.abuts(curr_unit[0], curr_unit[2:], '-', unit[2:]) # Valid dest only and unit[2:5] not in dependencies[curr_unit[2:5]]] # except if waiting + [curr_unit], # including itself key=lambda unit: dest_unit_value[unit], reverse=True) # Picking destination selection_is_okay = False unit_ordered_to_move = True while not selection_is_okay: # Getting next destination selected_dest_unit = get_next_item(dest_units, dest_unit_value) selection_is_okay = True # Case 0 - Unit is holding (moving to same location) if selected_dest_unit[2:5] == curr_unit[2:5]: orders[curr_unit[2:5]] = '{} H'.format(curr_unit) break # Case 1 - Deal with occupying situation unit_occupying = [unit for unit in units if unit[2:5] == selected_dest_unit[2:5]] unit_occupying = None if not unit_occupying else unit_occupying[0] # If occupying unit is not ordered, insert current unit back after occupying unit # since we can't decide yet. if unit_occupying and unit_occupying[2:5] not in orders: unordered_units.insert(unordered_units.index(unit_occupying) + 1, curr_unit) unit_ordered_to_move = False dependencies[unit_occupying[2:5]] += [curr_unit[2:5]] # If occupying unit is not moving # Check if it needs support, otherwise the destination is not acceptable. elif unit_occupying and unit_occupying not in moving_units: if factors.competition_map[unit_occupying[2:5]] > 1: orders[curr_unit[2:5]] = '{} S {}'.format(curr_unit, unit_occupying) unit_ordered_to_move = False else: selection_is_okay = False dest_units.remove(selected_dest_unit) # Case 2 - Deal with units moving to the same location if selection_is_okay: unit_moving = [unit for unit, dest in moving_units.items() if dest[:3] == selected_dest_unit[2:5]] unit_moving = None if not unit_moving else unit_moving[0] # Support is someone already move in and there is competition on that location # Otherwise, the destination is not acceptable if unit_moving: if factors.competition_map[selected_dest_unit[2:5]] > 0: orders[curr_unit[2:5]] = '{} S {} - {}'.format(curr_unit, unit_moving, moving_units[unit_moving][:3]) unit_ordered_to_move = False else: selection_is_okay = False dest_units.remove(selected_dest_unit) # Ready to issue move order if selection_is_okay and unit_ordered_to_move: orders[curr_unit[2:5]] = '{} - {}'.format(curr_unit, selected_dest_unit[2:]) moving_units[curr_unit] = selected_dest_unit[2:] # Check for wasted holds orders = check_wasted_holds(game, orders, moving_units, dest_unit_value, factors) # Extract orders from order details return list(orders.values()) def generate_retreat_orders(game, power_name, dest_unit_value): """ Generate retreat orders :param game: An instance of `diplomacy.Game` :param power_name: The name of the power we are playing :param dest_unit_value: A dict with unit as key, and unit value as value :param factors: An instance of `Factors` :return: A list of orders :type factors: Factors :type game: diplomacy.Game """ # Shuffling units power = game.get_power(power_name) unordered_units = [unit for unit in power.retreats] all_units = [unit for unit in dest_unit_value] random.shuffle(unordered_units) # Moving units: {unit: dest} e.g. 
'F STP/NC -> BAR' would have {'F STP/NC': 'BAR'} moving_units = {} # List of final orders - {province: order} orders = {} # Generating orders while unordered_units: curr_unit = unordered_units.pop(0) # Finding adjacent locs adj_locs = set() for coast in game.map.find_coasts(curr_unit[2:5]): adj_locs |= {loc.upper() for loc in game.map.abut_list(coast, incl_no_coast=True)} # Building a list of destinations in reverse order (i.e. destination with highest value first) dest_units = sorted([unit for unit in all_units if unit[2:] in adj_locs and game.map.abuts(curr_unit[0], curr_unit[2:], '-', unit[2:])], # Valid dest only key=lambda unit: dest_unit_value[unit], reverse=True) # Picking destination selection_is_okay = False while not selection_is_okay: # No destination - Disbanding if not dest_units: orders[curr_unit[2:5]] = '{} D'.format(curr_unit) break # Getting next destination selected_dest_unit = get_next_item(dest_units, dest_unit_value) selection_is_okay = True # Selecting next destination if there is already a moving unit unit_moving = [unit for unit, dest in moving_units.items() if dest[:3] == selected_dest_unit[2:5]] unit_moving = None if not unit_moving else unit_moving[0] if unit_moving: selection_is_okay = False dest_units.remove(selected_dest_unit) # Check if that destination is already occupied occupant = game._occupant(selected_dest_unit[2:5], any_coast=1) # pylint: disable=protected-access if occupant: selection_is_okay = False dest_units.remove(selected_dest_unit) # Otherwise, it's okay to retreat there if selection_is_okay: orders[curr_unit[2:5]] = '{} R {}'.format(curr_unit, selected_dest_unit[2:]) moving_units[curr_unit] = selected_dest_unit[2:] # Returning orders return list(orders.values()) def generate_build_orders(game, power_name, dest_unit_value): """ Generate build orders :param game: An instance of `diplomacy.Game` :param power_name: The name of the power we are playing :param dest_unit_value: A dict with unit as key, and unit value as value :param factors: An instance of `Factors` :return: A list of orders :type factors: Factors :type game: diplomacy.Game """ open_homes = game.get_orderable_locations(power_name) power = game.get_power(power_name) nb_builds = min(len(open_homes), len(power.centers) - len(power.units)) # Getting the list of possible units that can be built # Sorted by decreasing value sorted_units = sorted(['{} {}'.format(unit_type, coast) for unit_type in 'AF' for loc in open_homes for coast in game.map.find_coasts(loc) if game.map.is_valid_unit('{} {}'.format(unit_type, coast))], key=lambda unit: dest_unit_value[unit], reverse=True) # Generating orders orders = {} # {province: order} while len(orders) < nb_builds and sorted_units: selected_unit = get_next_item(sorted_units, dest_unit_value) orders[selected_unit[2:5]] = '{} B'.format(selected_unit) sorted_units = [unit for unit in sorted_units if unit[2:5] != selected_unit[2:5]] # Returning return list(orders.values()) def generate_disband_orders(game, power_name, dest_unit_value): """ Generate disband orders :param game: An instance of `diplomacy.Game` :param power_name: The name of the power we are playing :param dest_unit_value: A dict with unit as key, and unit value as value :param factors: An instance of `Factors` :return: A list of orders :type factors: Factors :type game: diplomacy.Game """ power = game.get_power(power_name) nb_disbands = abs(len(power.centers) - len(power.units)) # Getting the list of units that can be disbanded # Sorted by increasing value sorted_units = sorted([unit for 
unit in power.units], key=lambda unit: dest_unit_value[unit]) # Generating orders orders = {} # {province: order} for _ in range(nb_disbands): selected_unit = get_next_item(sorted_units, dest_unit_value) orders[selected_unit[2:5]] = '{} D'.format(selected_unit) sorted_units = [unit for unit in sorted_units if unit != selected_unit] # Returning return list(orders.values()) def check_wasted_holds(game, orders, moving_units, dest_unit_value, factors): """ Replace unnecessary holds with a support if possible :param game: An instance of `diplomacy.Game` :param orders: A dictionary with the province as key and the order for the unit at that province as value :param dest_unit_value: A dict with unit as key, and unit value as value :param factors: An instance of `Factors` :return: An updated orders dictionary :type factors: Factors :type game: diplomacy.Game """ holding_units = [' '.join(order.split()[:2]) for order in orders.values() if order.split()[-1] == 'H'] for unit in holding_units: # Track the best destination we could support max_dest_value = 0. other_unit = None other_unit_dest = None # Destinations that the unit can move to for other_loc in [loc.upper() for loc in game.map.abut_list(unit[2:]) if game.map.abuts(unit[0], unit[2:], '-', loc.upper())]: # There is a moving unit there and it needs support # Recording unit if it has the best value unit_moving = [unit for unit, dest in moving_units.items() if dest[:3] == other_loc[:3]] unit_moving = None if not unit_moving else unit_moving[0] if unit_moving and factors.competition_map[other_loc[:3]] > 0: if dest_unit_value[unit_moving] > max_dest_value: max_dest_value = dest_unit_value[unit_moving] other_unit_dest = other_loc other_unit = unit_moving # Checking if there is a unit occupying the location, not moving and needing support unit_occupying = [' '.join(order.split()[:2]) for loc, order in orders.items() if loc == other_loc] unit_occupying = None if not unit_occupying else unit_occupying[0] if unit_occupying and unit_occupying not in moving_units and factors.competition_map[other_loc[:3]] > 1: if dest_unit_value[unit_occupying] > max_dest_value: max_dest_value = dest_unit_value[unit_occupying] other_unit_dest = other_loc other_unit = unit_occupying # If there is something worth supporting, changing the H to a S if max_dest_value > 0: if other_unit[2:5] == other_unit_dest[:3]: orders[unit[2:5]] = '{} S {}'.format(unit, other_unit) else: orders[unit[2:5]] = '{} S {} - {}'.format(unit, other_unit, other_unit_dest) # Returning orders return orders def get_next_item(sorted_units, dest_unit_value): """ Selects the next destination :param sorted_units: A sorted list of units (increasing or decreasing) :param dest_unit_value: A dict with unit as key, and unit value as value :return: The next item """ item_ix = 0 while True: # Last item if item_ix + 1 == len(sorted_units): break # Determining whether or not to pick the item curr_item_value = dest_unit_value[sorted_units[item_ix + 0]] next_item_value = dest_unit_value[sorted_units[item_ix + 1]] if curr_item_value == 0: next_chance = 0 else: next_chance = abs(curr_item_value - next_item_value) * ALTERNATIVE_DIFF_MODIFIER / curr_item_value # Selecting next move if PLAY_ALTERNATIVE > random.random() >= next_chance: item_ix += 1 continue # Otherwise, selecting the current move break # Returning the chosen item return sorted_units[item_ix]
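# --- Toy check of the destination randomisation (illustration only) ----------
# The units and values below are invented. With PLAY_ALTERNATIVE = 0.5, a
# near-equal runner-up is picked a sizeable fraction of the time, while clearly
# inferior moves are effectively never chosen because their relative gap
# exceeds the threshold.
if __name__ == '__main__':
    demo_values = {'A PAR': 100.0, 'A MAR': 99.0, 'A BRE': 10.0}
    demo_sorted = sorted(demo_values, key=demo_values.get, reverse=True)
    picks = [get_next_item(demo_sorted, demo_values) for _ in range(1000)]
    print({unit: picks.count(unit) for unit in demo_sorted})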
PypiClean
/mcworldmanager-0.1.0.zip/mcworldmanager-0.1.0/README.md
# Minecraft World Checker

[![Github Project Stars](https://img.shields.io/github/stars/nolte/minecraft-world-manager.svg?label=Stars&style=social)](https://github.com/nolte/minecraft-world-manager)
[![Travis CI build status](https://travis-ci.org/nolte/minecraft-world-manager.svg?branch=master)](https://travis-ci.org/nolte/minecraft-world-manager)
[![CircleCI build status](https://circleci.com/gh/nolte/minecraft-world-manager.svg?style=svg)](https://circleci.com/gh/nolte/minecraft-world-manager)
[![Documentation Status](https://readthedocs.org/projects/minecraft-world-manager/badge/?version=latest)](https://minecraft-world-manager.readthedocs.io/en/stable/?badge=stable)
[![Github Issue Tracking](https://img.shields.io/github/issues-raw/nolte/minecraft-world-manager.svg)](https://github.com/nolte/minecraft-world-manager)
[![Github LatestRelease](https://img.shields.io/github/release/nolte/minecraft-world-manager.svg)](https://github.com/nolte/minecraft-world-manager)
[![CodeFactor](https://www.codefactor.io/repository/github/nolte/minecraft-world-manager/badge)](https://www.codefactor.io/repository/github/nolte/minecraft-world-manager)
[![microbadger image](https://images.microbadger.com/badges/image/nolte/minecraft-world-manager.svg)](https://microbadger.com/images/nolte/minecraft-world-manager)
[![version](https://images.microbadger.com/badges/version/nolte/minecraft-world-manager.svg)](https://microbadger.com/images/nolte/minecraft-world-manager)
[![docker stars](https://img.shields.io/docker/stars/nolte/minecraft-world-manager.svg?style=flat)](https://hub.docker.com/r/nolte/minecraft-world-manager)
[![docker pulls](https://img.shields.io/docker/pulls/nolte/minecraft-world-manager.svg?style=flat)](https://hub.docker.com/r/nolte/minecraft-world-manager)
[![pypi.org version](https://img.shields.io/pypi/v/mcworldmanager.svg?style=flat)](https://pypi.org/project/mcworldmanager)

Inspired by [Fenixin/Minecraft-Region-Fixer](https://github.com/Fenixin/Minecraft-Region-Fixer), but with optimized command-line usage and integration into your backup process.

For more information, take a look at the [Documentation](https://nolte.github.io/minecraft-world-manager/).

## Features

- Scanning a single Region File
- Scanning a list of given Worlds
- Scanning all World Folders from a Minecraft Server Structure
- Saving the Report as a YAML File

## Supported Systems

To run the tool you need Python 3.5 or later, or you can use the preconfigured Docker container from [DockerHub](https://hub.docker.com/r/nolte/minecraft-world-manager).

## Example Calls

```bash
mcworldmanager server ~/repos-ansible/minecraft-server-project-repos/docker_compose-world-maps/worldfolder/world
```

```bash
mcworldmanager worlds ~/repos-ansible/minecraft-server-project-repos/docker_compose-world-maps/worldfolder/world
```

```bash
mcworldmanager region ~/repos-ansible/minecraft-server-project-repos/docker_compose-world-maps/worldfolder/world_flat/region/r.1.1.mca
```
PypiClean
/aioredis-cluster-2.5.0.tar.gz/aioredis-cluster-2.5.0/src/aioredis_cluster/command_info/__init__.py
import dataclasses from typing import AnyStr, FrozenSet, List, NoReturn, Sequence from aioredis_cluster.util import ensure_str from .commands import ( BLOCKING_COMMANDS, COMMANDS, EVAL_COMMANDS, XREAD_COMMAND, XREADGROUP_COMMAND, ZUNION_COMMANDS, ZUNIONSTORE_COMMANDS, ) __all__ = ( "CommandsRegistry", "CommandInfo", "CommandInfoError", "InvalidCommandError", "extract_keys", "create_registry", "unknown_command", ) class CommandInfoError(Exception): pass class InvalidCommandError(CommandInfoError): pass def _raise_wrong_num_of_arguments(cmd: "CommandInfo") -> NoReturn: raise InvalidCommandError(f"Wrong number of arguments for {cmd.name!r} command") @dataclasses.dataclass class CommandInfo: name: str arity: int flags: FrozenSet[str] first_key_arg: int last_key_arg: int key_args_step: int _is_unknown: bool = False def is_readonly(self) -> bool: return "readonly" in self.flags def is_blocking(self) -> bool: return self.name in BLOCKING_COMMANDS def is_unknown(self) -> bool: return self._is_unknown class CommandsRegistry: def __init__(self, commands: Sequence[CommandInfo]) -> None: self._commands = {cmd.name: cmd for cmd in commands} def get_info(self, cmd: AnyStr) -> CommandInfo: cmd_name = ensure_str(cmd).upper() try: info = self._commands[cmd_name] except KeyError: return unknown_command(cmd_name) return info def size(self) -> int: return len(self._commands) def _extract_keys_general(info: CommandInfo, exec_command: Sequence[bytes]) -> List[bytes]: keys: List[bytes] = [] if info.first_key_arg <= 0: return [] if info.last_key_arg < 0: last_key_arg = len(exec_command) + info.last_key_arg else: last_key_arg = info.last_key_arg num_of_args = last_key_arg - info.first_key_arg + 1 if info.key_args_step > 1 and num_of_args % info.key_args_step != 0: _raise_wrong_num_of_arguments(info) for key_idx in range(info.first_key_arg, last_key_arg + 1, info.key_args_step): keys.append(exec_command[key_idx]) return keys def _extract_keys_eval(info: CommandInfo, exec_command: Sequence[bytes]) -> List[bytes]: abs_arity = abs(info.arity) num_of_keys = int(exec_command[abs_arity - 1]) keys = exec_command[abs_arity : abs_arity + num_of_keys] if len(keys) != num_of_keys: _raise_wrong_num_of_arguments(info) return list(keys) def _extract_keys_zunion( info: CommandInfo, exec_command: Sequence[bytes], store: bool, ) -> List[bytes]: keys: List[bytes] = [] if store: keys.append(exec_command[1]) # dest key + numkeys arguments num_of_keys = int(exec_command[2]) + 1 first_key_arg = 3 last_key_arg = first_key_arg + num_of_keys - 2 else: num_of_keys = int(exec_command[1]) first_key_arg = 2 last_key_arg = first_key_arg + num_of_keys - 1 if num_of_keys == 0: _raise_wrong_num_of_arguments(info) keys.extend(exec_command[first_key_arg : last_key_arg + 1]) if len(keys) != num_of_keys: _raise_wrong_num_of_arguments(info) return keys _STREAMS_OPTION = frozenset((b"STREAMS", b"streams")) def _extract_keys_xread( info: CommandInfo, exec_command: Sequence[bytes], read_group: bool, ) -> List[bytes]: exec_command_len = len(exec_command) first_key_arg = 0 if read_group: first_key_find_range = range(4, min(10, exec_command_len + 1)) else: first_key_find_range = range(1, min(6, exec_command_len + 1)) for idx in first_key_find_range: if exec_command[idx] in _STREAMS_OPTION: first_key_arg = idx + 1 break if first_key_arg == 0: _raise_wrong_num_of_arguments(info) num_of_stream_args = exec_command_len - first_key_arg if num_of_stream_args % 2 == 1: _raise_wrong_num_of_arguments(info) nom_of_keys = num_of_stream_args // 2 return 
list(exec_command[first_key_arg : first_key_arg + nom_of_keys]) def _extract_params_check(info: CommandInfo, exec_command: Sequence[bytes]) -> None: if len(exec_command) < 1: raise ValueError("Execute command is empty") cmd_name = ensure_str(exec_command[0]).upper() if info.name != cmd_name: raise ValueError(f"Incorrect info command: {info.name} != {cmd_name}") if info.arity > 0 and len(exec_command) > info.arity or len(exec_command) < abs(info.arity): _raise_wrong_num_of_arguments(info) def extract_keys(info: CommandInfo, exec_command: Sequence[bytes]) -> List[bytes]: _extract_params_check(info, exec_command) # special parsing for command if info.name in EVAL_COMMANDS: keys = _extract_keys_eval(info, exec_command) elif info.name in ZUNION_COMMANDS: keys = _extract_keys_zunion(info, exec_command, False) elif info.name in ZUNIONSTORE_COMMANDS: keys = _extract_keys_zunion(info, exec_command, True) elif info.name == XREAD_COMMAND: keys = _extract_keys_xread(info, exec_command, False) elif info.name == XREADGROUP_COMMAND: keys = _extract_keys_xread(info, exec_command, True) else: keys = _extract_keys_general(info, exec_command) return keys def create_registry(raw_commands: Sequence[List]) -> CommandsRegistry: cmds = [] for raw_cmd in raw_commands: first_key_arg = raw_cmd[3] last_key_arg = raw_cmd[4] key_args_step = raw_cmd[5] if first_key_arg >= 1 and (key_args_step == 0 or last_key_arg == 0): raise ValueError("Incorrect command") cmd = CommandInfo( name=raw_cmd[0].upper(), arity=raw_cmd[1], flags=frozenset(raw_cmd[2]), first_key_arg=first_key_arg, last_key_arg=last_key_arg, key_args_step=key_args_step, ) cmds.append(cmd) return CommandsRegistry(cmds) def unknown_command(name: str) -> CommandInfo: return CommandInfo( name=name, arity=0, flags=frozenset(), first_key_arg=0, last_key_arg=0, key_args_step=0, _is_unknown=True, ) default_registry = create_registry(COMMANDS)
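# --- Usage sketch (illustration only) -----------------------------------------
# Hand-written command metadata in the same [name, arity, flags, first_key,
# last_key, step] shape that the Redis COMMAND reply uses; real code passes
# COMMANDS (above) or the server's own reply to create_registry().
if __name__ == '__main__':
    demo_registry = create_registry([
        ["get", 2, ["readonly", "fast"], 1, 1, 1],
        ["mset", -3, ["write", "denyoom"], 1, -1, 2],
    ])
    mset_info = demo_registry.get_info(b"MSET")
    print(mset_info.is_readonly())                                         # False
    print(extract_keys(mset_info, [b"MSET", b"k1", b"v1", b"k2", b"v2"]))  # [b'k1', b'k2']
    print(demo_registry.get_info("FOOBAR").is_unknown())                   # True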
PypiClean
/pyimfit-0.12.0.tar.gz/pyimfit-0.12.0/docs/index.rst
.. Pyimfit documentation master file, created by
   sphinx-quickstart on Mon Jan 28 14:24:16 2019.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Documentation for PyImfit
===================================

.. toctree::
   :maxdepth: 3
   :caption: OVERVIEW AND SAMPLE USAGE:

   installation
   sample_usage
   overview

.. toctree::
   :maxdepth: 2
   :caption: USER DOCUMENTATION:

   defining_models
   psf_convolution
   fit_statistics_and_solvers
   bootstrap
   pyimfit_emcee
   pyimfit_bootstrap_BtoT
   acknowledgments

.. toctree::
   :maxdepth: 2
   :caption: API DOCUMENTATION:

   api_ref/api_index

`Imfit manual (PDF) <https://www.mpe.mpg.de/~erwin/resources/imfit/imfit_howto.pdf>`_
PypiClean
/seven_wxapp-1.0.14.7-py3-none-any.whl/seven_wxapp/handlers/base/seven_helper.py
from seven_framework import *
import random
import hashlib
import datetime


class SevenHelper:
    """
    :description: Common helper class
    :last_editors: HuangJingCan
    """
    @classmethod
    def merge_dict_list(self, source_dict_list, source_key, merge_dict_list, merge_key, merge_columns_names):
        """
        :description: Merge two lists of dictionaries
        :param source_dict_list: source dictionary list
        :param source_key: field in the source list used for joining
        :param merge_dict_list: dictionary list to merge in
        :param merge_key: field in the merge list used for joining
        :param merge_columns_names: fields from the merge list to include in the result
        :return:
        :last_editors: HuangJingCan
        """
        result = []
        for source_dict in source_dict_list:
            info_list = [i for i in merge_dict_list if source_dict[source_key] != "" and i[merge_key] == source_dict[source_key]]
            if info_list:
                list_key = list(merge_columns_names.split(","))
                source_dict = dict(source_dict, **dict.fromkeys(list_key))
                for item in list_key:
                    source_dict[item] = info_list[0].get(item)
            else:
                list1 = list(merge_columns_names.split(","))
                source_dict = dict(source_dict, **dict.fromkeys(list1))
            result.append(source_dict)
        return result

    @classmethod
    def get_now_int(self, hours=0):
        """
        :description: Get the current time as an integer in the format yyyyMMddHHmmss, e.g. 2009-12-27 09:10:10 is represented as 20091227091010
        :return:
        :last_editors: HuangJianYi
        """
        now_date = (datetime.datetime.now() + datetime.timedelta(hours=hours))
        return int(int(now_date.strftime('%Y%m%d%H%M%S')))

    @classmethod
    def get_now_day_int(self, hours=0):
        """
        :description: Get the current day as an integer, e.g. 20200506
        :return:
        :last_editors: HuangJianYi
        """
        now_date = (datetime.datetime.now() + datetime.timedelta(hours=hours))
        now_day = int(TimeHelper.datetime_to_format_time(now_date, "%Y%m%d"))
        return now_day

    @classmethod
    def get_now_month_int(self, hours=0):
        """
        :description: Get the current month as an integer, e.g. 202005
        :return:
        :last_editors: HuangJianYi
        """
        now_date = (datetime.datetime.now() + datetime.timedelta(hours=hours))
        now_month = int(TimeHelper.datetime_to_format_time(now_date, "%Y%m"))
        return now_month

    @classmethod
    def get_random(self, num, many):
        """
        :description: Generate random strings
        :param num: number of characters per string
        :param many: number of strings
        :return: str
        :last_editors: HuangJianYi
        """
        result = ""
        for x in range(many):
            s = ""
            for i in range(num):
                # n=1 generates a digit, n=2 generates a letter
                n = random.randint(1, 2)
                if n == 1:
                    numb = random.randint(0, 9)
                    s += str(numb)
                else:
                    nn = random.randint(1, 2)
                    cc = random.randint(1, 26)
                    if nn == 1:
                        numb = chr(64 + cc)
                        s += numb
                    else:
                        numb = chr(96 + cc)
                        s += numb
            result += s
        return result

    @classmethod
    def is_ip(self, ip_str):
        """
        :description: Check whether a string is an IP address
        :param ip_str: ip string
        :return:
        :last_editors: HuangJianYi
        """
        p = re.compile(r'^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$')
        if p.match(str(ip_str)):
            return True
        return False

    @classmethod
    def get_condition_by_str_list(self, field_name, str_list):
        """
        :description: Build an SQL IN condition from a list of strings
        :param field_name: field name
        :param str_list: list of strings
        :return:
        :last_editors: HuangJianYi
        """
        if not str_list:
            return ""
        list_str = ','.join(["'%s'" % str(item) for item in str_list])
        return f"{field_name} IN({list_str})"

    @classmethod
    def get_condition_by_int_list(self, field_name, int_list=None):
        '''
        :description: Build an SQL IN condition from a list of integers
        :param field_name: field name
        :param int_list: list of integers
        :return: str
        :last_editors: HuangJianYi
        '''
        if not int_list:
            return ""
        list_str = str(int_list).strip('[').strip(']')
        return f"{field_name} IN({list_str})"

    @classmethod
    def get_page_count(self, page_size, record_count):
        """
        @description: Calculate the number of pages
        @param page_size: page size
        @param record_count: total number of records
        @return: number of pages
        @last_editors: HuangJingCan
        """
        # guard first to avoid a ZeroDivisionError when page_size is 0
        if page_size == 0:
            return 0
        page_count = record_count / page_size + 1
        if record_count % page_size == 0:
            page_count = record_count / page_size
        page_count = int(page_count)
        return page_count

    @classmethod
    def create_order_id(self, ran=5):
        """
        :description: Generate an order id
        :param ran: number of random digits, default 5 (0-5)
        :return: a 25-digit order id
        :last_editors: HuangJianYi
        """
        ran_num = ""
        if ran == 1:
            ran_num = random.randint(0, 9)
        elif ran == 2:
            ran_num = random.randint(10, 99)
        elif ran == 3:
            ran_num = random.randint(100, 999)
        elif ran == 4:
            ran_num = random.randint(1000, 9999)
        elif ran == 5:
            ran_num = random.randint(10000, 99999)
        # cur_time = TimeHelper.get_now_format_time('%Y%m%d%H%M%S%f')
        cur_time = TimeHelper.get_now_timestamp(True)
        order_id = str(cur_time) + str(ran_num)
        return order_id
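# --- Usage sketch (illustration only; sample data is made up) -----------------
if __name__ == '__main__':
    orders = [{"order_id": 1, "user_id": 100}, {"order_id": 2, "user_id": 200}]
    users = [{"id": 100, "nick_name": "alice"}, {"id": 200, "nick_name": "bob"}]

    # attach nick_name from users onto each order row
    merged = SevenHelper.merge_dict_list(orders, "user_id", users, "id", "nick_name")
    print(merged)  # [{'order_id': 1, 'user_id': 100, 'nick_name': 'alice'}, ...]

    print(SevenHelper.get_page_count(20, 101))  # 6 pages for 101 records
    print(SevenHelper.create_order_id())        # timestamp-based id with 5 random digits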
PypiClean
/convertbng-0.6.41.tar.gz/convertbng-0.6.41/release.py
import io
import tarfile
import zipfile
import requests
from subprocess import check_output
from multiprocessing import Pool
from urllib.parse import urlsplit

path = "dist/"
url = "https://github.com/urschrei/convertbng/releases/download/{tag}/convertbng-{tag}-{target}.{extension}"

# get latest tag
tag = check_output(["git", "describe", "--abbrev=0", "--tags"]).strip().decode()

releases = [
    {"tag": tag, "target": "x86_64-apple-darwin-cp27", "extension": "tar.gz"},
    {"tag": tag, "target": "x86_64-apple-darwin-cp36", "extension": "tar.gz"},
    {"tag": tag, "target": "x86_64-apple-darwin-cp37", "extension": "tar.gz"},
    {"tag": tag, "target": "x86_64-apple-darwin-cp38", "extension": "tar.gz"},
    {"tag": tag, "target": "x86_64-pc-windows-msvc-cp37", "extension": "zip"},
    {"tag": tag, "target": "i686-pc-windows-msvc-cp37", "extension": "zip"},
    {"tag": tag, "target": "i686-pc-windows-msvc-cp27", "extension": "zip"},
    {"tag": tag, "target": "x86_64-unknown-linux-gnu", "extension": "tar.gz"},
    {"tag": tag, "target": "x86_64-pc-windows-msvc-cp27", "extension": "zip"},
    {"tag": tag, "target": "i686-pc-windows-msvc-cp27", "extension": "zip"},
    {"tag": tag, "target": "x86_64-pc-windows-msvc-cp36", "extension": "zip"},
    {"tag": tag, "target": "i686-pc-windows-msvc-cp36", "extension": "zip"},
]


def retrieve(url):
    sess = requests.Session()
    print("Getting %s" % urlsplit(url).path.split("/")[-1])
    retrieved = sess.get(url, stream=True)
    # don't continue if something's wrong
    retrieved.raise_for_status()
    try:
        raw_zip = zipfile.ZipFile(io.BytesIO(retrieved.content))
        raw_zip.extractall(path)
    except zipfile.BadZipfile:
        # it's a tar
        tar = tarfile.open(mode="r:gz", fileobj=io.BytesIO(retrieved.content))
        tar.extractall(path)


urls = (url.format(**release) for release in releases)
# let's do this in parallel
pool = Pool(processes=len(releases))
# we could use map, but it consumes the entire iterable (doesn't matter for small n)
res = pool.map_async(retrieve, urls)
# need these if we use _async
pool.close()
pool.join()
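The interesting reusable piece of retrieve() above is the "try ZIP, fall back to gzipped tar" extraction. A small standalone sketch of that pattern, operating on an in-memory byte buffer, is below; the function name and destination argument are illustrative, not part of the release script.

import io
import tarfile
import zipfile

def extract_archive(data: bytes, dest: str) -> None:
    # Try ZIP first; if the payload is not a ZIP, treat it as a gzipped tar,
    # mirroring the try/except in retrieve() above.
    buf = io.BytesIO(data)
    try:
        with zipfile.ZipFile(buf) as zf:
            zf.extractall(dest)
    except zipfile.BadZipFile:
        buf.seek(0)
        with tarfile.open(mode="r:gz", fileobj=buf) as tf:
            tf.extractall(dest)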
PypiClean
/criteo_api_marketingsolutions_sdk-2023.7.0.230831-py3-none-any.whl/criteo_api_marketingsolutions_v2023_07/model/nillable_decimal.py
import re # noqa: F401 import sys # noqa: F401 from criteo_api_marketingsolutions_v2023_07.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from criteo_api_marketingsolutions_v2023_07.exceptions import ApiAttributeError class NillableDecimal(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { 'value': (float, none_type,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'value': 'value', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, value, *args, **kwargs): # noqa: E501 """NillableDecimal - a model defined in OpenAPI Args: value (float, none_type): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. 
For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', True) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: for arg in args: if isinstance(arg, dict): kwargs.update(arg) else: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.value = value for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, value, *args, **kwargs): # noqa: E501 """NillableDecimal - a model defined in OpenAPI Args: value (float, none_type): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. 
Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: for arg in args: if isinstance(arg, dict): kwargs.update(arg) else: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.value = value for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
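A minimal usage sketch for the generated model above: value may be a float or None, per openapi_types. The import path simply mirrors the file location shown in the package header and should be treated as an assumption if your installed SDK version lays the modules out differently.

from criteo_api_marketingsolutions_v2023_07.model.nillable_decimal import NillableDecimal

budget = NillableDecimal(value=125.5)    # float is accepted
cleared = NillableDecimal(value=None)    # none_type is also allowed
print(budget.value, cleared.value)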
PypiClean
/smartsheet-python-sdk-py39-2.105.1.10.tar.gz/smartsheet-python-sdk-py39-2.105.1.10/docs/_static/underscore.js
(function(){function q(a,c,d){if(a===c)return a!==0||1/a==1/c;if(a==null||c==null)return a===c;if(a._chain)a=a._wrapped;if(c._chain)c=c._wrapped;if(a.isEqual&&b.isFunction(a.isEqual))return a.isEqual(c);if(c.isEqual&&b.isFunction(c.isEqual))return c.isEqual(a);var e=l.call(a);if(e!=l.call(c))return false;switch(e){case "[object String]":return a==String(c);case "[object Number]":return a!=+a?c!=+c:a==0?1/a==1/c:a==+c;case "[object Date]":case "[object Boolean]":return+a==+c;case "[object RegExp]":return a.source== c.source&&a.global==c.global&&a.multiline==c.multiline&&a.ignoreCase==c.ignoreCase}if(typeof a!="object"||typeof c!="object")return false;for(var f=d.length;f--;)if(d[f]==a)return true;d.push(a);var f=0,g=true;if(e=="[object Array]"){if(f=a.length,g=f==c.length)for(;f--;)if(!(g=f in a==f in c&&q(a[f],c[f],d)))break}else{if("constructor"in a!="constructor"in c||a.constructor!=c.constructor)return false;for(var h in a)if(b.has(a,h)&&(f++,!(g=b.has(c,h)&&q(a[h],c[h],d))))break;if(g){for(h in c)if(b.has(c, h)&&!f--)break;g=!f}}d.pop();return g}var r=this,G=r._,n={},k=Array.prototype,o=Object.prototype,i=k.slice,H=k.unshift,l=o.toString,I=o.hasOwnProperty,w=k.forEach,x=k.map,y=k.reduce,z=k.reduceRight,A=k.filter,B=k.every,C=k.some,p=k.indexOf,D=k.lastIndexOf,o=Array.isArray,J=Object.keys,s=Function.prototype.bind,b=function(a){return new m(a)};if(typeof exports!=="undefined"){if(typeof module!=="undefined"&&module.exports)exports=module.exports=b;exports._=b}else r._=b;b.VERSION="1.3.1";var j=b.each= b.forEach=function(a,c,d){if(a!=null)if(w&&a.forEach===w)a.forEach(c,d);else if(a.length===+a.length)for(var e=0,f=a.length;e<f;e++){if(e in a&&c.call(d,a[e],e,a)===n)break}else for(e in a)if(b.has(a,e)&&c.call(d,a[e],e,a)===n)break};b.map=b.collect=function(a,c,b){var e=[];if(a==null)return e;if(x&&a.map===x)return a.map(c,b);j(a,function(a,g,h){e[e.length]=c.call(b,a,g,h)});if(a.length===+a.length)e.length=a.length;return e};b.reduce=b.foldl=b.inject=function(a,c,d,e){var f=arguments.length>2;a== null&&(a=[]);if(y&&a.reduce===y)return e&&(c=b.bind(c,e)),f?a.reduce(c,d):a.reduce(c);j(a,function(a,b,i){f?d=c.call(e,d,a,b,i):(d=a,f=true)});if(!f)throw new TypeError("Reduce of empty array with no initial value");return d};b.reduceRight=b.foldr=function(a,c,d,e){var f=arguments.length>2;a==null&&(a=[]);if(z&&a.reduceRight===z)return e&&(c=b.bind(c,e)),f?a.reduceRight(c,d):a.reduceRight(c);var g=b.toArray(a).reverse();e&&!f&&(c=b.bind(c,e));return f?b.reduce(g,c,d,e):b.reduce(g,c)};b.find=b.detect= function(a,c,b){var e;E(a,function(a,g,h){if(c.call(b,a,g,h))return e=a,true});return e};b.filter=b.select=function(a,c,b){var e=[];if(a==null)return e;if(A&&a.filter===A)return a.filter(c,b);j(a,function(a,g,h){c.call(b,a,g,h)&&(e[e.length]=a)});return e};b.reject=function(a,c,b){var e=[];if(a==null)return e;j(a,function(a,g,h){c.call(b,a,g,h)||(e[e.length]=a)});return e};b.every=b.all=function(a,c,b){var e=true;if(a==null)return e;if(B&&a.every===B)return a.every(c,b);j(a,function(a,g,h){if(!(e= e&&c.call(b,a,g,h)))return n});return e};var E=b.some=b.any=function(a,c,d){c||(c=b.identity);var e=false;if(a==null)return e;if(C&&a.some===C)return a.some(c,d);j(a,function(a,b,h){if(e||(e=c.call(d,a,b,h)))return n});return!!e};b.include=b.contains=function(a,c){var b=false;if(a==null)return b;return p&&a.indexOf===p?a.indexOf(c)!=-1:b=E(a,function(a){return a===c})};b.invoke=function(a,c){var d=i.call(arguments,2);return b.map(a,function(a){return(b.isFunction(c)?c||a:a[c]).apply(a,d)})};b.pluck= 
function(a,c){return b.map(a,function(a){return a[c]})};b.max=function(a,c,d){if(!c&&b.isArray(a))return Math.max.apply(Math,a);if(!c&&b.isEmpty(a))return-Infinity;var e={computed:-Infinity};j(a,function(a,b,h){b=c?c.call(d,a,b,h):a;b>=e.computed&&(e={value:a,computed:b})});return e.value};b.min=function(a,c,d){if(!c&&b.isArray(a))return Math.min.apply(Math,a);if(!c&&b.isEmpty(a))return Infinity;var e={computed:Infinity};j(a,function(a,b,h){b=c?c.call(d,a,b,h):a;b<e.computed&&(e={value:a,computed:b})}); return e.value};b.shuffle=function(a){var b=[],d;j(a,function(a,f){f==0?b[0]=a:(d=Math.floor(Math.random()*(f+1)),b[f]=b[d],b[d]=a)});return b};b.sortBy=function(a,c,d){return b.pluck(b.map(a,function(a,b,g){return{value:a,criteria:c.call(d,a,b,g)}}).sort(function(a,b){var c=a.criteria,d=b.criteria;return c<d?-1:c>d?1:0}),"value")};b.groupBy=function(a,c){var d={},e=b.isFunction(c)?c:function(a){return a[c]};j(a,function(a,b){var c=e(a,b);(d[c]||(d[c]=[])).push(a)});return d};b.sortedIndex=function(a, c,d){d||(d=b.identity);for(var e=0,f=a.length;e<f;){var g=e+f>>1;d(a[g])<d(c)?e=g+1:f=g}return e};b.toArray=function(a){return!a?[]:a.toArray?a.toArray():b.isArray(a)?i.call(a):b.isArguments(a)?i.call(a):b.values(a)};b.size=function(a){return b.toArray(a).length};b.first=b.head=function(a,b,d){return b!=null&&!d?i.call(a,0,b):a[0]};b.initial=function(a,b,d){return i.call(a,0,a.length-(b==null||d?1:b))};b.last=function(a,b,d){return b!=null&&!d?i.call(a,Math.max(a.length-b,0)):a[a.length-1]};b.rest= b.tail=function(a,b,d){return i.call(a,b==null||d?1:b)};b.compact=function(a){return b.filter(a,function(a){return!!a})};b.flatten=function(a,c){return b.reduce(a,function(a,e){if(b.isArray(e))return a.concat(c?e:b.flatten(e));a[a.length]=e;return a},[])};b.without=function(a){return b.difference(a,i.call(arguments,1))};b.uniq=b.unique=function(a,c,d){var d=d?b.map(a,d):a,e=[];b.reduce(d,function(d,g,h){if(0==h||(c===true?b.last(d)!=g:!b.include(d,g)))d[d.length]=g,e[e.length]=a[h];return d},[]); return e};b.union=function(){return b.uniq(b.flatten(arguments,true))};b.intersection=b.intersect=function(a){var c=i.call(arguments,1);return b.filter(b.uniq(a),function(a){return b.every(c,function(c){return b.indexOf(c,a)>=0})})};b.difference=function(a){var c=b.flatten(i.call(arguments,1));return b.filter(a,function(a){return!b.include(c,a)})};b.zip=function(){for(var a=i.call(arguments),c=b.max(b.pluck(a,"length")),d=Array(c),e=0;e<c;e++)d[e]=b.pluck(a,""+e);return d};b.indexOf=function(a,c, d){if(a==null)return-1;var e;if(d)return d=b.sortedIndex(a,c),a[d]===c?d:-1;if(p&&a.indexOf===p)return a.indexOf(c);for(d=0,e=a.length;d<e;d++)if(d in a&&a[d]===c)return d;return-1};b.lastIndexOf=function(a,b){if(a==null)return-1;if(D&&a.lastIndexOf===D)return a.lastIndexOf(b);for(var d=a.length;d--;)if(d in a&&a[d]===b)return d;return-1};b.range=function(a,b,d){arguments.length<=1&&(b=a||0,a=0);for(var d=arguments[2]||1,e=Math.max(Math.ceil((b-a)/d),0),f=0,g=Array(e);f<e;)g[f++]=a,a+=d;return g}; var F=function(){};b.bind=function(a,c){var d,e;if(a.bind===s&&s)return s.apply(a,i.call(arguments,1));if(!b.isFunction(a))throw new TypeError;e=i.call(arguments,2);return d=function(){if(!(this instanceof d))return a.apply(c,e.concat(i.call(arguments)));F.prototype=a.prototype;var b=new F,g=a.apply(b,e.concat(i.call(arguments)));return Object(g)===g?g:b}};b.bindAll=function(a){var c=i.call(arguments,1);c.length==0&&(c=b.functions(a));j(c,function(c){a[c]=b.bind(a[c],a)});return a};b.memoize=function(a, c){var 
d={};c||(c=b.identity);return function(){var e=c.apply(this,arguments);return b.has(d,e)?d[e]:d[e]=a.apply(this,arguments)}};b.delay=function(a,b){var d=i.call(arguments,2);return setTimeout(function(){return a.apply(a,d)},b)};b.defer=function(a){return b.delay.apply(b,[a,1].concat(i.call(arguments,1)))};b.throttle=function(a,c){var d,e,f,g,h,i=b.debounce(function(){h=g=false},c);return function(){d=this;e=arguments;var b;f||(f=setTimeout(function(){f=null;h&&a.apply(d,e);i()},c));g?h=true: a.apply(d,e);i();g=true}};b.debounce=function(a,b){var d;return function(){var e=this,f=arguments;clearTimeout(d);d=setTimeout(function(){d=null;a.apply(e,f)},b)}};b.once=function(a){var b=false,d;return function(){if(b)return d;b=true;return d=a.apply(this,arguments)}};b.wrap=function(a,b){return function(){var d=[a].concat(i.call(arguments,0));return b.apply(this,d)}};b.compose=function(){var a=arguments;return function(){for(var b=arguments,d=a.length-1;d>=0;d--)b=[a[d].apply(this,b)];return b[0]}}; b.after=function(a,b){return a<=0?b():function(){if(--a<1)return b.apply(this,arguments)}};b.keys=J||function(a){if(a!==Object(a))throw new TypeError("Invalid object");var c=[],d;for(d in a)b.has(a,d)&&(c[c.length]=d);return c};b.values=function(a){return b.map(a,b.identity)};b.functions=b.methods=function(a){var c=[],d;for(d in a)b.isFunction(a[d])&&c.push(d);return c.sort()};b.extend=function(a){j(i.call(arguments,1),function(b){for(var d in b)a[d]=b[d]});return a};b.defaults=function(a){j(i.call(arguments, 1),function(b){for(var d in b)a[d]==null&&(a[d]=b[d])});return a};b.clone=function(a){return!b.isObject(a)?a:b.isArray(a)?a.slice():b.extend({},a)};b.tap=function(a,b){b(a);return a};b.isEqual=function(a,b){return q(a,b,[])};b.isEmpty=function(a){if(b.isArray(a)||b.isString(a))return a.length===0;for(var c in a)if(b.has(a,c))return false;return true};b.isElement=function(a){return!!(a&&a.nodeType==1)};b.isArray=o||function(a){return l.call(a)=="[object Array]"};b.isObject=function(a){return a===Object(a)}; b.isArguments=function(a){return l.call(a)=="[object Arguments]"};if(!b.isArguments(arguments))b.isArguments=function(a){return!(!a||!b.has(a,"callee"))};b.isFunction=function(a){return l.call(a)=="[object Function]"};b.isString=function(a){return l.call(a)=="[object String]"};b.isNumber=function(a){return l.call(a)=="[object Number]"};b.isNaN=function(a){return a!==a};b.isBoolean=function(a){return a===true||a===false||l.call(a)=="[object Boolean]"};b.isDate=function(a){return l.call(a)=="[object Date]"}; b.isRegExp=function(a){return l.call(a)=="[object RegExp]"};b.isNull=function(a){return a===null};b.isUndefined=function(a){return a===void 0};b.has=function(a,b){return I.call(a,b)};b.noConflict=function(){r._=G;return this};b.identity=function(a){return a};b.times=function(a,b,d){for(var e=0;e<a;e++)b.call(d,e)};b.escape=function(a){return(""+a).replace(/&/g,"&amp;").replace(/</g,"&lt;").replace(/>/g,"&gt;").replace(/"/g,"&quot;").replace(/'/g,"&#x27;").replace(/\//g,"&#x2F;")};b.mixin=function(a){j(b.functions(a), function(c){K(c,b[c]=a[c])})};var L=0;b.uniqueId=function(a){var b=L++;return a?a+b:b};b.templateSettings={evaluate:/<%([\s\S]+?)%>/g,interpolate:/<%=([\s\S]+?)%>/g,escape:/<%-([\s\S]+?)%>/g};var t=/.^/,u=function(a){return a.replace(/\\\\/g,"\\").replace(/\\'/g,"'")};b.template=function(a,c){var d=b.templateSettings,d="var 
__p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push('"+a.replace(/\\/g,"\\\\").replace(/'/g,"\\'").replace(d.escape||t,function(a,b){return"',_.escape("+ u(b)+"),'"}).replace(d.interpolate||t,function(a,b){return"',"+u(b)+",'"}).replace(d.evaluate||t,function(a,b){return"');"+u(b).replace(/[\r\n\t]/g," ")+";__p.push('"}).replace(/\r/g,"\\r").replace(/\n/g,"\\n").replace(/\t/g,"\\t")+"');}return __p.join('');",e=new Function("obj","_",d);return c?e(c,b):function(a){return e.call(this,a,b)}};b.chain=function(a){return b(a).chain()};var m=function(a){this._wrapped=a};b.prototype=m.prototype;var v=function(a,c){return c?b(a).chain():a},K=function(a,c){m.prototype[a]= function(){var a=i.call(arguments);H.call(a,this._wrapped);return v(c.apply(b,a),this._chain)}};b.mixin(b);j("pop,push,reverse,shift,sort,splice,unshift".split(","),function(a){var b=k[a];m.prototype[a]=function(){var d=this._wrapped;b.apply(d,arguments);var e=d.length;(a=="shift"||a=="splice")&&e===0&&delete d[0];return v(d,this._chain)}});j(["concat","join","slice"],function(a){var b=k[a];m.prototype[a]=function(){return v(b.apply(this._wrapped,arguments),this._chain)}});m.prototype.chain=function(){this._chain= true;return this};m.prototype.value=function(){return this._wrapped}}).call(this);
PypiClean
/LitReview-0.6989ev.tar.gz/LitReview-0.6989ev/src/webapp/router.py
from flask import request, render_template, redirect, url_for, flash from flask.app import Flask from flask_login import login_required, current_user from queries.associate import link_paper, get_ref_summary, \ check_form_validity_and_convert_to_tasks from queries.misc import get_reftemps, get_recent_history, \ find_genes_in_abstract from queries.move_ref import move_reftemp_to_refbad, MoveRefException from webapp.forms import LoginForm from webapp.litreview_logger import log_it_info, log_it from webapp.login_handler import confirm_login_lit_review_user, \ logout_lit_review_user, login_lit_review_user, setup_app, LoginException, \ LogoutException, check_for_other_users import json import logging app = Flask(__name__) setup_app(app) model = None #Configure logger logging.basicConfig(filename='/www/logs/litreview_log', format='%(asctime)s %(levelname)s: %(message)s', level=logging.DEBUG) def setup_app(): setup_app(app) @app.route("/") def index(): labels = [] data = [] try: if not current_user.name == 'Anonymous': recent_history = model.execute(get_recent_history(), current_user.name) sorted_history = recent_history.items() sorted_history.sort() for k, v in sorted_history: labels.append(k.strftime("%m/%d")) data.append([v.refbad_count, v.ref_count]) except Exception as e: flash(str(e), 'error') return render_template("index.html", history_labels=labels, history_data=data) @app.route("/reference", methods=['GET', 'POST']) @login_required def reference(): refs=[] num_of_refs=0 try: check_for_other_users(current_user.name) refs = model.execute(get_reftemps(), current_user.name) num_of_refs = len(refs) except Exception as e: flash(str(e), 'error') return render_template('literature_review.html', ref_list=refs, ref_count=num_of_refs, user=current_user.name) @app.route("/reference/remove_multiple/<pmids>", methods=['GET', 'POST']) @login_required def remove_multiple(pmids): log_it_info('remove_multiple', 'BEGIN', str(pmids)) try: check_for_other_users(current_user.name) if request.method == "POST": to_be_removed = pmids.split('_') to_be_removed.remove('') for pmid in to_be_removed: moved = model.execute(move_reftemp_to_refbad(pmid), current_user.name, commit=True) if not moved: raise MoveRefException('An error occurred when deleting the reference for pmid=" + pmid + " from the database.') #Reference deleted flash("References for pmids= " + str(to_be_removed) + " have been removed from the database.", 'success') log_it_info('remove_multiple', 'SUCCESS') except Exception as e: flash(e.message, 'error') log_it_info('remove_multiple', 'FAILURE') logging.error(e.message) return redirect(request.args.get("next") or url_for("reference")) @app.route("/reference/extract_genes/<pmid>", methods=['GET']) def extract_genes(pmid): log_it_info('extract_genes', 'BEGIN', str(pmid)) try: check_for_other_users(current_user.name) words = model.execute(find_genes_in_abstract(pmid), current_user.name) feature_name_words = words['features'].keys() alias_name_words = words['aliases'].keys() message = 'No genes found.' 
feature_message = words['feature_message'] alias_message = words['alias_message'] if feature_message != '' and alias_message != '': message = feature_message + ', ' + alias_message elif feature_message != '': message = feature_message elif alias_message != '': message = alias_message log_it_info('extract_genes', 'SUCCESS') return_value = json.dumps({'message':message, 'highlight_red':list(alias_name_words), 'highlight_blue':list(feature_name_words)}) return return_value except Exception as e: flash(e.message, 'error') log_it_info('extract_genes', 'FAILURE') logging.error(e.message) return 'Error.' @app.route("/reference/delete/<pmid>", methods=['GET', 'POST']) @login_required def discard_ref(pmid): log_it_info('discard_ref', 'BEGIN', str(pmid)) response = "" try: check_for_other_users(current_user.name) #if request.method == "POST": moved = model.execute(move_reftemp_to_refbad(pmid), current_user.name, commit=True) if not moved: raise MoveRefException('An error occurred when deleting the reference for pmid=" + pmid + " from the database.') #Reference deleted response = "Reference for pmid=" + pmid + " has been removed from the database." log_it_info('discard_ref', 'SUCCESS') except Exception as e: response = "Error:<br>" + e.message log_it_info('discard_ref', 'FAILURE') logging.error(e.message) return response @app.route("/reference/link/<pmid>", methods=['GET', 'POST']) @login_required def link_ref(pmid): log_it_info('link_ref', 'BEGIN', str(pmid)) response = "" try: check_for_other_users(current_user.name) log_it('check_for_other_users', 'SUCCESS') #if request.method == "POST": tasks = check_form_validity_and_convert_to_tasks(request.form) log_it('check_form_validity_and_convert_to_tasks', 'SUCCESS', str(tasks)) model.execute(link_paper(pmid, tasks), current_user.name, commit=True) log_it('link_paper', 'SUCCESS') #Link successful summary = model.execute(get_ref_summary(pmid), current_user.name) response = summary log_it_info('link_ref', 'SUCCESS') except Exception as e: response = "Error:<br>" + e.message; log_it_info('link_ref', 'FAILURE') logging.error(e.message) return response @app.route("/login", methods=["GET", "POST"]) def login(): log_it_info('login', 'BEGIN') form = LoginForm(request.form) try: if request.method == "POST" and form.validate(): username = form.username.data.lower() password = form.password.data remember = False check_for_other_users(username) logged_in = login_lit_review_user(username, password, model, remember) if not logged_in: raise LoginException('Login unsuccessful. Reason unknown.') #Login successful. flash("Logged in!", 'login') current_user.login() log_it_info('login', 'SUCCESS') return redirect(request.args.get("next") or url_for("index")) except Exception as e: flash(e.message, 'error') log_it_info('login', 'FAILURE') logging.error(e.message) return render_template("login.html", form=form) @app.route("/reauth", methods=["GET", "POST"]) @login_required def reauth(): try: if request.method == "POST": output = confirm_login_lit_review_user() flash(output, 'login') return redirect(url_for("index")) except Exception as e: flash(e.message, 'error') return render_template("reauth.html") @app.route("/logout") def logout(): log_it_info('logout', 'BEGIN') try: current_user.logout() logged_out = logout_lit_review_user() if not logged_out: raise LogoutException('Logout unsuccessful. 
Reason unknown.') #Logout successful flash('Logged out.', 'login') log_it_info('logout', 'SUCCESS') except Exception as e: flash(e.message, 'error') log_it_info('logout', 'FAILURE') logging.error(e.message) return redirect(url_for("index"))
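Every route above follows the same shape: guard the work in a try/except, flash or return the error text, and log it. A stripped-down, self-contained sketch of that pattern is below; the handler body and names are illustrative and do not reproduce the LitReview database logic.

import logging
from flask import Flask

app = Flask(__name__)

@app.route("/reference/delete/<pmid>", methods=["POST"])
def discard_ref(pmid):
    try:
        # ... move the reference out of the temporary table here, raising on failure ...
        return "Reference for pmid=" + pmid + " has been removed from the database."
    except Exception as e:
        logging.error(str(e))
        return "Error:<br>" + str(e)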
PypiClean
/boot-synth-1.2.0.tar.gz/boot-synth-1.2.0/synth/projects_master/nginx_router/frontend/react/node_modules/process/browser.js
var process = module.exports = {}; // cached from whatever global is present so that test runners that stub it // don't break things. But we need to wrap it in a try catch in case it is // wrapped in strict mode code which doesn't define any globals. It's inside a // function because try/catches deoptimize in certain engines. var cachedSetTimeout; var cachedClearTimeout; function defaultSetTimout() { throw new Error('setTimeout has not been defined'); } function defaultClearTimeout () { throw new Error('clearTimeout has not been defined'); } (function () { try { if (typeof setTimeout === 'function') { cachedSetTimeout = setTimeout; } else { cachedSetTimeout = defaultSetTimout; } } catch (e) { cachedSetTimeout = defaultSetTimout; } try { if (typeof clearTimeout === 'function') { cachedClearTimeout = clearTimeout; } else { cachedClearTimeout = defaultClearTimeout; } } catch (e) { cachedClearTimeout = defaultClearTimeout; } } ()) function runTimeout(fun) { if (cachedSetTimeout === setTimeout) { //normal enviroments in sane situations return setTimeout(fun, 0); } // if setTimeout wasn't available but was latter defined if ((cachedSetTimeout === defaultSetTimout || !cachedSetTimeout) && setTimeout) { cachedSetTimeout = setTimeout; return setTimeout(fun, 0); } try { // when when somebody has screwed with setTimeout but no I.E. maddness return cachedSetTimeout(fun, 0); } catch(e){ try { // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally return cachedSetTimeout.call(null, fun, 0); } catch(e){ // same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error return cachedSetTimeout.call(this, fun, 0); } } } function runClearTimeout(marker) { if (cachedClearTimeout === clearTimeout) { //normal enviroments in sane situations return clearTimeout(marker); } // if clearTimeout wasn't available but was latter defined if ((cachedClearTimeout === defaultClearTimeout || !cachedClearTimeout) && clearTimeout) { cachedClearTimeout = clearTimeout; return clearTimeout(marker); } try { // when when somebody has screwed with setTimeout but no I.E. maddness return cachedClearTimeout(marker); } catch (e){ try { // When we are in I.E. but the script has been evaled so I.E. doesn't trust the global object when called normally return cachedClearTimeout.call(null, marker); } catch (e){ // same as above but when it's a version of I.E. that must have the global object for 'this', hopfully our context correct otherwise it will throw a global error. // Some versions of I.E. 
have different rules for clearTimeout vs setTimeout return cachedClearTimeout.call(this, marker); } } } var queue = []; var draining = false; var currentQueue; var queueIndex = -1; function cleanUpNextTick() { if (!draining || !currentQueue) { return; } draining = false; if (currentQueue.length) { queue = currentQueue.concat(queue); } else { queueIndex = -1; } if (queue.length) { drainQueue(); } } function drainQueue() { if (draining) { return; } var timeout = runTimeout(cleanUpNextTick); draining = true; var len = queue.length; while(len) { currentQueue = queue; queue = []; while (++queueIndex < len) { if (currentQueue) { currentQueue[queueIndex].run(); } } queueIndex = -1; len = queue.length; } currentQueue = null; draining = false; runClearTimeout(timeout); } process.nextTick = function (fun) { var args = new Array(arguments.length - 1); if (arguments.length > 1) { for (var i = 1; i < arguments.length; i++) { args[i - 1] = arguments[i]; } } queue.push(new Item(fun, args)); if (queue.length === 1 && !draining) { runTimeout(drainQueue); } }; // v8 likes predictible objects function Item(fun, array) { this.fun = fun; this.array = array; } Item.prototype.run = function () { this.fun.apply(null, this.array); }; process.title = 'browser'; process.browser = true; process.env = {}; process.argv = []; process.version = ''; // empty string to avoid regexp issues process.versions = {}; function noop() {} process.on = noop; process.addListener = noop; process.once = noop; process.off = noop; process.removeListener = noop; process.removeAllListeners = noop; process.emit = noop; process.prependListener = noop; process.prependOnceListener = noop; process.listeners = function (name) { return [] } process.binding = function (name) { throw new Error('process.binding is not supported'); }; process.cwd = function () { return '/' }; process.chdir = function (dir) { throw new Error('process.chdir is not supported'); }; process.umask = function() { return 0; };
PypiClean
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/machinelearningservices/v20230401/get_registry_model_container.py
import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs __all__ = [ 'GetRegistryModelContainerResult', 'AwaitableGetRegistryModelContainerResult', 'get_registry_model_container', 'get_registry_model_container_output', ] @pulumi.output_type class GetRegistryModelContainerResult: """ Azure Resource Manager resource envelope. """ def __init__(__self__, id=None, model_container_properties=None, name=None, system_data=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if model_container_properties and not isinstance(model_container_properties, dict): raise TypeError("Expected argument 'model_container_properties' to be a dict") pulumi.set(__self__, "model_container_properties", model_container_properties) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if system_data and not isinstance(system_data, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, "system_data", system_data) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id") @property @pulumi.getter(name="modelContainerProperties") def model_container_properties(self) -> 'outputs.ModelContainerResponse': """ [Required] Additional attributes of the entity. """ return pulumi.get(self, "model_container_properties") @property @pulumi.getter def name(self) -> str: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter(name="systemData") def system_data(self) -> 'outputs.SystemDataResponse': """ Azure Resource Manager metadata containing createdBy and modifiedBy information. """ return pulumi.get(self, "system_data") @property @pulumi.getter def type(self) -> str: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type") class AwaitableGetRegistryModelContainerResult(GetRegistryModelContainerResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetRegistryModelContainerResult( id=self.id, model_container_properties=self.model_container_properties, name=self.name, system_data=self.system_data, type=self.type) def get_registry_model_container(model_name: Optional[str] = None, registry_name: Optional[str] = None, resource_group_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistryModelContainerResult: """ Azure Resource Manager resource envelope. :param str model_name: Container name. This is case-sensitive. :param str registry_name: Name of Azure Machine Learning registry. This is case-insensitive :param str resource_group_name: The name of the resource group. The name is case insensitive. 
""" __args__ = dict() __args__['modelName'] = model_name __args__['registryName'] = registry_name __args__['resourceGroupName'] = resource_group_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20230401:getRegistryModelContainer', __args__, opts=opts, typ=GetRegistryModelContainerResult).value return AwaitableGetRegistryModelContainerResult( id=pulumi.get(__ret__, 'id'), model_container_properties=pulumi.get(__ret__, 'model_container_properties'), name=pulumi.get(__ret__, 'name'), system_data=pulumi.get(__ret__, 'system_data'), type=pulumi.get(__ret__, 'type')) @_utilities.lift_output_func(get_registry_model_container) def get_registry_model_container_output(model_name: Optional[pulumi.Input[str]] = None, registry_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRegistryModelContainerResult]: """ Azure Resource Manager resource envelope. :param str model_name: Container name. This is case-sensitive. :param str registry_name: Name of Azure Machine Learning registry. This is case-insensitive :param str resource_group_name: The name of the resource group. The name is case insensitive. """ ...
PypiClean
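A minimal usage sketch for the registry model container lookup defined above, intended to run inside a Pulumi program with a configured Azure credential. The resource names are placeholders, and the import path mirrors the file location shown in the header; adjust it if your SDK version re-exports the function elsewhere.

import pulumi
from pulumi_azure_native.machinelearningservices.v20230401 import get_registry_model_container

container = get_registry_model_container(
    model_name="my-model",
    registry_name="my-registry",
    resource_group_name="my-rg",
)
pulumi.export("model_container_id", container.id)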
/gevent-socketio-hartwork-0.3.6.post201803091952.tar.gz/gevent-socketio-hartwork-0.3.6.post201803091952/examples/pyramid_backbone_redis_chat_persistence/chatter4/static/backbone.js
(function(){ // Initial Setup // ------------- // Save a reference to the global object (`window` in the browser, `global` // on the server). var root = this; // Save the previous value of the `Backbone` variable, so that it can be // restored later on, if `noConflict` is used. var previousBackbone = root.Backbone; // Create a local reference to slice/splice. var slice = Array.prototype.slice; var splice = Array.prototype.splice; // The top-level namespace. All public Backbone classes and modules will // be attached to this. Exported for both CommonJS and the browser. var Backbone; if (typeof exports !== 'undefined') { Backbone = exports; } else { Backbone = root.Backbone = {}; } // Current version of the library. Keep in sync with `package.json`. Backbone.VERSION = '0.9.0'; // Require Underscore, if we're on the server, and it's not already present. var _ = root._; if (!_ && (typeof require !== 'undefined')) _ = require('underscore'); // For Backbone's purposes, jQuery, Zepto, or Ender owns the `$` variable. var $ = root.jQuery || root.Zepto || root.ender; // Runs Backbone.js in *noConflict* mode, returning the `Backbone` variable // to its previous owner. Returns a reference to this Backbone object. Backbone.noConflict = function() { root.Backbone = previousBackbone; return this; }; // Turn on `emulateHTTP` to support legacy HTTP servers. Setting this option // will fake `"PUT"` and `"DELETE"` requests via the `_method` parameter and // set a `X-Http-Method-Override` header. Backbone.emulateHTTP = false; // Turn on `emulateJSON` to support legacy servers that can't deal with direct // `application/json` requests ... will encode the body as // `application/x-www-form-urlencoded` instead and will send the model in a // form param named `model`. Backbone.emulateJSON = false; // Backbone.Events // ----------------- // A module that can be mixed in to *any object* in order to provide it with // custom events. You may bind with `on` or remove with `off` callback functions // to an event; trigger`-ing an event fires all callbacks in succession. // // var object = {}; // _.extend(object, Backbone.Events); // object.on('expand', function(){ alert('expanded'); }); // object.trigger('expand'); // Backbone.Events = { // Bind an event, specified by a string name, `ev`, to a `callback` // function. Passing `"all"` will bind the callback to all events fired. on: function(events, callback, context) { var ev; events = events.split(/\s+/); var calls = this._callbacks || (this._callbacks = {}); while (ev = events.shift()) { // Create an immutable callback list, allowing traversal during // modification. The tail is an empty object that will always be used // as the next node. var list = calls[ev] || (calls[ev] = {}); var tail = list.tail || (list.tail = list.next = {}); tail.callback = callback; tail.context = context; list.tail = tail.next = {}; } return this; }, // Remove one or many callbacks. If `context` is null, removes all callbacks // with that function. If `callback` is null, removes all callbacks for the // event. If `ev` is null, removes all bound callbacks for all events. off: function(events, callback, context) { var ev, calls, node; if (!events) { delete this._callbacks; } else if (calls = this._callbacks) { events = events.split(/\s+/); while (ev = events.shift()) { node = calls[ev]; delete calls[ev]; if (!callback || !node) continue; // Create a new list, omitting the indicated event/context pairs. 
while ((node = node.next) && node.next) { if (node.callback === callback && (!context || node.context === context)) continue; this.on(ev, node.callback, node.context); } } } return this; }, // Trigger an event, firing all bound callbacks. Callbacks are passed the // same arguments as `trigger` is, apart from the event name. // Listening for `"all"` passes the true event name as the first argument. trigger: function(events) { var event, node, calls, tail, args, all, rest; if (!(calls = this._callbacks)) return this; all = calls['all']; (events = events.split(/\s+/)).push(null); // Save references to the current heads & tails. while (event = events.shift()) { if (all) events.push({next: all.next, tail: all.tail, event: event}); if (!(node = calls[event])) continue; events.push({next: node.next, tail: node.tail}); } // Traverse each list, stopping when the saved tail is reached. rest = slice.call(arguments, 1); while (node = events.pop()) { tail = node.tail; args = node.event ? [node.event].concat(rest) : rest; while ((node = node.next) !== tail) { node.callback.apply(node.context || this, args); } } return this; } }; // Aliases for backwards compatibility. Backbone.Events.bind = Backbone.Events.on; Backbone.Events.unbind = Backbone.Events.off; // Backbone.Model // -------------- // Create a new model, with defined attributes. A client id (`cid`) // is automatically generated and assigned for you. Backbone.Model = function(attributes, options) { var defaults; attributes || (attributes = {}); if (options && options.parse) attributes = this.parse(attributes); if (defaults = getValue(this, 'defaults')) { attributes = _.extend({}, defaults, attributes); } if (options && options.collection) this.collection = options.collection; this.attributes = {}; this._escapedAttributes = {}; this.cid = _.uniqueId('c'); this._changed = {}; if (!this.set(attributes, {silent: true})) { throw new Error("Can't create an invalid model"); } this._changed = {}; this._previousAttributes = _.clone(this.attributes); this.initialize.apply(this, arguments); }; // Attach all inheritable methods to the Model prototype. _.extend(Backbone.Model.prototype, Backbone.Events, { // The default name for the JSON `id` attribute is `"id"`. MongoDB and // CouchDB users may want to set this to `"_id"`. idAttribute: 'id', // Initialize is an empty function by default. Override it with your own // initialization logic. initialize: function(){}, // Return a copy of the model's `attributes` object. toJSON: function() { return _.clone(this.attributes); }, // Get the value of an attribute. get: function(attr) { return this.attributes[attr]; }, // Get the HTML-escaped value of an attribute. escape: function(attr) { var html; if (html = this._escapedAttributes[attr]) return html; var val = this.attributes[attr]; return this._escapedAttributes[attr] = _.escape(val == null ? '' : '' + val); }, // Returns `true` if the attribute contains a value that is not null // or undefined. has: function(attr) { return this.attributes[attr] != null; }, // Set a hash of model attributes on the object, firing `"change"` unless // you choose to silence it. set: function(key, value, options) { var attrs, attr, val; if (_.isObject(key) || key == null) { attrs = key; options = value; } else { attrs = {}; attrs[key] = value; } // Extract attributes and options. options || (options = {}); if (!attrs) return this; if (attrs instanceof Backbone.Model) attrs = attrs.attributes; if (options.unset) for (var attr in attrs) attrs[attr] = void 0; // Run validation. 
if (this.validate && !this._performValidation(attrs, options)) return false; // Check for changes of `id`. if (this.idAttribute in attrs) this.id = attrs[this.idAttribute]; var now = this.attributes; var escaped = this._escapedAttributes; var prev = this._previousAttributes || {}; var alreadyChanging = this._changing; this._changing = true; // Update attributes. for (attr in attrs) { val = attrs[attr]; if (!_.isEqual(now[attr], val)) delete escaped[attr]; options.unset ? delete now[attr] : now[attr] = val; delete this._changed[attr]; if (!_.isEqual(prev[attr], val) || (_.has(now, attr) != _.has(prev, attr))) { this._changed[attr] = val; } } // Fire the `"change"` events, if the model has been changed. if (!alreadyChanging) { if (!options.silent && this.hasChanged()) this.change(options); this._changing = false; } return this; }, // Remove an attribute from the model, firing `"change"` unless you choose // to silence it. `unset` is a noop if the attribute doesn't exist. unset: function(attr, options) { (options || (options = {})).unset = true; return this.set(attr, null, options); }, // Clear all attributes on the model, firing `"change"` unless you choose // to silence it. clear: function(options) { (options || (options = {})).unset = true; return this.set(_.clone(this.attributes), options); }, // Fetch the model from the server. If the server's representation of the // model differs from its current attributes, they will be overriden, // triggering a `"change"` event. fetch: function(options) { options = options ? _.clone(options) : {}; var model = this; var success = options.success; options.success = function(resp, status, xhr) { if (!model.set(model.parse(resp, xhr), options)) return false; if (success) success(model, resp); }; options.error = Backbone.wrapError(options.error, model, options); return (this.sync || Backbone.sync).call(this, 'read', this, options); }, // Set a hash of model attributes, and sync the model to the server. // If the server returns an attributes hash that differs, the model's // state will be `set` again. save: function(key, value, options) { var attrs; if (_.isObject(key) || key == null) { attrs = key; options = value; } else { attrs = {}; attrs[key] = value; } options = options ? _.clone(options) : {}; if (attrs && !this[options.wait ? '_performValidation' : 'set'](attrs, options)) return false; var model = this; var success = options.success; options.success = function(resp, status, xhr) { var serverAttrs = model.parse(resp, xhr); if (options.wait) serverAttrs = _.extend(attrs || {}, serverAttrs); if (!model.set(serverAttrs, options)) return false; if (success) { success(model, resp); } else { model.trigger('sync', model, resp, options); } }; options.error = Backbone.wrapError(options.error, model, options); var method = this.isNew() ? 'create' : 'update'; return (this.sync || Backbone.sync).call(this, method, this, options); }, // Destroy this model on the server if it was already persisted. // Optimistically removes the model from its collection, if it has one. // If `wait: true` is passed, waits for the server to respond before removal. destroy: function(options) { options = options ? 
_.clone(options) : {}; var model = this; var success = options.success; var triggerDestroy = function() { model.trigger('destroy', model, model.collection, options); }; if (this.isNew()) return triggerDestroy(); options.success = function(resp) { if (options.wait) triggerDestroy(); if (success) { success(model, resp); } else { model.trigger('sync', model, resp, options); } }; options.error = Backbone.wrapError(options.error, model, options); var xhr = (this.sync || Backbone.sync).call(this, 'delete', this, options); if (!options.wait) triggerDestroy(); return xhr; }, // Default URL for the model's representation on the server -- if you're // using Backbone's restful methods, override this to change the endpoint // that will be called. url: function() { var base = getValue(this.collection, 'url') || getValue(this, 'urlRoot') || urlError(); if (this.isNew()) return base; return base + (base.charAt(base.length - 1) == '/' ? '' : '/') + encodeURIComponent(this.id); }, // **parse** converts a response into the hash of attributes to be `set` on // the model. The default implementation is just to pass the response along. parse: function(resp, xhr) { return resp; }, // Create a new model with identical attributes to this one. clone: function() { return new this.constructor(this.attributes); }, // A model is new if it has never been saved to the server, and lacks an id. isNew: function() { return this.id == null; }, // Call this method to manually fire a `"change"` event for this model and // a `"change:attribute"` event for each changed attribute. // Calling this will cause all objects observing the model to update. change: function(options) { for (var attr in this._changed) { this.trigger('change:' + attr, this, this._changed[attr], options); } this.trigger('change', this, options); this._previousAttributes = _.clone(this.attributes); this._changed = {}; }, // Determine if the model has changed since the last `"change"` event. // If you specify an attribute name, determine if that attribute has changed. hasChanged: function(attr) { if (attr) return _.has(this._changed, attr); return !_.isEmpty(this._changed); }, // Return an object containing all the attributes that have changed, or // false if there are no changed attributes. Useful for determining what // parts of a view need to be updated and/or what attributes need to be // persisted to the server. Unset attributes will be set to undefined. // You can also pass an attributes object to diff against the model, // determining if there *would be* a change. changedAttributes: function(diff) { if (!diff) return this.hasChanged() ? _.clone(this._changed) : false; var val, changed = false, old = this._previousAttributes; for (var attr in diff) { if (_.isEqual(old[attr], (val = diff[attr]))) continue; (changed || (changed = {}))[attr] = val; } return changed; }, // Get the previous value of an attribute, recorded at the time the last // `"change"` event was fired. previous: function(attr) { if (!attr || !this._previousAttributes) return null; return this._previousAttributes[attr]; }, // Get all of the attributes of the model at the time of the previous // `"change"` event. previousAttributes: function() { return _.clone(this._previousAttributes); }, // Run validation against a set of incoming attributes, returning `true` // if all is well. If a specific `error` callback has been passed, // call that instead of firing the general `"error"` event. 
_performValidation: function(attrs, options) { var newAttrs = _.extend({}, this.attributes, attrs); var error = this.validate(newAttrs, options); if (error) { if (options.error) { options.error(this, error, options); } else { this.trigger('error', this, error, options); } return false; } return true; } }); // Backbone.Collection // ------------------- // Provides a standard collection class for our sets of models, ordered // or unordered. If a `comparator` is specified, the Collection will maintain // its models in sort order, as they're added and removed. Backbone.Collection = function(models, options) { options || (options = {}); if (options.comparator) this.comparator = options.comparator; this._reset(); this.initialize.apply(this, arguments); if (models) this.reset(models, {silent: true, parse: options.parse}); }; // Define the Collection's inheritable methods. _.extend(Backbone.Collection.prototype, Backbone.Events, { // The default model for a collection is just a **Backbone.Model**. // This should be overridden in most cases. model: Backbone.Model, // Initialize is an empty function by default. Override it with your own // initialization logic. initialize: function(){}, // The JSON representation of a Collection is an array of the // models' attributes. toJSON: function() { return this.map(function(model){ return model.toJSON(); }); }, // Add a model, or list of models to the set. Pass **silent** to avoid // firing the `add` event for every new model. add: function(models, options) { var i, index, length, model, cid, id, cids = {}, ids = {}; options || (options = {}); models = _.isArray(models) ? models.slice() : [models]; // Begin by turning bare objects into model references, and preventing // invalid models or duplicate models from being added. for (i = 0, length = models.length; i < length; i++) { if (!(model = models[i] = this._prepareModel(models[i], options))) { throw new Error("Can't add an invalid model to a collection"); } if (cids[cid = model.cid] || this._byCid[cid] || (((id = model.id) != null) && (ids[id] || this._byId[id]))) { throw new Error("Can't add the same model to a collection twice"); } cids[cid] = ids[id] = model; } // Listen to added models' events, and index models for lookup by // `id` and by `cid`. for (i = 0; i < length; i++) { (model = models[i]).on('all', this._onModelEvent, this); this._byCid[model.cid] = model; if (model.id != null) this._byId[model.id] = model; } // Insert models into the collection, re-sorting if needed, and triggering // `add` events unless silenced. this.length += length; index = options.at != null ? options.at : this.models.length; splice.apply(this.models, [index, 0].concat(models)); if (this.comparator) this.sort({silent: true}); if (options.silent) return this; for (i = 0, length = this.models.length; i < length; i++) { if (!cids[(model = this.models[i]).cid]) continue; options.index = i; model.trigger('add', model, this, options); } return this; }, // Remove a model, or a list of models from the set. Pass silent to avoid // firing the `remove` event for every model removed. remove: function(models, options) { var i, l, index, model; options || (options = {}); models = _.isArray(models) ? 
models.slice() : [models]; for (i = 0, l = models.length; i < l; i++) { model = this.getByCid(models[i]) || this.get(models[i]); if (!model) continue; delete this._byId[model.id]; delete this._byCid[model.cid]; index = this.indexOf(model); this.models.splice(index, 1); this.length--; if (!options.silent) { options.index = index; model.trigger('remove', model, this, options); } this._removeReference(model); } return this; }, // Get a model from the set by id. get: function(id) { if (id == null) return null; return this._byId[id.id != null ? id.id : id]; }, // Get a model from the set by client id. getByCid: function(cid) { return cid && this._byCid[cid.cid || cid]; }, // Get the model at the given index. at: function(index) { return this.models[index]; }, // Force the collection to re-sort itself. You don't need to call this under // normal circumstances, as the set will maintain sort order as each item // is added. sort: function(options) { options || (options = {}); if (!this.comparator) throw new Error('Cannot sort a set without a comparator'); var boundComparator = _.bind(this.comparator, this); if (this.comparator.length == 1) { this.models = this.sortBy(boundComparator); } else { this.models.sort(boundComparator); } if (!options.silent) this.trigger('reset', this, options); return this; }, // Pluck an attribute from each model in the collection. pluck: function(attr) { return _.map(this.models, function(model){ return model.get(attr); }); }, // When you have more items than you want to add or remove individually, // you can reset the entire set with a new list of models, without firing // any `add` or `remove` events. Fires `reset` when finished. reset: function(models, options) { models || (models = []); options || (options = {}); for (var i = 0, l = this.models.length; i < l; i++) { this._removeReference(this.models[i]); } this._reset(); this.add(models, {silent: true, parse: options.parse}); if (!options.silent) this.trigger('reset', this, options); return this; }, // Fetch the default set of models for this collection, resetting the // collection when they arrive. If `add: true` is passed, appends the // models to the collection instead of resetting. fetch: function(options) { options = options ? _.clone(options) : {}; if (options.parse === undefined) options.parse = true; var collection = this; var success = options.success; options.success = function(resp, status, xhr) { collection[options.add ? 'add' : 'reset'](collection.parse(resp, xhr), options); if (success) success(collection, resp); }; options.error = Backbone.wrapError(options.error, collection, options); return (this.sync || Backbone.sync).call(this, 'read', this, options); }, // Create a new instance of a model in this collection. Add the model to the // collection immediately, unless `wait: true` is passed, in which case we // wait for the server to agree. create: function(model, options) { var coll = this; options = options ? _.clone(options) : {}; model = this._prepareModel(model, options); if (!model) return false; if (!options.wait) coll.add(model, options); var success = options.success; options.success = function(nextModel, resp, xhr) { if (options.wait) coll.add(nextModel, options); if (success) { success(nextModel, resp); } else { nextModel.trigger('sync', model, resp, options); } }; model.save(null, options); return model; }, // **parse** converts a response into a list of models to be added to the // collection. The default implementation is just to pass it through. 
parse: function(resp, xhr) { return resp; }, // Proxy to _'s chain. Can't be proxied the same way the rest of the // underscore methods are proxied because it relies on the underscore // constructor. chain: function () { return _(this.models).chain(); }, // Reset all internal state. Called when the collection is reset. _reset: function(options) { this.length = 0; this.models = []; this._byId = {}; this._byCid = {}; }, // Prepare a model or hash of attributes to be added to this collection. _prepareModel: function(model, options) { if (!(model instanceof Backbone.Model)) { var attrs = model; options.collection = this; model = new this.model(attrs, options); if (model.validate && !model._performValidation(model.attributes, options)) model = false; } else if (!model.collection) { model.collection = this; } return model; }, // Internal method to remove a model's ties to a collection. _removeReference: function(model) { if (this == model.collection) { delete model.collection; } model.off('all', this._onModelEvent, this); }, // Internal method called every time a model in the set fires an event. // Sets need to update their indexes when models change ids. All other // events simply proxy through. "add" and "remove" events that originate // in other collections are ignored. _onModelEvent: function(ev, model, collection, options) { if ((ev == 'add' || ev == 'remove') && collection != this) return; if (ev == 'destroy') { this.remove(model, options); } if (model && ev === 'change:' + model.idAttribute) { delete this._byId[model.previous(model.idAttribute)]; this._byId[model.id] = model; } this.trigger.apply(this, arguments); } }); // Underscore methods that we want to implement on the Collection. var methods = ['forEach', 'each', 'map', 'reduce', 'reduceRight', 'find', 'detect', 'filter', 'select', 'reject', 'every', 'all', 'some', 'any', 'include', 'contains', 'invoke', 'max', 'min', 'sortBy', 'sortedIndex', 'toArray', 'size', 'first', 'initial', 'rest', 'last', 'without', 'indexOf', 'shuffle', 'lastIndexOf', 'isEmpty', 'groupBy']; // Mix in each Underscore method as a proxy to `Collection#models`. _.each(methods, function(method) { Backbone.Collection.prototype[method] = function() { return _[method].apply(_, [this.models].concat(_.toArray(arguments))); }; }); // Backbone.Router // ------------------- // Routers map faux-URLs to actions, and fire events when routes are // matched. Creating a new one sets its `routes` hash, if not set statically. Backbone.Router = function(options) { options || (options = {}); if (options.routes) this.routes = options.routes; this._bindRoutes(); this.initialize.apply(this, arguments); }; // Cached regular expressions for matching named param parts and splatted // parts of route strings. var namedParam = /:\w+/g; var splatParam = /\*\w+/g; var escapeRegExp = /[-[\]{}()+?.,\\^$|#\s]/g; // Set up all inheritable **Backbone.Router** properties and methods. _.extend(Backbone.Router.prototype, Backbone.Events, { // Initialize is an empty function by default. Override it with your own // initialization logic. initialize: function(){}, // Manually bind a single named route to a callback. For example: // // this.route('search/:query/p:num', 'search', function(query, num) { // ... 
// }); // route: function(route, name, callback) { Backbone.history || (Backbone.history = new Backbone.History); if (!_.isRegExp(route)) route = this._routeToRegExp(route); if (!callback) callback = this[name]; Backbone.history.route(route, _.bind(function(fragment) { var args = this._extractParameters(route, fragment); callback && callback.apply(this, args); this.trigger.apply(this, ['route:' + name].concat(args)); Backbone.history.trigger('route', this, name, args); }, this)); return this; }, // Simple proxy to `Backbone.history` to save a fragment into the history. navigate: function(fragment, options) { Backbone.history.navigate(fragment, options); }, // Bind all defined routes to `Backbone.history`. We have to reverse the // order of the routes here to support behavior where the most general // routes can be defined at the bottom of the route map. _bindRoutes: function() { if (!this.routes) return; var routes = []; for (var route in this.routes) { routes.unshift([route, this.routes[route]]); } for (var i = 0, l = routes.length; i < l; i++) { this.route(routes[i][0], routes[i][1], this[routes[i][1]]); } }, // Convert a route string into a regular expression, suitable for matching // against the current location hash. _routeToRegExp: function(route) { route = route.replace(escapeRegExp, '\\$&') .replace(namedParam, '([^\/]+)') .replace(splatParam, '(.*?)'); return new RegExp('^' + route + '$'); }, // Given a route, and a URL fragment that it matches, return the array of // extracted parameters. _extractParameters: function(route, fragment) { return route.exec(fragment).slice(1); } }); // Backbone.History // ---------------- // Handles cross-browser history management, based on URL fragments. If the // browser does not support `onhashchange`, falls back to polling. Backbone.History = function() { this.handlers = []; _.bindAll(this, 'checkUrl'); }; // Cached regex for cleaning leading hashes and slashes . var routeStripper = /^[#\/]/; // Cached regex for detecting MSIE. var isExplorer = /msie [\w.]+/; // Has the history handling already been started? var historyStarted = false; // Set up all inheritable **Backbone.History** properties and methods. _.extend(Backbone.History.prototype, Backbone.Events, { // The default interval to poll for hash changes, if necessary, is // twenty times a second. interval: 50, // Get the cross-browser normalized URL fragment, either from the URL, // the hash, or the override. getFragment: function(fragment, forcePushState) { if (fragment == null) { if (this._hasPushState || forcePushState) { fragment = window.location.pathname; var search = window.location.search; if (search) fragment += search; } else { fragment = window.location.hash; } } fragment = decodeURIComponent(fragment.replace(routeStripper, '')); if (!fragment.indexOf(this.options.root)) fragment = fragment.substr(this.options.root.length); return fragment; }, // Start the hash change handling, returning `true` if the current URL matches // an existing route, and `false` otherwise. start: function(options) { // Figure out the initial configuration. Do we need an iframe? // Is pushState desired ... is it available? 
if (historyStarted) throw new Error("Backbone.history has already been started"); this.options = _.extend({}, {root: '/'}, this.options, options); this._wantsHashChange = this.options.hashChange !== false; this._wantsPushState = !!this.options.pushState; this._hasPushState = !!(this.options.pushState && window.history && window.history.pushState); var fragment = this.getFragment(); var docMode = document.documentMode; var oldIE = (isExplorer.exec(navigator.userAgent.toLowerCase()) && (!docMode || docMode <= 7)); if (oldIE) { this.iframe = $('<iframe src="javascript:0" tabindex="-1" />').hide().appendTo('body')[0].contentWindow; this.navigate(fragment); } // Depending on whether we're using pushState or hashes, and whether // 'onhashchange' is supported, determine how we check the URL state. if (this._hasPushState) { $(window).bind('popstate', this.checkUrl); } else if (this._wantsHashChange && ('onhashchange' in window) && !oldIE) { $(window).bind('hashchange', this.checkUrl); } else if (this._wantsHashChange) { this._checkUrlInterval = setInterval(this.checkUrl, this.interval); } // Determine if we need to change the base url, for a pushState link // opened by a non-pushState browser. this.fragment = fragment; historyStarted = true; var loc = window.location; var atRoot = loc.pathname == this.options.root; // If we've started off with a route from a `pushState`-enabled browser, // but we're currently in a browser that doesn't support it... if (this._wantsHashChange && this._wantsPushState && !this._hasPushState && !atRoot) { this.fragment = this.getFragment(null, true); window.location.replace(this.options.root + '#' + this.fragment); // Return immediately as browser will do redirect to new url return true; // Or if we've started out with a hash-based route, but we're currently // in a browser where it could be `pushState`-based instead... } else if (this._wantsPushState && this._hasPushState && atRoot && loc.hash) { this.fragment = loc.hash.replace(routeStripper, ''); window.history.replaceState({}, document.title, loc.protocol + '//' + loc.host + this.options.root + this.fragment); } if (!this.options.silent) { return this.loadUrl(); } }, // Disable Backbone.history, perhaps temporarily. Not useful in a real app, // but possibly useful for unit testing Routers. stop: function() { $(window).unbind('popstate', this.checkUrl).unbind('hashchange', this.checkUrl); clearInterval(this._checkUrlInterval); historyStarted = false; }, // Add a route to be tested when the fragment changes. Routes added later // may override previous routes. route: function(route, callback) { this.handlers.unshift({route: route, callback: callback}); }, // Checks the current URL to see if it has changed, and if it has, // calls `loadUrl`, normalizing across the hidden iframe. checkUrl: function(e) { var current = this.getFragment(); if (current == this.fragment && this.iframe) current = this.getFragment(this.iframe.location.hash); if (current == this.fragment || current == decodeURIComponent(this.fragment)) return false; if (this.iframe) this.navigate(current); this.loadUrl() || this.loadUrl(window.location.hash); }, // Attempt to load the current URL fragment. If a route succeeds with a // match, returns `true`. If no defined routes matches the fragment, // returns `false`. 
loadUrl: function(fragmentOverride) { var fragment = this.fragment = this.getFragment(fragmentOverride); var matched = _.any(this.handlers, function(handler) { if (handler.route.test(fragment)) { handler.callback(fragment); return true; } }); return matched; }, // Save a fragment into the hash history, or replace the URL state if the // 'replace' option is passed. You are responsible for properly URL-encoding // the fragment in advance. // // The options object can contain `trigger: true` if you wish to have the // route callback be fired (not usually desirable), or `replace: true`, if // you which to modify the current URL without adding an entry to the history. navigate: function(fragment, options) { if (!historyStarted) return false; if (!options || options === true) options = {trigger: options}; var frag = (fragment || '').replace(routeStripper, ''); if (this.fragment == frag || this.fragment == decodeURIComponent(frag)) return; // If pushState is available, we use it to set the fragment as a real URL. if (this._hasPushState) { if (frag.indexOf(this.options.root) != 0) frag = this.options.root + frag; this.fragment = frag; window.history[options.replace ? 'replaceState' : 'pushState']({}, document.title, frag); // If hash changes haven't been explicitly disabled, update the hash // fragment to store history. } else if (this._wantsHashChange) { this.fragment = frag; this._updateHash(window.location, frag, options.replace); if (this.iframe && (frag != this.getFragment(this.iframe.location.hash))) { // Opening and closing the iframe tricks IE7 and earlier to push a history entry on hash-tag change. // When replace is true, we don't want this. if(!options.replace) this.iframe.document.open().close(); this._updateHash(this.iframe.location, frag, options.replace); } // If you've told us that you explicitly don't want fallback hashchange- // based history, then `navigate` becomes a page refresh. } else { window.location.assign(this.options.root + fragment); } if (options.trigger) this.loadUrl(fragment); }, // Update the hash location, either replacing the current entry, or adding // a new one to the browser history. _updateHash: function(location, fragment, replace) { if (replace) { location.replace(location.toString().replace(/(javascript:|#).*$/, '') + '#' + fragment); } else { location.hash = fragment; } } }); // Backbone.View // ------------- // Creating a Backbone.View creates its initial element outside of the DOM, // if an existing element is not provided... Backbone.View = function(options) { this.cid = _.uniqueId('view'); this._configure(options || {}); this._ensureElement(); this.initialize.apply(this, arguments); this.delegateEvents(); }; // Cached regex to split keys for `delegate`. var eventSplitter = /^(\S+)\s*(.*)$/; // List of view options to be merged as properties. var viewOptions = ['model', 'collection', 'el', 'id', 'attributes', 'className', 'tagName']; // Set up all inheritable **Backbone.View** properties and methods. _.extend(Backbone.View.prototype, Backbone.Events, { // The default `tagName` of a View's element is `"div"`. tagName: 'div', // jQuery delegate for element lookup, scoped to DOM elements within the // current view. This should be prefered to global lookups where possible. $: function(selector) { return this.$el.find(selector); }, // Initialize is an empty function by default. Override it with your own // initialization logic. 
initialize: function(){}, // **render** is the core function that your view should override, in order // to populate its element (`this.el`), with the appropriate HTML. The // convention is for **render** to always return `this`. render: function() { return this; }, // Remove this view from the DOM. Note that the view isn't present in the // DOM by default, so calling this method may be a no-op. remove: function() { this.$el.remove(); return this; }, // For small amounts of DOM Elements, where a full-blown template isn't // needed, use **make** to manufacture elements, one at a time. // // var el = this.make('li', {'class': 'row'}, this.model.escape('title')); // make: function(tagName, attributes, content) { var el = document.createElement(tagName); if (attributes) $(el).attr(attributes); if (content) $(el).html(content); return el; }, // Change the view's element (`this.el` property), including event // re-delegation. setElement: function(element, delegate) { this.$el = $(element); this.el = this.$el[0]; if (delegate !== false) this.delegateEvents(); }, // Set callbacks, where `this.events` is a hash of // // *{"event selector": "callback"}* // // { // 'mousedown .title': 'edit', // 'click .button': 'save' // 'click .open': function(e) { ... } // } // // pairs. Callbacks will be bound to the view, with `this` set properly. // Uses event delegation for efficiency. // Omitting the selector binds the event to `this.el`. // This only works for delegate-able events: not `focus`, `blur`, and // not `change`, `submit`, and `reset` in Internet Explorer. delegateEvents: function(events) { if (!(events || (events = getValue(this, 'events')))) return; this.undelegateEvents(); for (var key in events) { var method = events[key]; if (!_.isFunction(method)) method = this[events[key]]; if (!method) throw new Error('Event "' + events[key] + '" does not exist'); var match = key.match(eventSplitter); var eventName = match[1], selector = match[2]; method = _.bind(method, this); eventName += '.delegateEvents' + this.cid; if (selector === '') { this.$el.bind(eventName, method); } else { this.$el.delegate(selector, eventName, method); } } }, // Clears all callbacks previously bound to the view with `delegateEvents`. // You usually don't need to use this, but may wish to if you have multiple // Backbone views attached to the same DOM element. undelegateEvents: function() { this.$el.unbind('.delegateEvents' + this.cid); }, // Performs the initial configuration of a View with a set of options. // Keys with special meaning *(model, collection, id, className)*, are // attached directly to the view. _configure: function(options) { if (this.options) options = _.extend({}, this.options, options); for (var i = 0, l = viewOptions.length; i < l; i++) { var attr = viewOptions[i]; if (options[attr]) this[attr] = options[attr]; } this.options = options; }, // Ensure that the View has a DOM element to render into. // If `this.el` is a string, pass it through `$()`, take the first // matching element, and re-assign it to `el`. Otherwise, create // an element from the `id`, `className` and `tagName` properties. _ensureElement: function() { if (!this.el) { var attrs = getValue(this, 'attributes') || {}; if (this.id) attrs.id = this.id; if (this.className) attrs['class'] = this.className; this.setElement(this.make(this.tagName, attrs), false); } else { this.setElement(this.el, false); } } }); // The self-propagating extend function that Backbone classes use. 
var extend = function (protoProps, classProps) { var child = inherits(this, protoProps, classProps); child.extend = this.extend; return child; }; // Set up inheritance for the model, collection, and view. Backbone.Model.extend = Backbone.Collection.extend = Backbone.Router.extend = Backbone.View.extend = extend; // Backbone.sync // ------------- // Map from CRUD to HTTP for our default `Backbone.sync` implementation. var methodMap = { 'create': 'POST', 'update': 'PUT', 'delete': 'DELETE', 'read': 'GET' }; // Override this function to change the manner in which Backbone persists // models to the server. You will be passed the type of request, and the // model in question. By default, makes a RESTful Ajax request // to the model's `url()`. Some possible customizations could be: // // * Use `setTimeout` to batch rapid-fire updates into a single request. // * Send up the models as XML instead of JSON. // * Persist models via WebSockets instead of Ajax. // // Turn on `Backbone.emulateHTTP` in order to send `PUT` and `DELETE` requests // as `POST`, with a `_method` parameter containing the true HTTP method, // as well as all requests with the body as `application/x-www-form-urlencoded` // instead of `application/json` with the model in a param named `model`. // Useful when interfacing with server-side languages like **PHP** that make // it difficult to read the body of `PUT` requests. Backbone.sync = function(method, model, options) { var type = methodMap[method]; // Default JSON-request options. var params = {type: type, dataType: 'json'}; // Ensure that we have a URL. if (!options.url) { params.url = getValue(model, 'url') || urlError(); } // Ensure that we have the appropriate request data. if (!options.data && model && (method == 'create' || method == 'update')) { params.contentType = 'application/json'; params.data = JSON.stringify(model.toJSON()); } // For older servers, emulate JSON by encoding the request into an HTML-form. if (Backbone.emulateJSON) { params.contentType = 'application/x-www-form-urlencoded'; params.data = params.data ? {model: params.data} : {}; } // For older servers, emulate HTTP by mimicking the HTTP method with `_method` // And an `X-HTTP-Method-Override` header. if (Backbone.emulateHTTP) { if (type === 'PUT' || type === 'DELETE') { if (Backbone.emulateJSON) params.data._method = type; params.type = 'POST'; params.beforeSend = function(xhr) { xhr.setRequestHeader('X-HTTP-Method-Override', type); }; } } // Don't process data on a non-GET request. if (params.type !== 'GET' && !Backbone.emulateJSON) { params.processData = false; } // Make the request, allowing the user to override any Ajax options. return $.ajax(_.extend(params, options)); }; // Wrap an optional error callback with a fallback error event. Backbone.wrapError = function(onError, originalModel, options) { return function(model, resp) { var resp = model === originalModel ? resp : model; if (onError) { onError(model, resp, options); } else { originalModel.trigger('error', model, resp, options); } }; }; // Helpers // ------- // Shared empty constructor function to aid in prototype-chain creation. var ctor = function(){}; // Helper function to correctly set up the prototype chain, for subclasses. // Similar to `goog.inherits`, but uses a hash of prototype properties and // class properties to be extended. 
var inherits = function(parent, protoProps, staticProps) { var child; // The constructor function for the new subclass is either defined by you // (the "constructor" property in your `extend` definition), or defaulted // by us to simply call the parent's constructor. if (protoProps && protoProps.hasOwnProperty('constructor')) { child = protoProps.constructor; } else { child = function(){ parent.apply(this, arguments); }; } // Inherit class (static) properties from parent. _.extend(child, parent); // Set the prototype chain to inherit from `parent`, without calling // `parent`'s constructor function. ctor.prototype = parent.prototype; child.prototype = new ctor(); // Add prototype properties (instance properties) to the subclass, // if supplied. if (protoProps) _.extend(child.prototype, protoProps); // Add static properties to the constructor function, if supplied. if (staticProps) _.extend(child, staticProps); // Correctly set child's `prototype.constructor`. child.prototype.constructor = child; // Set a convenience property in case the parent's prototype is needed later. child.__super__ = parent.prototype; return child; }; // Helper function to get a value from a Backbone object as a property // or as a function. var getValue = function(object, prop) { if (!(object && object[prop])) return null; return _.isFunction(object[prop]) ? object[prop]() : object[prop]; }; // Throw an error when a URL is needed, and none is supplied. var urlError = function() { throw new Error('A "url" property or function must be specified'); }; }).call(this);
PypiClean
/dsin100days603v37-6.0.3.tar.gz/dsin100days603v37-6.0.3/notebook/static/components/MathJax/localization/it/MathMenu.js
MathJax.Localization.addTranslation("it","MathMenu",{version:"2.7.8",isLoaded:true,strings:{Show:"Mostra formula come",MathMLcode:"Codice MathML",OriginalMathML:"MathML originale",TeXCommands:"Comandi TeX",AsciiMathInput:"Input AsciiMathML",Original:"Modulo originale",ErrorMessage:"Messaggio d'errore",Annotation:"Annotation",TeX:"TeX",StarMath:"StarMath",Maple:"Maple",ContentMathML:"Content MathML",OpenMath:"OpenMath",texHints:"Aggiungi suggerimenti Tex a MathML",Settings:"Impostazioni formule",ZoomTrigger:"Attivazione zoom",Hover:"Sopra",Click:"Click",DoubleClick:"Doppio-Click",NoZoom:"Niente zoom",TriggerRequires:"L'attivazione richiede:",Option:"Option",Alt:"Alt",Command:"Command",Control:"Control",Shift:"Shift",ZoomFactor:"Fattore di zoom",Renderer:"Processore per le formule",MPHandles:"Affida a MathPlayer",MenuEvents:"Eventi menu",MouseEvents:"Eventi mouse",MenuAndMouse:"Eventi mouse e menu",FontPrefs:"Preferenze font",ForHTMLCSS:"Per HTML-CSS:",Auto:"Auto",TeXLocal:"TeX (locale)",TeXWeb:"TeX (web)",TeXImage:"TeX (immagini)",STIXLocal:"STIX (locale)",STIXWeb:"STIX (web)",AsanaMathWeb:"Asana Math (web)",GyrePagellaWeb:"Gyre Pagella (web)",GyreTermesWeb:"Gyre Termes (web)",LatinModernWeb:"Latin Modern (web)",NeoEulerWeb:"Neo Euler (web)",ContextMenu:"Menu contestuale",Browser:"Browser",Scale:"Scala tutte le formule...",Discoverable:"Evidenzia al passaggio",Locale:"Lingua",LoadLocale:"Scarica dall'URL ...",About:"Informazioni su MathJax",Help:"Aiuto di MathJax",localTeXfonts:"usare font TeX locale",webTeXfonts:"usare font Tex dal web",imagefonts:"usare font immagine",localSTIXfonts:"usare font STIX locale",webSVGfonts:"usare font SVG dal web",genericfonts:"usare generici font unicode",wofforotffonts:"font woff oppure otf",eotffonts:"font eot",svgfonts:"font svg",WebkitNativeMMLWarning:"Il tuo browser non sembra supportare MathML nativamente, perci\u00F2 il passaggio ora all'output MathML potrebbe rendere illegibili le formule della pagina.",MSIENativeMMLWarning:"Internet Explorer richiede il plugin MathPlayer per processare output MathML.",OperaNativeMMLWarning:"Il supporto di Opera a MathML \u00E8 limitato, perci\u00F2 passando ora all'output MathML potrebbe succedere che alcune espressioni siano rese in modo scadente.",SafariNativeMMLWarning:"L'implementazione di MathML del tuo browser non comprende tutte le caratteristiche usate da MathJax, perci\u00F2 alcune espressioni potrebbero non essere visualizzate perfettamente.",FirefoxNativeMMLWarning:"L'implementazione di MathML del tuo browser non comprende tutte le caratteristiche usate da MathJax, perci\u00F2 alcune espressioni potrebbero non essere visualizzate perfettamente.",MSIESVGWarning:"SVG non \u00E8 implementato nelle versioni precedenti IE9 oppure quando si sta emulando IE8 o precedenti. Passando all'output SVG le formule non saranno visualizzate correttamente.",LoadURL:"Scaricamento traduzione da questo indirizzo:",BadURL:"L'indirizzo dovrebbe puntare a un file Javascript con una traduzione di MathJax. I nomi di file Javascript dovrebbero avere estensione '.js'",BadData:"Impossibile scaricare la traduzione da %1",SwitchAnyway:"Passare comunque a questo interprete?\n\n(Premi OK per cambiare, ANNULLA per continuare con la modalit\u00E1 corrente",ScaleMath:"Scala tutte le formule (comparate al testo circostante) del",NonZeroScale:"Il fattore di scala non deve essere zero",PercentScale:"Il fattore di scala deve essere in percentuale (es. 
120%%)",IE8warning:"Questo disabiliter\u00E1 il menu di MathJax e la possibilit\u00E1 di zoom, puoi per\u00F2 accedere lo stesso al menu con Alt-Click su una formula.\n\nCambiare davvero le impostazioni di MathPlayer?",IE9warning:"Il menu contestuale di MathJax verr\u00E1 disabilitato, ma puoi sempre premere Alt-Click sopra una formula per accedervi comunque.",NoOriginalForm:"Modulo originale non disponibile",Close:"Chiudi",EqSource:"Codice sorgente formula MathJax",CloseAboutDialog:"Chiudi finestra di informazioni su MathJax",FastPreview:"Anteprima veloce",AssistiveMML:"MathML ausiliario",InTabOrder:"Includi nell'ordine di tabulazione"}});MathJax.Ajax.loadComplete("[MathJax]/localization/it/MathMenu.js");
PypiClean
/latimes-appengine-template-0.022.tar.gz/latimes-appengine-template-0.022/appengine_template/google_appengine/lib/django_1_2/django/contrib/gis/geos/prototypes/geom.py
from ctypes import c_char_p, c_int, c_size_t, c_ubyte, c_uint, POINTER
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, PREPGEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.prototypes.errcheck import \
    check_geom, check_minus_one, check_sized_string, check_string, check_zero
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc

# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)

# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS.  Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
    pass

### ctypes generation functions ###
def bin_constructor(func):
    "Generates a prototype for binary construction (HEX, WKB) GEOS routines."
    func.argtypes = [c_char_p, c_size_t]
    func.restype = GEOM_PTR
    func.errcheck = check_geom
    return func

# HEX & WKB output
def bin_output(func):
    "Generates a prototype for the routines that return a sized string."
    func.argtypes = [GEOM_PTR, POINTER(c_size_t)]
    func.errcheck = check_sized_string
    func.restype = c_uchar_p
    return func

def geom_output(func, argtypes):
    "For GEOS routines that return a geometry."
    if argtypes:
        func.argtypes = argtypes
    func.restype = GEOM_PTR
    func.errcheck = check_geom
    return func

def geom_index(func):
    "For GEOS routines that return geometries from an index."
    return geom_output(func, [GEOM_PTR, c_int])

def int_from_geom(func, zero=False):
    "Argument is a geometry, return type is an integer."
    func.argtypes = [GEOM_PTR]
    func.restype = c_int
    if zero:
        func.errcheck = check_zero
    else:
        func.errcheck = check_minus_one
    return func

def string_from_geom(func):
    "Argument is a Geometry, return type is a string."
    func.argtypes = [GEOM_PTR]
    func.restype = geos_char_p
    func.errcheck = check_string
    return func

### ctypes prototypes ###

# Deprecated creation routines from WKB, HEX, WKT
from_hex = bin_constructor(GEOSFunc('GEOSGeomFromHEX_buf'))
from_wkb = bin_constructor(GEOSFunc('GEOSGeomFromWKB_buf'))
from_wkt = geom_output(GEOSFunc('GEOSGeomFromWKT'), [c_char_p])

# Deprecated output routines
to_hex = bin_output(GEOSFunc('GEOSGeomToHEX_buf'))
to_wkb = bin_output(GEOSFunc('GEOSGeomToWKB_buf'))
to_wkt = string_from_geom(GEOSFunc('GEOSGeomToWKT'))

# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = int_from_geom(GEOSFunc('GEOSNormalize'))
geos_type = string_from_geom(GEOSFunc('GEOSGeomType'))
geos_typeid = int_from_geom(GEOSFunc('GEOSGeomTypeId'))
get_dims = int_from_geom(GEOSFunc('GEOSGeom_getDimensions'), zero=True)
get_num_coords = int_from_geom(GEOSFunc('GEOSGetNumCoordinates'))
get_num_geoms = int_from_geom(GEOSFunc('GEOSGetNumGeometries'))

# Geometry creation factories
create_point = geom_output(GEOSFunc('GEOSGeom_createPoint'), [CS_PTR])
create_linestring = geom_output(GEOSFunc('GEOSGeom_createLineString'), [CS_PTR])
create_linearring = geom_output(GEOSFunc('GEOSGeom_createLinearRing'), [CS_PTR])

# Polygon and collection creation routines are special and will not
# have their argument types defined.
create_polygon = geom_output(GEOSFunc('GEOSGeom_createPolygon'), None) create_collection = geom_output(GEOSFunc('GEOSGeom_createCollection'), None) # Ring routines get_extring = geom_output(GEOSFunc('GEOSGetExteriorRing'), [GEOM_PTR]) get_intring = geom_index(GEOSFunc('GEOSGetInteriorRingN')) get_nrings = int_from_geom(GEOSFunc('GEOSGetNumInteriorRings')) # Collection Routines get_geomn = geom_index(GEOSFunc('GEOSGetGeometryN')) # Cloning geom_clone = GEOSFunc('GEOSGeom_clone') geom_clone.argtypes = [GEOM_PTR] geom_clone.restype = GEOM_PTR # Destruction routine. destroy_geom = GEOSFunc('GEOSGeom_destroy') destroy_geom.argtypes = [GEOM_PTR] destroy_geom.restype = None # SRID routines geos_get_srid = GEOSFunc('GEOSGetSRID') geos_get_srid.argtypes = [GEOM_PTR] geos_get_srid.restype = c_int geos_set_srid = GEOSFunc('GEOSSetSRID') geos_set_srid.argtypes = [GEOM_PTR, c_int] geos_set_srid.restype = None
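

# A minimal usage sketch of these prototypes called directly (normally GEOSGeometry
# wraps them); it assumes the GEOS C library loaded successfully at import time,
# and the WKT literal below is just an illustrative value.
if __name__ == '__main__':
    pt = from_wkt('POINT (1 2)')        # build a GEOS geometry from WKT
    assert get_num_coords(pt) == 1      # a single point has one coordinate
    destroy_geom(pt)                    # free the underlying GEOS geometry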
PypiClean
/pulumi_huaweicloud-0.0.8.tar.gz/pulumi_huaweicloud-0.0.8/pulumi_huaweicloud/dli/_inputs.py
import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = [ 'SparkJobDependentPackageArgs', 'SparkJobDependentPackagePackageArgs', 'SqlJobConfArgs', 'TableColumnArgs', ] @pulumi.input_type class SparkJobDependentPackageArgs: def __init__(__self__, *, group_name: pulumi.Input[str], packages: pulumi.Input[Sequence[pulumi.Input['SparkJobDependentPackagePackageArgs']]]): """ :param pulumi.Input[str] group_name: Specifies the user group name. Changing this parameter will submit a new spark job. :param pulumi.Input[Sequence[pulumi.Input['SparkJobDependentPackagePackageArgs']]] packages: Specifies the user group resource for details. Changing this parameter will submit a new spark job. The object structure is documented below. """ pulumi.set(__self__, "group_name", group_name) pulumi.set(__self__, "packages", packages) @property @pulumi.getter(name="groupName") def group_name(self) -> pulumi.Input[str]: """ Specifies the user group name. Changing this parameter will submit a new spark job. """ return pulumi.get(self, "group_name") @group_name.setter def group_name(self, value: pulumi.Input[str]): pulumi.set(self, "group_name", value) @property @pulumi.getter def packages(self) -> pulumi.Input[Sequence[pulumi.Input['SparkJobDependentPackagePackageArgs']]]: """ Specifies the user group resource for details. Changing this parameter will submit a new spark job. The object structure is documented below. """ return pulumi.get(self, "packages") @packages.setter def packages(self, value: pulumi.Input[Sequence[pulumi.Input['SparkJobDependentPackagePackageArgs']]]): pulumi.set(self, "packages", value) @pulumi.input_type class SparkJobDependentPackagePackageArgs: def __init__(__self__, *, package_name: pulumi.Input[str], type: pulumi.Input[str]): """ :param pulumi.Input[str] package_name: Specifies the resource name of the package. Changing this parameter will submit a new spark job. :param pulumi.Input[str] type: Specifies the resource type of the package. Changing this parameter will submit a new spark job. """ pulumi.set(__self__, "package_name", package_name) pulumi.set(__self__, "type", type) @property @pulumi.getter(name="packageName") def package_name(self) -> pulumi.Input[str]: """ Specifies the resource name of the package. Changing this parameter will submit a new spark job. """ return pulumi.get(self, "package_name") @package_name.setter def package_name(self, value: pulumi.Input[str]): pulumi.set(self, "package_name", value) @property @pulumi.getter def type(self) -> pulumi.Input[str]: """ Specifies the resource type of the package. Changing this parameter will submit a new spark job. 
""" return pulumi.get(self, "type") @type.setter def type(self, value: pulumi.Input[str]): pulumi.set(self, "type", value) @pulumi.input_type class SqlJobConfArgs: def __init__(__self__, *, dli_sql_job_timeout: Optional[pulumi.Input[int]] = None, dli_sql_sqlasync_enabled: Optional[pulumi.Input[bool]] = None, spark_sql_auto_broadcast_join_threshold: Optional[pulumi.Input[int]] = None, spark_sql_bad_records_path: Optional[pulumi.Input[str]] = None, spark_sql_dynamic_partition_overwrite_enabled: Optional[pulumi.Input[bool]] = None, spark_sql_files_max_partition_bytes: Optional[pulumi.Input[int]] = None, spark_sql_max_records_per_file: Optional[pulumi.Input[int]] = None, spark_sql_shuffle_partitions: Optional[pulumi.Input[int]] = None): """ :param pulumi.Input[int] dli_sql_job_timeout: Sets the job running timeout interval. If the timeout interval expires, the job is canceled. Unit: `ms`. Changing this parameter will create a new resource. :param pulumi.Input[bool] dli_sql_sqlasync_enabled: Specifies whether DDL and DCL statements are executed asynchronously. The value true indicates that asynchronous execution is enabled. Default value is `false`. Changing this parameter will create a new resource. :param pulumi.Input[int] spark_sql_auto_broadcast_join_threshold: Maximum size of the table that displays all working nodes when a connection is executed. You can set this parameter to -1 to disable the display. Default value is `209715200`. Changing this parameter will create a new resource. :param pulumi.Input[str] spark_sql_bad_records_path: Path of bad records. Changing this parameter will create a new resource. :param pulumi.Input[bool] spark_sql_dynamic_partition_overwrite_enabled: In dynamic mode, Spark does not delete the previous partitions and only overwrites the partitions without data during execution. Default value is `false`. Changing this parameter will create a new resource. :param pulumi.Input[int] spark_sql_files_max_partition_bytes: Maximum number of bytes to be packed into a single partition when a file is read. Default value is `134217728`. Changing this parameter will create a new resource. :param pulumi.Input[int] spark_sql_max_records_per_file: Maximum number of records to be written into a single file. If the value is zero or negative, there is no limit. Default value is `0`. Changing this parameter will create a new resource. :param pulumi.Input[int] spark_sql_shuffle_partitions: Default number of partitions used to filter data for join or aggregation. Default value is `4096`. Changing this parameter will create a new resource. 
""" if dli_sql_job_timeout is not None: pulumi.set(__self__, "dli_sql_job_timeout", dli_sql_job_timeout) if dli_sql_sqlasync_enabled is not None: pulumi.set(__self__, "dli_sql_sqlasync_enabled", dli_sql_sqlasync_enabled) if spark_sql_auto_broadcast_join_threshold is not None: pulumi.set(__self__, "spark_sql_auto_broadcast_join_threshold", spark_sql_auto_broadcast_join_threshold) if spark_sql_bad_records_path is not None: pulumi.set(__self__, "spark_sql_bad_records_path", spark_sql_bad_records_path) if spark_sql_dynamic_partition_overwrite_enabled is not None: pulumi.set(__self__, "spark_sql_dynamic_partition_overwrite_enabled", spark_sql_dynamic_partition_overwrite_enabled) if spark_sql_files_max_partition_bytes is not None: pulumi.set(__self__, "spark_sql_files_max_partition_bytes", spark_sql_files_max_partition_bytes) if spark_sql_max_records_per_file is not None: pulumi.set(__self__, "spark_sql_max_records_per_file", spark_sql_max_records_per_file) if spark_sql_shuffle_partitions is not None: pulumi.set(__self__, "spark_sql_shuffle_partitions", spark_sql_shuffle_partitions) @property @pulumi.getter(name="dliSqlJobTimeout") def dli_sql_job_timeout(self) -> Optional[pulumi.Input[int]]: """ Sets the job running timeout interval. If the timeout interval expires, the job is canceled. Unit: `ms`. Changing this parameter will create a new resource. """ return pulumi.get(self, "dli_sql_job_timeout") @dli_sql_job_timeout.setter def dli_sql_job_timeout(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "dli_sql_job_timeout", value) @property @pulumi.getter(name="dliSqlSqlasyncEnabled") def dli_sql_sqlasync_enabled(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether DDL and DCL statements are executed asynchronously. The value true indicates that asynchronous execution is enabled. Default value is `false`. Changing this parameter will create a new resource. """ return pulumi.get(self, "dli_sql_sqlasync_enabled") @dli_sql_sqlasync_enabled.setter def dli_sql_sqlasync_enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "dli_sql_sqlasync_enabled", value) @property @pulumi.getter(name="sparkSqlAutoBroadcastJoinThreshold") def spark_sql_auto_broadcast_join_threshold(self) -> Optional[pulumi.Input[int]]: """ Maximum size of the table that displays all working nodes when a connection is executed. You can set this parameter to -1 to disable the display. Default value is `209715200`. Changing this parameter will create a new resource. """ return pulumi.get(self, "spark_sql_auto_broadcast_join_threshold") @spark_sql_auto_broadcast_join_threshold.setter def spark_sql_auto_broadcast_join_threshold(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "spark_sql_auto_broadcast_join_threshold", value) @property @pulumi.getter(name="sparkSqlBadRecordsPath") def spark_sql_bad_records_path(self) -> Optional[pulumi.Input[str]]: """ Path of bad records. Changing this parameter will create a new resource. """ return pulumi.get(self, "spark_sql_bad_records_path") @spark_sql_bad_records_path.setter def spark_sql_bad_records_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "spark_sql_bad_records_path", value) @property @pulumi.getter(name="sparkSqlDynamicPartitionOverwriteEnabled") def spark_sql_dynamic_partition_overwrite_enabled(self) -> Optional[pulumi.Input[bool]]: """ In dynamic mode, Spark does not delete the previous partitions and only overwrites the partitions without data during execution. Default value is `false`. 
Changing this parameter will create a new resource. """ return pulumi.get(self, "spark_sql_dynamic_partition_overwrite_enabled") @spark_sql_dynamic_partition_overwrite_enabled.setter def spark_sql_dynamic_partition_overwrite_enabled(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "spark_sql_dynamic_partition_overwrite_enabled", value) @property @pulumi.getter(name="sparkSqlFilesMaxPartitionBytes") def spark_sql_files_max_partition_bytes(self) -> Optional[pulumi.Input[int]]: """ Maximum number of bytes to be packed into a single partition when a file is read. Default value is `134217728`. Changing this parameter will create a new resource. """ return pulumi.get(self, "spark_sql_files_max_partition_bytes") @spark_sql_files_max_partition_bytes.setter def spark_sql_files_max_partition_bytes(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "spark_sql_files_max_partition_bytes", value) @property @pulumi.getter(name="sparkSqlMaxRecordsPerFile") def spark_sql_max_records_per_file(self) -> Optional[pulumi.Input[int]]: """ Maximum number of records to be written into a single file. If the value is zero or negative, there is no limit. Default value is `0`. Changing this parameter will create a new resource. """ return pulumi.get(self, "spark_sql_max_records_per_file") @spark_sql_max_records_per_file.setter def spark_sql_max_records_per_file(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "spark_sql_max_records_per_file", value) @property @pulumi.getter(name="sparkSqlShufflePartitions") def spark_sql_shuffle_partitions(self) -> Optional[pulumi.Input[int]]: """ Default number of partitions used to filter data for join or aggregation. Default value is `4096`. Changing this parameter will create a new resource. """ return pulumi.get(self, "spark_sql_shuffle_partitions") @spark_sql_shuffle_partitions.setter def spark_sql_shuffle_partitions(self, value: Optional[pulumi.Input[int]]): pulumi.set(self, "spark_sql_shuffle_partitions", value) @pulumi.input_type class TableColumnArgs: def __init__(__self__, *, name: pulumi.Input[str], type: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, is_partition: Optional[pulumi.Input[bool]] = None): """ :param pulumi.Input[str] name: Specifies the name of column. Changing this parameter will create a new resource. :param pulumi.Input[str] type: Specifies data type of column. Changing this parameter will create a new resource. :param pulumi.Input[str] description: Specifies the description of column. Changing this parameter will create a new resource. :param pulumi.Input[bool] is_partition: Specifies whether the column is a partition column. The value `true` indicates a partition column, and the value false indicates a non-partition column. The default value is false. Changing this parameter will create a new resource. """ pulumi.set(__self__, "name", name) pulumi.set(__self__, "type", type) if description is not None: pulumi.set(__self__, "description", description) if is_partition is not None: pulumi.set(__self__, "is_partition", is_partition) @property @pulumi.getter def name(self) -> pulumi.Input[str]: """ Specifies the name of column. Changing this parameter will create a new resource. """ return pulumi.get(self, "name") @name.setter def name(self, value: pulumi.Input[str]): pulumi.set(self, "name", value) @property @pulumi.getter def type(self) -> pulumi.Input[str]: """ Specifies data type of column. Changing this parameter will create a new resource. 
""" return pulumi.get(self, "type") @type.setter def type(self, value: pulumi.Input[str]): pulumi.set(self, "type", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ Specifies the description of column. Changing this parameter will create a new resource. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="isPartition") def is_partition(self) -> Optional[pulumi.Input[bool]]: """ Specifies whether the column is a partition column. The value `true` indicates a partition column, and the value false indicates a non-partition column. The default value is false. Changing this parameter will create a new resource. """ return pulumi.get(self, "is_partition") @is_partition.setter def is_partition(self, value: Optional[pulumi.Input[bool]]): pulumi.set(self, "is_partition", value)
PypiClean
/aliyun-python-sdk-cbn-1.0.39.tar.gz/aliyun-python-sdk-cbn-1.0.39/aliyunsdkcbn/request/v20170912/CreateCenInterRegionTrafficQosPolicyRequest.py
from aliyunsdkcore.request import RpcRequest from aliyunsdkcbn.endpoint import endpoint_data class CreateCenInterRegionTrafficQosPolicyRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateCenInterRegionTrafficQosPolicy') self.set_method('POST') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_ResourceOwnerId(self): # Long return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self, ResourceOwnerId): # Long self.add_query_param('ResourceOwnerId', ResourceOwnerId) def get_ClientToken(self): # String return self.get_query_params().get('ClientToken') def set_ClientToken(self, ClientToken): # String self.add_query_param('ClientToken', ClientToken) def get_TrafficQosQueuess(self): # RepeatList return self.get_query_params().get('TrafficQosQueues') def set_TrafficQosQueuess(self, TrafficQosQueues): # RepeatList for depth1 in range(len(TrafficQosQueues)): if TrafficQosQueues[depth1].get('Dscps') is not None: for depth2 in range(len(TrafficQosQueues[depth1].get('Dscps'))): self.add_query_param('TrafficQosQueues.' + str(depth1 + 1) + '.Dscps.' + str(depth2 + 1), TrafficQosQueues[depth1].get('Dscps')[depth2]) if TrafficQosQueues[depth1].get('QosQueueName') is not None: self.add_query_param('TrafficQosQueues.' + str(depth1 + 1) + '.QosQueueName', TrafficQosQueues[depth1].get('QosQueueName')) if TrafficQosQueues[depth1].get('RemainBandwidthPercent') is not None: self.add_query_param('TrafficQosQueues.' + str(depth1 + 1) + '.RemainBandwidthPercent', TrafficQosQueues[depth1].get('RemainBandwidthPercent')) if TrafficQosQueues[depth1].get('QosQueueDescription') is not None: self.add_query_param('TrafficQosQueues.' 
+ str(depth1 + 1) + '.QosQueueDescription', TrafficQosQueues[depth1].get('QosQueueDescription')) def get_TrafficQosPolicyName(self): # String return self.get_query_params().get('TrafficQosPolicyName') def set_TrafficQosPolicyName(self, TrafficQosPolicyName): # String self.add_query_param('TrafficQosPolicyName', TrafficQosPolicyName) def get_DryRun(self): # Boolean return self.get_query_params().get('DryRun') def set_DryRun(self, DryRun): # Boolean self.add_query_param('DryRun', DryRun) def get_ResourceOwnerAccount(self): # String return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount) def get_OwnerAccount(self): # String return self.get_query_params().get('OwnerAccount') def set_OwnerAccount(self, OwnerAccount): # String self.add_query_param('OwnerAccount', OwnerAccount) def get_TrafficQosPolicyDescription(self): # String return self.get_query_params().get('TrafficQosPolicyDescription') def set_TrafficQosPolicyDescription(self, TrafficQosPolicyDescription): # String self.add_query_param('TrafficQosPolicyDescription', TrafficQosPolicyDescription) def get_OwnerId(self): # Long return self.get_query_params().get('OwnerId') def set_OwnerId(self, OwnerId): # Long self.add_query_param('OwnerId', OwnerId) def get_TransitRouterId(self): # String return self.get_query_params().get('TransitRouterId') def set_TransitRouterId(self, TransitRouterId): # String self.add_query_param('TransitRouterId', TransitRouterId) def get_TransitRouterAttachmentId(self): # String return self.get_query_params().get('TransitRouterAttachmentId') def set_TransitRouterAttachmentId(self, TransitRouterAttachmentId): # String self.add_query_param('TransitRouterAttachmentId', TransitRouterAttachmentId)
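

# A minimal sketch of how this request might be populated before being sent with
# an AcsClient; every identifier below is a placeholder, not a real resource.
if __name__ == "__main__":
    request = CreateCenInterRegionTrafficQosPolicyRequest()
    request.set_TransitRouterId("tr-xxxxxxxx")
    request.set_TransitRouterAttachmentId("tr-attach-xxxxxxxx")
    request.set_TrafficQosPolicyName("example-qos-policy")
    request.set_TrafficQosQueuess([{
        "QosQueueName": "queue-1",
        "RemainBandwidthPercent": 20,
        "Dscps": [10, 18],
        "QosQueueDescription": "example queue",
    }])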
PypiClean
/void-terminal-0.0.3.tar.gz/void-terminal-0.0.3/void_terminal/core_functional.py
import importlib
from toolbox import clear_line_break


def get_core_functions():
    return {
        "英语学术润色": {
            # Prefix, prepended to your input. For example, used to describe your request, such as translating, explaining code, or polishing.
            "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
                      r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
                      r"Furthermore, list all modification and explain the reasons to do so in markdown table." + "\n\n",
            # Suffix, appended to your input. For example, combined with the prefix it can wrap your input in quotation marks.
            "Suffix": r"",
            # Button color (defaults to secondary)
            "Color": r"secondary",
            # Whether the button is visible (defaults to True, i.e. visible)
            "Visible": True,
            # Whether to clear the history when triggered (defaults to False, i.e. keep the previous conversation history)
            "AutoClearHistory": False
        },
        "中文学术润色": {
            "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
                      r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
            "Suffix": r"",
        },
        "查找语法错误": {
            "Prefix": r"Can you help me ensure that the grammar and the spelling is correct? " +
                      r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good." +
                      r"If you find grammar or spelling mistakes, please list mistakes you find in a two-column markdown table, " +
                      r"put the original text the first column, " +
                      r"put the corrected text in the second column and highlight the key words you fixed.""\n"
                      r"Example:""\n"
                      r"Paragraph: How is you? Do you knows what is it?""\n"
                      r"| Original sentence | Corrected sentence |""\n"
                      r"| :--- | :--- |""\n"
                      r"| How **is** you? | How **are** you? |""\n"
                      r"| Do you **knows** what **is** **it**? | Do you **know** what **it** **is** ? |""\n"
                      r"Below is a paragraph from an academic paper. "
                      r"You need to report all grammar and spelling mistakes as the example before." + "\n\n",
            "Suffix": r"",
            "PreProcess": clear_line_break,    # Preprocessing: remove line breaks
        },
        "中译英": {
            "Prefix": r"Please translate following sentence to English:" + "\n\n",
            "Suffix": r"",
        },
        "学术中英互译": {
            "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
                      r"I will provide you with some paragraphs in one language " +
                      r"and your task is to accurately and academically translate the paragraphs only into the other language. " +
                      r"Do not repeat the original provided paragraphs after translation. " +
                      r"You should use artificial intelligence tools, " +
                      r"such as natural language processing, and rhetorical knowledge " +
                      r"and experience about effective writing techniques to reply. " +
                      r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:" + "\n\n",
            "Suffix": "",
            "Color": "secondary",
        },
        "英译中": {
            "Prefix": r"翻译成地道的中文:" + "\n\n",
            "Suffix": r"",
        },
        "找图片": {
            "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +
                      r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n",
            "Suffix": r"",
            "Visible": False,
        },
        "解释代码": {
            "Prefix": r"请解释以下代码:" + "\n```\n",
            "Suffix": "\n```\n",
        },
        "参考文献转Bib": {
            "Prefix": r"Here are some bibliography items, please transform them into bibtex style." +
                      r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
                      r"Items need to be transformed:",
            "Suffix": r"",
        }
    }


def handle_core_functionality(additional_fn, inputs, history, chatbot):
    import core_functional
    importlib.reload(core_functional)    # hot-reload the prompts
    core_functional = core_functional.get_core_functions()
    if "PreProcess" in core_functional[additional_fn]:
        inputs = core_functional[additional_fn]["PreProcess"](inputs)  # fetch the preprocessing function (if any)
    inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
    if core_functional[additional_fn].get("AutoClearHistory", False):
        history = []
    return inputs, history
PypiClean
/cropy-0.2.tar.gz/cropy-0.2/README.rst
cropy : Python content based image crop
=========================================

Command-line tool and module to crop an image to a specific resolution, removing the less important parts first.

The first attempt started from the approach of this publication, but it seemed a bit complex and slow (http://research.microsoft.com/en-us/um/people/jiansun/papers/SalientDetection_CVPR07.pdf).

cropy uses entropy information to identify the slices of the image that carry the least information.

Usage
-----

To use from the command line::

    cropy -i [input image] -r [width] [height] -o [output name] -s [maxSteps]

- input image : location of the image to crop
- width, height : dimensions of the resulting cropped image
- output name : name of the output image (default : original_name.width.height.original_extension)
- maxSteps : number of iterations : a greater value means more precision, but slower processing (default : 10)

See the Example section below for a concrete invocation.

More info and examples on http://blog.mapado.com/cropy-how-to-crop-an-image-keeping-the-best-content/

Installation
------------

You can install cropy using pip::

    $ pip install cropy

Note that cropy requires ``scikit-learn``, which itself is built on ``numpy`` and ``scipy`` and requires ``cython`` to compile.

Possible upgrades
-----------------

- locate faces inside the image to prevent them from being removed
- locate text on images to crop first

Thanks
------

Inspired by slycrop (a PHP entropy-based crop) : https://github.com/stojg/slycrop
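
Example
-------

For instance, assuming an input image named ``photo.jpg``, the following call crops it to 300x200 pixels using the default number of steps::

    cropy -i photo.jpg -r 300 200 -o photo.300.200.jpg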
PypiClean
/kubernetes-py-1.10.14.tar.gz/kubernetes-py-1.10.14/kubernetes_py/K8sVolume.py
# # This file is subject to the terms and conditions defined in # file 'LICENSE.md', which is part of this source code package. # import json import yaml from kubernetes_py.models.v1.Volume import Volume class K8sVolume(object): VALID_VOLUME_TYPES = Volume.VOLUME_TYPES_TO_SOURCE_MAP.keys() def __init__(self, name=None, type=None): super(K8sVolume, self).__init__() self._type = None self.model = Volume() self.name = name self.type = type # ------------------------------------------------------------------------------------- name @property def name(self): return self.model.name @name.setter def name(self, name=None): self.model.name = name # ------------------------------------------------------------------------------------- type @property def type(self): return self._type @type.setter def type(self, t=None): if t not in self.VALID_VOLUME_TYPES: raise SyntaxError("K8sVolume: type: [ {} ] is invalid.".format(t)) self._type = t setattr(self.model, t, Volume.vol_type_to_source(t)) # ------------------------------------------------------------------------------------- source @property def source(self): return getattr(self.model, self._type, None) @source.setter def source(self, s=None): raise NotImplementedError() # ------------------------------------------------------------------------------------- medium (emptyDir) @property def medium(self): if not hasattr(self.source, "medium"): raise NotImplementedError() return self.source.medium @medium.setter def medium(self, m=None): if not hasattr(self.source, "medium"): raise NotImplementedError() self.source.medium = m # ------------------------------------------------------------------------------------- path (hostPath) @property def path(self): if not hasattr(self.source, "path"): raise NotImplementedError() return self.source.path @path.setter def path(self, p=None): if not hasattr(self.source, "path"): raise NotImplementedError() self.source.path = p # ------------------------------------------------------------------------------------- secret_name (secret) @property def secret_name(self): if not hasattr(self.source, "secret_name"): raise NotImplementedError() return self.source.secret_name @secret_name.setter def secret_name(self, sn=None): if not hasattr(self.source, "secret_name"): raise NotImplementedError() self.source.secret_name = sn # ------------------------------------------------------------------------------------- volume_id (AWS) # http://kubernetes.io/docs/user-guide/volumes/#awselasticblockstore # - the nodes on which pods are running must be AWS EC2 instances # - those instances need to be in the same region and availability-zone as the EBS volume # - EBS only supports a single EC2 instance mounting a volume # Pod creation will timeout waiting for readiness if not on AWS; unschedulable. @property def volume_id(self): if not hasattr(self.source, "volume_id"): raise NotImplementedError() return self.source.volume_id @volume_id.setter def volume_id(self, vid=None): if not hasattr(self.source, "volume_id"): raise NotImplementedError() self.source.volume_id = vid # ------------------------------------------------------------------------------------- pd_name (GCE) # http://kubernetes.io/docs/user-guide/volumes/#gcepersistentdisk # - the nodes on which pods are running must be GCE VMs # - those VMs need to be in the same GCE project and zone as the PD # Pod creation will timeout waiting for readiness if not on GCE; unschedulable. 
@property def pd_name(self): if not hasattr(self.source, "pd_name"): raise NotImplementedError() return self.source.pd_name @pd_name.setter def pd_name(self, pd=None): if not hasattr(self.source, "pd_name"): raise NotImplementedError() self.source.pd_name = pd # ------------------------------------------------------------------------------------- read_only (GCE) # HTTP 422: GCE PD can only be mounted on multiple machines if it is read-only @property def read_only(self): if not hasattr(self.source, "read_only"): raise NotImplementedError() return self.source.read_only @read_only.setter def read_only(self, ro=None): if not hasattr(self.source, "read_only"): raise NotImplementedError() self.source.read_only = ro # ------------------------------------------------------------------------------------- fs_type (AWS, GCE) @property def fs_type(self): if not hasattr(self.source, "fs_type"): raise NotImplementedError() return self.source.fs_type @fs_type.setter def fs_type(self, t=None): if not hasattr(self.source, "fs_type"): raise NotImplementedError() self.source.fs_type = t # ------------------------------------------------------------------------------------- nfs_server @property def nfs_server(self): if not hasattr(self.source, "server"): raise NotImplementedError() return self.source.server @nfs_server.setter def nfs_server(self, s=None): if not hasattr(self.source, "server"): raise NotImplementedError() self.source.server = s # ------------------------------------------------------------------------------------- nfs_path @property def nfs_path(self): if not hasattr(self.source, "path"): raise NotImplementedError() return self.source.path @nfs_path.setter def nfs_path(self, p=None): if not hasattr(self.source, "path"): raise NotImplementedError() self.source.path = p # ------------------------------------------------------------------------------------- git_repository @property def git_repository(self): if not hasattr(self.source, "repository"): raise NotImplementedError() return self.source.repository @git_repository.setter def git_repository(self, repo=None): if not hasattr(self.source, "repository"): raise NotImplementedError() self.source.repository = repo # ------------------------------------------------------------------------------------- git_revision @property def git_revision(self): if not hasattr(self.source, "revision"): raise NotImplementedError() return self.source.revision @git_revision.setter def git_revision(self, rev=None): if not hasattr(self.source, "revision"): raise NotImplementedError() self.source.revision = rev # ------------------------------------------------------------------------------------- claimName @property def claim_name(self): if not hasattr(self.source, "claim_name"): raise NotImplementedError() return self.source.claim_name @claim_name.setter def claim_name(self, name=None): if not hasattr(self.source, "claim_name"): raise NotImplementedError() self.source.claim_name = name # ------------------------------------------------------------------------------------- configmap_name @property def configmap_name(self): if not hasattr(self.source, "name"): raise NotImplementedError() return self.source.name @configmap_name.setter def configmap_name(self, name=None): if not hasattr(self.source, "name"): raise NotImplementedError() self.source.name = name # ------------------------------------------------------------------------------------- configmap_items @property def configmap_items(self): if not hasattr(self.source, "items"): raise NotImplementedError() return 
self.source.items @configmap_items.setter def configmap_items(self, v=None): if not hasattr(self.source, "items"): raise NotImplementedError() self.source.items = v # ------------------------------------------------------------------------------------- configmap_default_mode @property def configmap_default_mode(self): if not hasattr(self.source, "default_mode"): raise NotImplementedError() return self.source.default_mode @configmap_default_mode.setter def configmap_default_mode(self, v=None): if not hasattr(self.source, "default_mode"): raise NotImplementedError() self.source.default_mode = v # ------------------------------------------------------------------------------------- configmap_optional @property def configmap_optional(self): if not hasattr(self.source, "optional"): raise NotImplementedError() return self.source.optional @configmap_optional.setter def configmap_optional(self, v=None): if not hasattr(self.source, "optional"): raise NotImplementedError() self.source.optional = v # ------------------------------------------------------------------------------------- serialize def serialize(self): return self.model.serialize() def as_json(self): data = self.serialize() dump = json.dumps(data, indent=4) return dump def as_yaml(self): data = self.serialize() dump = yaml.dump(data, default_flow_style=False) return dump
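
# Illustrative sketch only (not part of the original module): the repeated
# "hasattr(self.source, ...) or raise NotImplementedError()" guard used by the
# properties above could be factored into a single descriptor, for example:
#
#     class SourceAttr:
#         def __init__(self, attr):
#             self.attr = attr
#
#         def __get__(self, obj, objtype=None):
#             if obj is None:
#                 return self
#             if not hasattr(obj.source, self.attr):
#                 raise NotImplementedError()
#             return getattr(obj.source, self.attr)
#
#         def __set__(self, obj, value):
#             if not hasattr(obj.source, self.attr):
#                 raise NotImplementedError()
#             setattr(obj.source, self.attr, value)
#
# with the proxied attributes then declared as, e.g., fs_type = SourceAttr("fs_type")
# and nfs_server = SourceAttr("server"). The names above are illustrative and not
# part of the original API.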
PypiClean
/cloudshell-cli-5.0.0.zip/cloudshell-cli-5.0.0/cloudshell/cli/session/ssh_session.py
from __future__ import annotations from io import StringIO from typing import TYPE_CHECKING import paramiko from scp import SCPClient from cloudshell.cli.session.connection_params import ConnectionParams from cloudshell.cli.session.expect_session import ExpectSession from cloudshell.cli.session.session_exceptions import SessionException if TYPE_CHECKING: from logging import Logger from cloudshell.cli.types import T_ON_SESSION_START, T_TIMEOUT class SSHSessionException(SessionException): pass class SSHSession(ExpectSession, ConnectionParams): SESSION_TYPE = "SSH" BUFFER_SIZE = 512 def __init__( self, host: str, username: str, password: str, port: int | None = None, on_session_start: T_ON_SESSION_START | None = None, pkey: str | None = None, pkey_passphrase: str | None = None, *args, **kwargs, ): ConnectionParams.__init__( self, host, port=port, on_session_start=on_session_start, pkey=pkey ) ExpectSession.__init__(self, *args, **kwargs) if self.port is None: self.port = 22 self.username = username self.password = password self.pkey = pkey self.pkey_passphrase = pkey_passphrase self._handler = None self._current_channel = None self._buffer_size = self.BUFFER_SIZE def __eq__(self, other) -> bool: return all( [ ConnectionParams.__eq__(self, other), self.username == other.username, self.password == other.password, self.pkey == other.pkey, self.pkey_passphrase == other.pkey_passphrase, ] ) def __del__(self) -> None: self.disconnect() def _create_handler(self) -> None: self._handler = paramiko.SSHClient() self._handler.load_system_host_keys() self._handler.set_missing_host_key_policy(paramiko.AutoAddPolicy()) def _initialize_session(self, prompt: str, logger: Logger) -> None: self._create_handler() try: self._handler.connect( self.host, self.port, self.username, self.password, timeout=self._timeout, banner_timeout=30, allow_agent=False, look_for_keys=False, pkey=self._get_pkey_object(self.pkey, self.pkey_passphrase, logger), ) except Exception as e: logger.exception("Failed to initialize session:") raise SSHSessionException(f"Failed to open connection to device: {e}") self._current_channel = self._handler.invoke_shell() self._current_channel.settimeout(self._timeout) def _connect_actions(self, prompt: str, logger: Logger) -> None: self.hardware_expect( None, expected_string=prompt, timeout=self._timeout, logger=logger ) self._on_session_start(logger) def disconnect(self) -> None: """Disconnect from device.""" if self._handler: self._handler.close() self._active = False def _send(self, command: str, logger: Logger) -> None: """Send message to device.""" self._current_channel.send(command) def _set_timeout(self, timeout: T_TIMEOUT) -> None: self._current_channel.settimeout(timeout) def _read_byte_data(self) -> bytes: return self._current_channel.recv(self._buffer_size) @property def scp_client(self) -> SCPClient: return SCPClient(self._handler.get_transport()) @property def sftp_client(self) -> paramiko.SFTPClient: return paramiko.SFTPClient.from_transport(self._handler.get_transport()) @staticmethod def _get_pkey_object( key_material: str | None, passphrase: str | None, logger: Logger ) -> paramiko.RSAKey | paramiko.DSSKey | paramiko.ECDSAKey | None: """Try to detect private key type and return paramiko.PKey object.""" if not key_material: return None for cls in [paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey]: try: key = cls.from_private_key(StringIO(key_material), password=passphrase) except paramiko.ssh_exception.SSHException as e: # Invalid key, try other key type logger.warning(e) else: 
return key
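

# Hypothetical usage sketch (not part of the original module). It exercises only
# the self-contained key-type detection helper above; the key path is a
# placeholder, and full sessions are normally driven by the surrounding CLI framework.
if __name__ == "__main__":
    import logging

    logging.basicConfig(level=logging.INFO)
    with open("/path/to/private_key.pem") as key_file:  # placeholder path
        key_material = key_file.read()
    pkey = SSHSession._get_pkey_object(
        key_material, passphrase=None, logger=logging.getLogger(__name__)
    )
    print(f"Detected key type: {type(pkey).__name__ if pkey else None}")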
PypiClean
/gui/omniframe/omni_frame_widget.py
import pyxyfy.logger logger = pyxyfy.logger.get(__name__) import sys from pathlib import Path from threading import Thread, Event import time import cv2 from PyQt6.QtCore import Qt, pyqtSignal, QThread from PyQt6.QtGui import QImage, QPixmap, QIcon from PyQt6.QtWidgets import ( QApplication, QSizePolicy, QWidget, QSpinBox, QScrollArea, QComboBox, QCheckBox, QDialog, QGroupBox, QDoubleSpinBox, QHBoxLayout, QLabel, QPushButton, QSlider, QVBoxLayout, ) # Append main repo to top of path to allow import of backend from pyxyfy.session import Session from pyxyfy.gui.omniframe.omni_frame_builder import OmniFrameBuilder from pyxyfy.cameras.synchronizer import Synchronizer from pyxyfy import __root__ class OmniFrameWidget(QWidget): def __init__(self,session:Session): super(OmniFrameWidget, self).__init__() self.session = session self.synchronizer:Synchronizer = self.session.get_synchronizer() self.frame_builder = OmniFrameBuilder(self.synchronizer, board_count_target=80) self.frame_emitter = OmniFrameEmitter(self.frame_builder) self.frame_emitter.start() self.layout_widgets() self.connect_widgets() def layout_widgets(self): self.setLayout(QVBoxLayout()) self.calibrate_collect_btn = QPushButton("Collect Calibration Data") self.layout().addWidget(self.calibrate_collect_btn) self.scroll_area = QScrollArea() self.scroll_area.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOn) # self.scroll_area.setLayout(QVBoxLayout()) self.layout().addWidget(self.scroll_area) self.omni_frame_display = QLabel() self.scroll_area.setWidget(self.omni_frame_display) def connect_widgets(self): self.calibrate_collect_btn.clicked.connect(self.on_calibrate_connect_click) self.frame_emitter.ImageBroadcast.connect(self.ImageUpdateSlot) def on_calibrate_connect_click(self): if self.calibrate_collect_btn.text() == "Collect Calibration Data": logger.info("Begin collecting calibration data") # by default, data saved to session folder self.frame_builder.store_points.set() self.session.start_recording() self.calibrate_collect_btn.setText("Early Terminate Collection") elif self.calibrate_collect_btn.text() == "Early Terminate Collection": logger.info("Prematurely end data collection") self.frame_builder.store_points.clear() self.calibrate_collect_btn.setText("Calibrate") self.frame_emitter.stop() self.stop_thread = Thread(target=self.session.stop_recording, args=(), daemon=True) elif self.calibrate_collect_btn.text() == "Calibrate": self.initiate_calibration() def ImageUpdateSlot(self, q_image): self.omni_frame_display.resize(self.omni_frame_display.sizeHint()) qpixmap = QPixmap.fromImage(q_image) self.omni_frame_display.setPixmap(qpixmap) ## This is a bit of a hack and likely handled better with proper signals if self.omni_frame_display.height()==1: logger.info("Target board counts acquired, ending data collection.") self.calibrate_collect_btn.setText("Calibrate") self.frame_emitter.stop() self.stop_thread = Thread(target=self.session.stop_recording, args=(), daemon=True) self.stop_thread.start() def initiate_calibration(self): self.calibrate_thead = Thread(target=self.session.calibrate,args=(), daemon=True) self.calibrate_thead.start() class OmniFrameEmitter(QThread): ImageBroadcast = pyqtSignal(QImage) def __init__(self, omniframe_builder:OmniFrameBuilder): super(OmniFrameEmitter,self).__init__() self.omniframe_builder = omniframe_builder logger.info("Initiated frame emitter") self.keep_collecting = Event() self.keep_collecting.set() def run(self): while self.keep_collecting.is_set(): omni_frame = 
self.omniframe_builder.get_omni_frame() if omni_frame is not None: image = cv2_to_qlabel(omni_frame) self.ImageBroadcast.emit(image) def stop(self): self.keep_collecting.clear() def cv2_to_qlabel(frame): image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) qt_frame = QImage( image.data, image.shape[1], image.shape[0], QImage.Format.Format_RGB888, ) return qt_frame if __name__ == "__main__": App = QApplication(sys.argv) config_path = Path(__root__, "tests", "demo") session = Session(config_path) session.load_cameras() session.load_streams() session.adjust_resolutions() omni_dialog = OmniFrameWidget(session) omni_dialog.show() sys.exit(App.exec())
PypiClean
/arus-1.1.22.tar.gz/arus-1.1.22/examples/pipelines/pipeline_flow_control.py
from arus.core.pipeline import Pipeline
import arus
import datetime as dt
import pandas as pd
import time
import logging


def _pipeline_test_processor(chunk_list, **kwargs):
    import pandas as pd
    result = {'NAME': [], 'START_TIME': [], 'STOP_TIME': []}
    for data, st, et, prev_st, prev_et, name in chunk_list:
        result['NAME'].append(name)
        result['START_TIME'].append(data.iloc[0, 0])
        result['STOP_TIME'].append(data.iloc[-1, 0])
    result = pd.DataFrame.from_dict(result)
    return result


if __name__ == "__main__":
    window_size = 2
    start_time = dt.datetime.now()
    gr = arus.generator.RandomAccelDataGenerator(
        sr=80, grange=8, sigma=1, buffer_size=100)
    seg = arus.segmentor.SlidingWindowSegmentor(window_size)
    stream = arus.Stream(gr, seg, name='stream-1')
    pipeline = Pipeline(max_processes=2, scheduler='processes')
    pipeline.add_stream(stream)
    pipeline.set_processor(_pipeline_test_processor)

    # connect, there will be no incoming data, get_iterator will be blocking
    pipeline.connect()
    results = []
    count_none = 0
    for result, st, et, prev_st, prev_et, name in pipeline.get_iterator(timeout=0.1):
        if result is not None:
            print('Connect is not working')
            assert False
        else:
            count_none = count_none + 1
            if count_none == 50:
                break
    print('Connected yet no data is coming')
    pipeline.stop()

    # connect, wait for 3 seconds and then start processing, get_iterator will be
    # blocking for 3 + 4 seconds until receiving the first window
    pipeline.connect()
    connect_time = pd.Timestamp(dt.datetime.now())
    print('Connect time: ' + str(connect_time))
    count_none = 0
    for result, st, et, prev_st, prev_et, name in pipeline.get_iterator(timeout=0.1):
        if result is not None:
            first_block_time = pd.Timestamp(dt.datetime.now())
            print('First block time: ' + str(first_block_time) + ', ' +
                  str(first_block_time.timestamp() - connect_time.timestamp()) +
                  ' seconds since connection.')
            print('First block st: ' + str(st))
            break
        else:
            count_none = count_none + 1
            if count_none == 30:
                process_time = pd.Timestamp(dt.datetime.now())
                pipeline.process(start_time=process_time)
                print('Process time: ' + str(process_time) + ', ' +
                      str(process_time.timestamp() - connect_time.timestamp()) +
                      ' seconds since connection.')
    print('Stop')
    pipeline.stop()

    # connect and process for 4 seconds and then pause for 2 seconds and then
    # process again for 4 seconds
    st = dt.datetime.now()
    pipeline.start(process_start_time=st)
    print('Start at: ' + str(st))
    count = 0
    count_none = 0
    restarted = False
    stt = time.time() + 100000
    for result, st, et, prev_st, prev_et, name in pipeline.get_iterator(timeout=0.1):
        if result is not None:
            count = count + 1
            print('Blocks at: ' + str(st))
            if count == 2:
                print('Pause')
                pipeline.pause()
                stt = time.time()
            if count == 4:
                break
        if time.time() - stt >= 2 and not restarted:
            restarted = True
            ts = dt.datetime.now()
            process_time = pd.Timestamp(ts)
            pipeline.process(start_time=process_time)
            print('Start again at:' + str(ts))
    print('Stop')
    pipeline.stop()
PypiClean
/exodus-bundler-2.0.4.tar.gz/exodus-bundler-2.0.4/README.md
<h1 vertical-align="middle">Exodus <a targe="_blank" href="https://twitter.com/home?status=Exodus%20%E2%80%93%20Painless%20relocation%20of%20Linux%20binaries%20without%20containers!%20%40IntoliNow%0A%0Ahttps%3A//github.com/intoli/exodus"> <img height="26px" src="https://simplesharebuttons.com/images/somacro/twitter.png" alt="Tweet"></a> <a target="_blank" href="https://www.facebook.com/sharer/sharer.php?u=https%3A//github.com/intoli/exodus"> <img height="26px" src="https://simplesharebuttons.com/images/somacro/facebook.png" alt="Share on Facebook"></a> <a target="_blank" href="http://reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Fintoli%2Fexodus&title=Exodus%20-%20Painless%20relocation%20of%20ELF%20binaries%20on%20Linux"> <img height="26px" src="https://simplesharebuttons.com/images/somacro/reddit.png" alt="Share on Reddit"></a> <a target="_blank" href="https://news.ycombinator.com/submitlink?u=https://github.com/intoli/exodus&t=Exodus%20%E2%80%93%20Painless%20relocation%20of%20Linux%20binaries%20without%20containers"> <img height="26px" src="media/ycombinator.png" alt="Share on Hacker News"></a> </h1> <p align="left"> <a href="https://circleci.com/gh/intoli/exodus/tree/master"> <img src="https://img.shields.io/circleci/project/github/intoli/exodus/master.svg" alt="Build Status"></a> <a href="https://circleci.intoli.com/artifacts/intoli/exodus/coverage-report/index.html"> <img src="https://img.shields.io/badge/dynamic/json.svg?label=coverage&colorB=ff69b4&query=$.coverage&uri=https://circleci.intoli.com/artifacts/intoli/exodus/coverage-report/total-coverage.json" alt="Coverage"></a> <a href="https://github.com/intoli/exodus/blob/master/LICENSE.md"> <img src="https://img.shields.io/pypi/l/exodus-bundler.svg" alt="License"></a> <a href="https://pypi.python.org/pypi/exodus-bundler/"> <img src="https://img.shields.io/pypi/v/exodus-bundler.svg" alt="PyPI Version"></a> </p> Exodus is a tool that makes it easy to successfully relocate Linux ELF binaries from one system to another. This is useful in situations where you don't have root access on a machine or where a package simply isn't available for a given Linux distribution. For example, CentOS 6.X and Amazon Linux don't have packages for [Google Chrome](https://www.google.com/chrome/browser/desktop/index.html) or [aria2](https://aria2.github.io/). Server-oriented distributions tend to have more limited and outdated packages than desktop distributions, so it's fairly common that one might have a piece of software installed on their laptop that they can't easily install on a remote machine. With exodus, transferring a piece of software that's working on one computer to another is as simple as this. ```bash exodus aria2c | ssh intoli.com ``` Exodus handles bundling all of the binary's dependencies, compiling a statically linked wrapper for the executable that invokes the relocated linker directly, and installing the bundle in `~/.exodus/` on the remote machine. You can see it in action here. ![Demonstration of usage with htop](media/htop-demo.gif) ## Table of Contents - [The Problem Being Solved](#the-problem-being-solved) - An overview of some of the challenges that arise when relocating binaries. - [Installation](#installation) - Instructions for installing exodus. - [Usage](#usage) - [The Command-Line Interface](#command-line-interface) - The options supported by the command-line utility. - [Usage Examples](#examples) - Common usage patterns, helpful for getting started quickly. 
- [How It Works](#how-it-works) - An overview of how exodus works. - [Known Limitations](#known-limitations) - Situations that are currently outside the scope of what exodus can handle. - [Development](#development) - Instructions for setting up the development environment. - [Contributing](#contributing) - Guidelines for contributing. - [License](#license) - License details for the project. ## The Problem Being Solved If you simply copy an executable file from one system to another, then you're very likely going to run into problems. Most binaries available on Linux are dynamically linked and depend on a number of external library files. You'll get an error like this when running a relocated binary when it has a missing dependency. ``` aria2c: error while loading shared libraries: libgnutls.so.30: cannot open shared object file: No such file or directory ``` You can try to install these libraries manually, or to relocate them and set `LD_LIBRARY_PATH` to wherever you put them, but it turns out that the locations of the [ld-linux](https://linux.die.net/man/8/ld-linux) linker and the [glibc](https://www.gnu.org/software/libc/) libraries are hardcoded. Things can very quickly turn into a mess of relocation errors, ``` aria2c: relocation error: /lib/libpthread.so.0: symbol __getrlimit, version GLIBC_PRIVATE not defined in file libc.so.6 with link time reference ``` segmentation faults, ``` Segmentation fault (core dumped) ``` or, if you're really unlucky, this very confusing symptom of a missing linker. ``` $ ./aria2c bash: ./aria2c: No such file or directory $ ls -lha ./aria2c -rwxr-xr-x 1 sangaline sangaline 2.8M Jan 30 21:18 ./aria2c ``` Exodus works around these issues by compiling a small statically linked launcher binary that invokes the relocated linker directly with any hardcoded `RPATH` library paths overridden. The relocated binary will run with the exact same linker and libraries that it ran with on its origin machine. ## Installation The package can be installed from [the package on pypi](https://pypi.python.org/pypi/exodus_bundler). Running the following will install `exodus` locally for your current user. ```bash pip install --user exodus-bundler ``` You will then need to add `~/.local/bin/` to your `PATH` variable in order to run the `exodus` executable (if you haven't already done so). This can be done by adding ``` export PATH="~/.local/bin/:${PATH}" ``` to your `~/.bashrc` file. ### Optional/Recommended Dependencies It is also highly recommended that you install [gcc](https://gcc.gnu.org/) and one of either [musl libc](https://www.musl-libc.org/) or [diet libc](https://www.fefe.de/dietlibc/) on the machine where you'll be packaging binaries. If present, these small C libraries will be used to compile small statically linked launchers for the bundled applications. An equivalent shell script will be used as a fallback, but it carries significant overhead compared to the compiled launchers. ## Usage ### Command-Line Interface The command-line interface supports the following options. ``` usage: exodus [-h] [-c CHROOT_PATH] [-a DEPENDENCY] [-d] [--no-symlink FILE] [-o OUTPUT_FILE] [-q] [-r [NEW_NAME]] [--shell-launchers] [-t] [-v] EXECUTABLE [EXECUTABLE ...] Bundle ELF binary executables with all of their runtime dependencies so that they can be relocated to other systems with incompatible system libraries. positional arguments: EXECUTABLE One or more ELF executables to include in the exodus bundle. 
optional arguments: -h, --help show this help message and exit -c CHROOT_PATH, --chroot CHROOT_PATH A directory that will be treated as the root during linking. Useful for testing and bundling extracted packages that won't run without a chroot. (default: None) -a DEPENDENCY, --add DEPENDENCY, --additional-file DEPENDENCY Specifies an additional file to include in the bundle, useful for adding programatically loaded libraries and other non-library dependencies. The argument can be used more than once to include multiple files, and directories will be included recursively. (default: []) -d, --detect Attempt to autodetect direct dependencies using the system package manager. Operating system support is limited. (default: False) --no-symlink FILE Signifies that a file must not be symlinked to the deduplicated data directory. This is useful if a file looks for other resources based on paths relative its own location. This is enabled by default for executables. (default: []) -o OUTPUT_FILE, --output OUTPUT_FILE The file where the bundle will be written out to. The extension depends on the output type. The "{{executables}}" and "{{extension}}" template strings can be used in the provided filename. If omitted, the output will go to stdout when it is being piped, or to "./exodus-{{executables}}-bundle.{{extension}}" otherwise. (default: None) -q, --quiet Suppress warning messages. (default: False) -r [NEW_NAME], --rename [NEW_NAME] Renames the binary executable(s) before packaging. The order of rename tags must match the order of positional executable arguments. (default: []) --shell-launchers Force the use of shell launchers instead of attempting to compile statically linked ones. (default: False) -t, --tarball Creates a tarball for manual extraction instead of an installation script. Note that this will change the output extension from ".sh" to ".tgz". (default: False) -v, --verbose Output additional informational messages. (default: False) ``` ### Examples #### Piping Over SSH The easiest way to install an executable bundle on a remote machine is to pipe the `exodus` command output over SSH. For example, the following will install the `aria2c` command on the `intoli.com` server. ```bash exodus aria2c | ssh intoli.com ``` This requires that the default shell for the remote user be set to `bash` (or a compatible shell). If you use `csh`, then you need to additionally run `bash` on the remote server like this. ```bash exodus aria2c | ssh intoli.com bash ``` #### Explicitly Adding Extra Files Additional files can be added to bundles in a number of different ways. If there is a specific file or directory that you would like to include, then you can use the `--add` option. For example, the following command will bundle `nmap` and include the contents of `/usr/share/nmap` in the bundle. ```bash exodus --add /usr/share/nmap nmap ``` You can also pipe a list of dependencies into `exodus`. This allows you to use standard Linux utilities to find and filter dependencies as you see fit. The following command sequence uses `find` to include all of the Lua scripts under `/usr/share/nmap`. ```bash find /usr/share/nmap/ -iname '*.lua' | exodus nmap ``` These two approaches can be used together, and the `--add` flag can also be used multiple times in one command. #### Auto-Detecting Extra Files If you're not sure which extra dependencies are necessary, you can use the `--detect` option to query your system's package manager and automatically include any files in the corresponding packages. 
Running ```bash exodus --detect nmap ``` will include the contents of `/usr/share/nmap` as well as its man pages and the contents of `/usr/share/zenmap/`. If you ever try to relocate a binary that doesn't work with the default configuration, the `--detect` option is a good first thing to try. You can also pipe the output of `strace` into `exodus` and all of the files that are accessed will be automatically included. This is particularly useful in situations where shared libraries are loaded programmatically, but it can also be used to determine which files are necessary to run a specific command. The following command will determine all of the files that `nmap` accesses while running the set of default scripts. ```bash strace -f nmap --script default 127.0.0.1 2>&1 | exodus nmap ``` The output of `strace` is then parsed by `exodus` and all of the files are included. It's generally more robust to use `--detect` to find the non-library dependencies, but the `strace` pattern can be indispensable when a program uses `dlopen()` to load libraries programmatically. Also, note that *any* files that a program accesses will be included in a bundle when following this approach. Never distribute a bundle without being certain that you haven't accidentally included a file that you don't want to make public. #### Renaming Binaries Multiple binaries that have the same name can be installed in parallel through the use of the `--rename`/`-r` option. Say that you have two different versions of `grep` on your local machine: one at `/bin/grep` and one at `/usr/local/bin/grep`. In that situation, using the `-r` flag allows you to assign aliases for each binary. ```bash exodus -r grep-1 -r grep-2 /bin/grep /usr/local/bin/grep ``` The above command would install the two `grep` versions in parallel with `/bin/grep` called `grep-1` and `/usr/local/bin/grep` called `grep-2`. #### Manual Extraction You can create a compressed tarball directly instead of the default script by specifying the `--tarball` option. To create a tarball, copy it to a remote server, and then extract it in `~/custom-location`, you could run the following. ```bash # Create the tarball. exodus --tarball aria2c --output aria2c.tgz # Copy it to the remote server and remove it locally. scp aria2c.tgz intoli.com:/tmp/aria2c.tgz rm aria2c.tgz # Make sure that `~/custom-location` exists. ssh intoli.com "mkdir -p ~/custom-location" # Extract the tarball remotely. ssh intoli.com "tar --strip 1 -C ~/custom-location -zxf /tmp/aria2c.tgz" # Remove the remote tarball. ssh intoli.com "rm /tmp/aria2c.tgz" ``` You will additionally need to add `~/custom-location/bin` to your `PATH` variable on the remote server. This can be done by adding the following to `~/.bashrc` on the remote server. ```bash export PATH="~/custom-location/bin:${PATH}" ``` #### Adding to a Docker Image Tarball formatted exodus bundles can easily be included in Docker images by using the [ADD](https://docs.docker.com/engine/reference/builder/#add) instruction. You must first create a bundle using the `--tarball` option ```bash # Create and enter a directory for the Docker image. mkdir jq cd jq # Generate the `exodus-jq-bundle.tgz` bundle. exodus --tarball jq ``` and then create a `Dockerfile` file inside of the `jq` directory with the following contents. ``` FROM scratch ADD exodus-jq-bundle.tgz /opt/ ENTRYPOINT ["/opt/exodus/bin/jq"] ``` The Docker image can then be built by running ```bash docker build -t jq . ``` and `jq` can be run inside of the container. 
```bash docker run jq ``` This simple image will include only the `jq` binary and dependencies, but the bundles can be included in existing docker images in the same way. For example, adding ```bash ENV PATH="/opt/exodus/bin:${PATH}" ADD exodus-jq-bundle.tgz /opt/ ``` to an existing `Dockerfile` will make the `jq` binary available for use inside the container. ## How It Works There are two main components to how exodus works: 1. Finding and bundling all of a binary's dependencies. 2. Launching the binary in such a way that the proper dependencies are used without any potential interaction from system libraries on the destination machine. The first component is actually fairly simple. You can invoke [ld-linux](https://linux.die.net/man/8/ld-linux) with the `LD_TRACE_LOADED_OBJECTS` environment variable set to `1` and it will list all of the resolved library dependencies for a binary. For example, running ```bash LD_TRACE_LOADED_OBJECTS=1 /lib64/ld-linux-x86-64.so.2 /bin/grep ``` will output the following. ``` linux-vdso.so.1 => (0x00007ffc7495c000) libpcre.so.0 => /lib64/libpcre.so.0 (0x00007f89b2f3e000) libc.so.6 => /lib64/libc.so.6 (0x00007f89b2b7a000) libpthread.so.0 => /usr/lib/libpthread.so.0 (0x00007f0e95e8c000) /lib64/ld-linux-x86-64.so.2 (0x00007f89b3196000) ``` The `linus-vdso.so.1` dependency refers to kernel space routines that are exported to user space, but the other four are shared library files on disk that are required in order to run `grep`. Notably, one of these dependencies is the `/lib64/ld-linux-x86-64.so.2` linker itself. The location of this file is typically hardcoded into an ELF binary's `INTERP` header and the linker is invoked by the kernel when you run the program. We'll come back to that in a minute, but for now the main point is that we can find a binary's direct dependencies using the linker. Of course, these direct dependencies might have additional dependencies of their own. We can iteratively find all of the necessary dependencies by following the same approach of invoking the linker again for each of the library dependencies. This isn't actually necessary for `grep`, but exodus does handle finding the full set of dependencies for you. After all of the dependencies are found, exodus puts them together with the binary in a tarball that can be extracted (typically into either `/opt/exodus/` or `~/.exodus`). We can explore the structure of the `grep` bundle by using [tree](https://linux.die.net/man/1/tree) combined with a `sed` one-liner to truncate long SHA-256 hashes to 8 digits. Running ```bash alias truncate-hashes="sed -r 's/([a-f0-9]{8})[a-f0-9]{56}/\1.../g'" tree ~/.exodus/ | truncate-hashes ``` will show us all of the files and folders included in the `grep` bundle. ``` /home/sangaline/.exodus/ ├── bin │   └── grep -> ../bundles/3124cd96.../usr/bin/grep ├── bundles │   └── 3124cd96... │   ├── lib64 │   │   └── ld-linux-x86-64.so.2 -> ../../../data/dfd5de26... │   └── usr │   ├── bin │   │   ├── grep │   │   ├── grep-x -> ../../../../data/7477c1a7... │   │   └── linker-dfd5de26... │   └── lib │   ├── libc.so.6 -> ../../../../data/6d0e1d45... │   ├── libpcre.so.1 -> ../../../../data/a0862ebc... │   └── libpthread.so.0 -> ../../../../data/85cb56a5... └── data ├── 6d0e1d45... ├── 7477c1a7... ├── 85cb56a5... ├── a0862ebc... └── dfd5de26... 8 directories, 13 files ``` You can see that there are three top-level directories within `~/.exodus/`: `bin`, `bundles`, and `data`. Let's cover these in reverse-alphabetical order, starting with the `data` directory. 
The `data` directory contains the actual files from the bundles with names corresponding to SHA-256 hashes of their content. This is done so that multiple versions of a file with the same filename can be extracted in the `data` directory without overwriting each other. On the other hand, files that do have the same content *will* overwrite each other. This avoids the need to store multiple copies of the same data, even if the identical files appear in different bundles or directories. Next, we have the `bundles` directory, which is full of subfolders that also have SHA-256 hashes as names. The hashes this time are determined based on the combined directory structure and content of everything included in the bundle. The hash provides a unique fingerprint for the bundle and allows multiple bundles to be extracted without their directory contents mixing. Inside of each bundle subdirectory, the original directory structure of the bundle's contents on the host machine is mirrored. For this particular `grep` bundle, there are `lib64`, `usr/bin`, and `usr/lib` directories. A more complicated bundle could include additional files from `/usr/share`, `/opt/local`, a user's home directory, or really anywhere on the system (see the `--add` and `--detect` options). The files in both `lib64` and `usr/lib` simply consist of symlinks to the actual library files in the top-level `data/` directory. The `usr/bin` directory is a little more complicated. The `grep` file isn't actually the original `grep` binary, it's a special executable that `exodus` constructs called a "launcher." A launcher is a tiny program that invokes the linker and overrides the library search path in such a way that our original binary can run without any system libraries being used and causing issues due to incompatibilities. The linker in this case is the `linker-dfd5de26...` file. This is located in the same directory so that resource paths can be resolved relative to the running executable. Finally, the `grep-x` symlink points to the actual `grep` binary that was bundled and extracted in the top-level `data/` directory (this is the ELF file that the linker interprets). When a C compiler and either [musl libc](https://www.musl-libc.org/) or [diet libc](https://www.fefe.de/dietlibc/) are available, exodus will compile a statically linked binary launcher. If neither of these are present, it will fall back to using a shell script to perform the task of the launcher. This adds a little bit of overhead relative to the binary launchers, but they are helpful for understanding what the launchers do. Here's the shell script version of the `grep-launcher`, for example. ```bash #! /bin/bash current_directory="$(dirname "$(readlink -f "$0")")" executable="${current_directory}/./grep-x" library_path="../../lib64:../lib64:../../lib:../lib:../../lib32:../lib32" library_path="${current_directory}/${library_path//:/:${current_directory}/}" linker="${current_directory}/./linker-dfd5de2638cea087685b67786050dcdc33aac7b67f5f8c2753b7da538517880a" exec "${linker}" --library-path "${library_path}" --inhibit-rpath "" "${executable}" "$@" ``` You can see that the launcher first constructs the full paths for all of the `LD_LIBRARY_PATH` directories, the executable, and the linker based on its own location. It then executes the linker with a set of arguments that allow it to search the proper library directories, ignore the hardcoded `RPATH`, and run the binary with any command-line arguments passed along. 
This serves a similar purpose to something like [patchelf](https://github.com/NixOS/patchelf) that would modify the `INTERP` and `RPATH` of the binary, but it additionally allows for both the linker and library locations to be specified based *solely on their relative locations*. This is what allows for the exodus bundles to be extracted in `~/.exodus`, `/opt/exodus/`, or any other location, as long as the internal bundle structure is preserved. Continuing on with our reverse-alphabetical order, we finally get to the top-level `bin` directory. The top-level `bin` directory consists of symlinks of the binary names to their corresponding launchers. This allows for the addition of a single directory to a user's `PATH` variable in order to make the migrated exodus binaries accessible. For example, adding `export PATH="~/.exodus/bin:${PATH}"` to a `~/.bashrc` file will add all of these entry points to a user's `PATH` and allow them to be run without specifying their full path. ## Known Limitations There are several scenarios under which bundling an application with exodus will fail. Many of these are things that we're working on and hope to improve in the future, but some are fundamentally by design and are unlikely to change. Here you can see an overview of situations where exodus will not be able to successfully relocate executables. - **Non-ELF Binaries** - Exodus currently only supports completely bundling ELF binaries. Interpreted executable files, like shell scripts, can be included in bundles, but their shebang interpreter directives will not be changed. This generally means that they will be interpreted using the system version of `bash`, `python`, `perl`, or whatever else. The problem that exodus aims to solve is largely centered around the dynamic linking of ELF binaries, so this is unlikely to change in the foreseeable future. - **Incompatible CPU Architectures** - Binaries compiled for one CPU architecture will generally not be able to run on a CPU of another architecture. There are some exceptions to this, for example x64 processors are backwards compatible with x86 instruction sets, but you will not be able to migrate x64 binaries to an x86 or an ARM machine. Doing so would require processor emulation, and this is definitely outside the scope of the exodus project. If you find yourself looking for a solution to this problem, then you might want to check out [QEMU](https://www.qemu.org/). - **Incompatible Glibc and Kernel Versions** - When glibc is compiled, it is configured to target a specific kernel version. Trying to run any software that was compiled against glibc on a system using an older kernel version than glibc's target version will result in a `FATAL: kernel too old` error. You can check the oldest supported kernel version for a binary by running `file /path/to/binary`. The output should include a string like `for GNU/Linux 2.6.32` which signifies the oldest kernel version that the binary is compatible with. As a workaround, you can create exodus bundles in a Docker image using an operating system image which supports older kernels (*e.g.* use an outdated version of the operating system). - **Driver Dependent Libraries** - Unlike some other application bundlers, exodus aims to include all of the required libraries when the bundle is created and to completely isolate the transported binary from the destination machine's system libraries. This means that any libraries which are compiled for specific hardware drivers will only work on machines with the same drivers. 
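
As a rough illustration of the dependency-gathering step described above, the following hypothetical Python snippet invokes the linker in trace mode and collects the resolved library paths. It is a simplified sketch, not exodus' actual implementation: the hardcoded linker path, the output parsing, and the lack of recursion into indirect dependencies are all assumptions made for brevity.

```python
import os
import subprocess

def direct_dependencies(binary, linker='/lib64/ld-linux-x86-64.so.2'):
    """Return the shared object paths the linker resolves for `binary`."""
    env = dict(os.environ, LD_TRACE_LOADED_OBJECTS='1')
    trace = subprocess.run([linker, binary], env=env, capture_output=True,
                           text=True, check=True).stdout
    deps = []
    for line in trace.splitlines():
        # Lines look like: "libpcre.so.0 => /lib64/libpcre.so.0 (0x00007f89b2f3e000)"
        if '=>' in line:
            resolved = line.split('=>', 1)[1].strip().split(' ')[0]
            if resolved.startswith('/'):
                deps.append(resolved)
    return deps

# e.g. direct_dependencies('/bin/grep') ->
# ['/lib64/libpcre.so.0', '/lib64/libc.so.6', '/usr/lib/libpthread.so.0']
```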
A key example of this is the `libGLX_indirect.so` library which can link to either `libGLX_mesa.so` or `libGLX_nvidia.so` depending on which graphics card drivers are used on a given system. Bundling dependencies that are not locally available on the source machine is fundamentally outside the scope of what exodus is designed to do, and this will never change. ## Development The development environment can be setup by running the following. ```bash # Clone the repository. git clone https://github.com/intoli/exodus.git cd exodus # Create and enter a virtualenv. virtualenv .env . .env/bin/activate # Install the development requirements. pip install -r development-requirements.txt # Install the exodus package in editable mode. pip install -e . ``` The test suite can then be run using [tox](https://tox.readthedocs.io/en/latest/). ```bash tox ``` ## Contributing Contributions are welcome, but please follow these contributor guidelines outlined in [CONTRIBUTING.md](CONTRIBUTING.md). ## License Exodus is licensed under a [BSD 2-Clause License](LICENSE.md) and is copyright [Intoli, LLC](https://intoli.com).
PypiClean
/ams_dott-1.6.4-py3-none-any.whl/dottmi/gdb.py
# Authors: # - Thomas Winkler, ams AG, [email protected] import atexit import os import platform import signal import subprocess import time from abc import ABC, abstractmethod from pathlib import Path import psutil from psutil import NoSuchProcess import dottmi.target from dottmi.dottexceptions import DottException from dottmi.gdb_mi import GdbMi from dottmi.gdbcontrollerdott import GdbControllerDott from dottmi.utils import log class GdbServer(ABC): def __init__(self, addr, port, device_id): self._addr: str = addr self._port: int = port self._device_id: str = device_id @property def device_id(self): return self._device_id @property def addr(self): return self._addr @property def port(self): return self._port @abstractmethod def _launch(self): pass @abstractmethod def shutdown(self): pass class GdbServerJLink(GdbServer): def __init__(self, gdb_svr_binary: str, addr: str, port: int, device_id: str, interface: str, endian: str, speed: str = '15000', serial_number: str = None, jlink_addr: str = None): super().__init__(addr, port, device_id) self._srv_binary: str = gdb_svr_binary self._srv_process = None self._target_interface: str = interface self._target_endian: str = endian self._speed: str = speed self._serial_number: str = serial_number self._jlink_addr: str = jlink_addr # Popen.__del__ occasionally complains under Windows about invalid file handles on interpreter shutdown. # This is somewhat distracting and is silenced by a custom delete function. subprocess.Popen.__del_orig__ = subprocess.Popen.__del__ subprocess.Popen.__del__ = GdbServerJLink._popen_del if self.addr is None: self._launch() @staticmethod def _popen_del(instance): try: instance.__del_orig__() except: pass def _launch_internal(self): args = [self._srv_binary, '-device', self.device_id, '-if', self._target_interface , '-endian', self._target_endian, '-vd', '-noir', '-timeout', '2000', '-singlerun', '-silent', '-speed', self._speed] if self._jlink_addr is not None: args.append('-select') args.append(f'IP={self._jlink_addr}') if self._serial_number is not None: if self._jlink_addr is not None: log.warn('JLink address and JLINK serial number given. Ignoring serial in favour of address.') else: args.append('-select') args.append(f'USB={self._serial_number}') if self._port is not None: args.append('-port') args.append(f'{self._port}') cflags = 0 if platform.system() == 'Windows': cflags = subprocess.CREATE_NEW_PROCESS_GROUP self._srv_process = subprocess.Popen(args, shell=False, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, creationflags=cflags) p = psutil.Process(self._srv_process.pid) startup_done = False try: # query the started process until it has opened a listening socket on the expected port end_time = time.time() + 8 while not startup_done and time.time() < end_time: for c in p.connections(): if c.laddr.port == self.port: if self._serial_number is None: log.info(f'GDB server is now listening on port {self.port}!') else: log.info(f'GDB server (JLINK SN: {self._serial_number}) now listening on port {self.port}!') startup_done = True except psutil.AccessDenied as ex: # On Linux the situation was observed that from newly launched GDB server processes # an AccessDenied exception is raised when accessing them with psutils. This exception # is then 'thrown' upwards where it is handled by retrying to create the process. 
raise ex except (NoSuchProcess, PermissionError) as ex: log.error('JLINK GDB server has terminated!') end_time = time.time() + 2 res_poll = None while not startup_done and time.time() < end_time: res_poll = self._srv_process.poll() if res_poll is not None: break if res_poll is not None: err_code, err_str = self._conv_jlink_error(self._srv_process.poll()) log.error(f'J-Link gdb server termination reason: {err_code:x} ({err_str})') if err_code == -2: log.error('Already a JLINK GDB server instance running?') if err_code == -5: log.debug('GDB server command line:') log.debug(' '.join(args)) raise DottException('Startup of JLINK gdb server failed!') from None if not startup_done: raise DottException('Startup of JLINK gdb server failed due to timeout!') from None else: self._addr = '127.0.0.1' atexit.register(self.shutdown) def _launch(self): start_done: bool = False while not start_done: try: self._launch_internal() start_done = True except psutil.AccessDenied as ex: pass def shutdown(self): if self._srv_process is not None: # if the gdb server is still running (despite being started in single run mode) it is terminated here try: if platform.system() == 'Windows': os.kill(self._srv_process.pid, signal.CTRL_BREAK_EVENT) else: os.kill(self._srv_process.pid, signal.SIGINT) self._srv_process.communicate(timeout=1) except subprocess.TimeoutExpired: self._srv_process.terminate() self._srv_process = None def _conv_jlink_error(self, jlink_error: int) -> (int, str): bits_in_word = 32 err_code = jlink_error - (1 << bits_in_word) err_str = 'Unknown error code.' if err_code == 0: err_str = 'No error. Gdb server closed normally.' if err_code == -1: err_str = 'Unknown error. Should not happen.' if err_code == -2: err_str = f'Failed to open listener port (default: 2331, current: {self.port}).' if err_code == -3: err_str = 'Could not connect to target. No target voltage detected or connection failed.' if err_code == -4: err_str = 'Failed to accept a connection from GDB.' if err_code == -5: err_str = 'Failed to parse the command line options, wrong or missing command line parameter.' if err_code == -6: err_str = 'Unknown or no device name set.' if err_code == -7: err_str = 'Failed to connect to J-Link.' return err_code, err_str class GdbClient(object): # Create a new gdb instance # gdb_client_binary ... binary of gdb client (in PATH or with full-qualified path) # gdb_server_addr ... gdb server address as supplied to GDB's target command (e.g., remote :2331); # if none DOTT tries to start a Segger GDB server instance and connect to it def __init__(self, gdb_client_binary: str) -> None: self._gdb_client_binary: str = gdb_client_binary self._mi_controller: GdbControllerDott = None self._gdb_mi: GdbMi = None # set Python 2.7 (used for GDB commands) path such that gdb subprocess actually finds it my_env = os.environ.copy() python27_path = os.environ.get('PYTHONPATH27') if python27_path is None: raise Exception('PYTHONPATH27 not set. Can not load gdb command support. 
Aborting.') if platform.system() == 'Windows': os.environ['PATH'] = f'{python27_path};{my_env["PATH"]}' os.environ['PYTHONPATH'] = '%s;%s\\lib;%s\\lib\\site-packages;%s\\DLLs' % ((python27_path,) * 4) else: os.environ['PYTHONPATH'] = '' my_dir = os.path.dirname(os.path.realpath(__file__)) os.environ['PYTHONPATH'] += os.pathsep + str(Path(my_dir + '/..')) # connect to already running gdb server def connect(self) -> None: # create 'GDB Machine Interface' instance and put it async mode self._mi_controller = GdbControllerDott([self._gdb_client_binary, "--nx", "--quiet", "--interpreter=mi3"]) self._gdb_mi = GdbMi(self._mi_controller) @property def gdb_mi(self) -> GdbMi: return self._gdb_mi class GdbServerQuirks(object): @staticmethod def instantiate_quirks(dt: 'dottmi.target.Target') -> 'GdbServerQuirks': # Segger and OpenOCD don't agree on xpsr naming (all lowercase vs. mixed case) if 'xPSR' in dt.reg_get_names(): log.info("Using OpenOCD's xPSR naming") return GdbServerQuirks('xPSR', 'monitor rbp all', 'monitor reset halt') else: # falling back to Segger's naming as default log.info("Using Segger's xpsr naming") return GdbServerQuirks('xpsr', 'monitor clrbp', 'monitor reset') def __init__(self, xpsr_name: str, monitor_clr_all_bps: str, monitor_reset: str): self._xpsr_name: str = xpsr_name self._monitor_clr_all_bps: str = monitor_clr_all_bps self._monitor_reset: str = monitor_reset @property def xpsr_name(self) -> str: return self._xpsr_name @property def monitor_clear_all_bps(self) -> str: return self._monitor_clr_all_bps @property def monitor_reset(self) -> str: return self._monitor_reset
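

# Hypothetical usage sketch (not part of the original module). Binary names, the
# device id and the port are placeholders; GdbClient additionally requires the
# PYTHONPATH27 environment variable to be set (see its constructor above).
if __name__ == '__main__':
    server = GdbServerJLink(
        gdb_svr_binary='JLinkGDBServerCLExe',  # placeholder J-Link GDB server binary
        addr=None,                             # None -> a local server instance is launched
        port=2331,
        device_id='STM32F407VG',               # placeholder device name
        interface='SWD',
        endian='little',
    )
    client = GdbClient('arm-none-eabi-gdb')    # placeholder GDB client binary
    client.connect()
    # MI commands can now be issued through client.gdb_mi (see dottmi.gdb_mi),
    # targeting the server at f'{server.addr}:{server.port}'.
    server.shutdown()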
PypiClean
/sifter_wagtail-3.0a1-py3-none-any.whl/wagtail/admin/static_src/wagtailadmin/js/core.js
function addMessage(status, text) { $('.messages').addClass('new').empty().append('<ul><li class="' + status + '">' + text + '</li></ul>'); var addMsgTimeout = setTimeout(function() { $('.messages').addClass('appear'); clearTimeout(addMsgTimeout); }, 100); } function escapeHtml(text) { var map = { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', '\'': '&#039;' }; return text.replace(/[&<>"']/g, function(m) { return map[m]; }); } function initTagField(id, autocompleteUrl, options) { var finalOptions = Object.assign({ autocomplete: {source: autocompleteUrl}, preprocessTag: function(val) { // Double quote a tag if it contains a space // and if it isn't already quoted. if (val && val[0] != '"' && val.indexOf(' ') > -1) { return '"' + val + '"'; } return val; }, }, options); $('#' + id).tagit(finalOptions); } /* * Enables a "dirty form check", prompting the user if they are navigating away * from a page with unsaved changes. * * It takes the following parameters: * * - formSelector - A CSS selector to select the form to apply this check to. * * - options - An object for passing in options. Possible options are: * - confirmationMessage - The message to display in the prompt. * - alwaysDirty - When set to true the form will always be considered dirty, * prompting the user even when nothing has been changed. */ function enableDirtyFormCheck(formSelector, options) { var $form = $(formSelector); var confirmationMessage = options.confirmationMessage || ' '; var alwaysDirty = options.alwaysDirty || false; var initialData = null; var formSubmitted = false; $form.on('submit', function() { formSubmitted = true; }); // Delay snapshotting the form’s data to avoid race conditions with form widgets that might process the values. // User interaction with the form within that delay also won’t trigger the confirmation message. setTimeout(function() { initialData = $form.serialize(); }, 1000 * 10); window.addEventListener('beforeunload', function(event) { var isDirty = initialData && $form.serialize() != initialData; var displayConfirmation = ( !formSubmitted && (alwaysDirty || isDirty) ); if (displayConfirmation) { event.returnValue = confirmationMessage; return confirmationMessage; } }); } $(function() { // Add class to the body from which transitions may be hung so they don't appear to transition as the page loads $('body').addClass('ready'); // Enable toggle to open/close nav $(document).on('click', '#nav-toggle', function() { $('body').toggleClass('nav-open'); if (!$('body').hasClass('nav-open')) { $('body').addClass('nav-closed'); } else { $('body').removeClass('nav-closed'); } }); // Enable toggle to open/close user settings $(document).on('click', '#account-settings', function() { $('.nav-main').toggleClass('nav-main--open-footer'); $(this).find('em').toggleClass('icon-arrow-down-after icon-arrow-up-after'); }); // Resize nav to fit height of window. 
This is an unimportant bell/whistle to make it look nice var fitNav = function() { $('.nav-wrapper').css('min-height', $(window).height()); }; fitNav(); $(window).on('resize', function() { fitNav(); }); // Logo interactivity function initLogo() { var sensitivity = 8; // the amount of times the user must stroke the wagtail to trigger the animation var $logoContainer = $('[data-animated-logo-container]'); var mouseX = 0; var lastMouseX = 0; var dir = ''; var lastDir = ''; var dirChangeCount = 0; function enableWag() { $logoContainer.removeClass('logo-serious').addClass('logo-playful'); } function disableWag() { $logoContainer.removeClass('logo-playful').addClass('logo-serious'); } $logoContainer.on('mousemove', function(event) { mouseX = event.pageX; if (mouseX > lastMouseX) { dir = 'r'; } else if (mouseX < lastMouseX) { dir = 'l'; } if (dir != lastDir && lastDir != '') { dirChangeCount += 1; } if (dirChangeCount > sensitivity) { enableWag(); } lastMouseX = mouseX; lastDir = dir; }); $logoContainer.on('mouseleave', function() { dirChangeCount = 0; disableWag(); }); disableWag(); } initLogo(); // Enable nice focus effects on all fields. This enables help text on hover. $(document).on('focus mouseover', 'input,textarea,select', function() { $(this).closest('.field').addClass('focused'); $(this).closest('fieldset').addClass('focused'); $(this).closest('li').addClass('focused'); }); $(document).on('blur mouseout', 'input,textarea,select', function() { $(this).closest('.field').removeClass('focused'); $(this).closest('fieldset').removeClass('focused'); $(this).closest('li').removeClass('focused'); }); /* tabs */ if (window.location.hash) { var cleanedHash = window.location.hash.replace(/[^\w\-\#]/g, ''); $('a[href="' + cleanedHash + '"]').tab('show'); } $(document).on('click', '.tab-nav a', function(e) { e.preventDefault(); $(this).tab('show'); window.history.replaceState(null, null, $(this).attr('href')); }); $(document).on('click', '.tab-toggle', function(e) { e.preventDefault(); $('.tab-nav a[href="' + $(this).attr('href') + '"]').trigger('click'); }); $('.dropdown').each(function() { var $dropdown = $(this); $('.dropdown-toggle', $dropdown).on('click', function(e) { e.stopPropagation(); $dropdown.toggleClass('open'); if ($dropdown.hasClass('open')) { // If a dropdown is opened, add an event listener for document clicks to close it $(document).on('click.dropdown.cancel', function(e) { var relTarg = e.relatedTarget || e.toElement; // Only close dropdown if the click target wasn't a child of this dropdown if (!$(relTarg).parents().is($dropdown)) { $dropdown.removeClass('open'); $(document).off('click.dropdown.cancel'); } }); } else { $(document).off('click.dropdown.cancel'); } }); }); /* Dropzones */ $('.drop-zone').on('dragover', function() { $(this).addClass('hovered'); }).on('dragleave dragend drop', function() { $(this).removeClass('hovered'); }); /* Header search behaviour */ if (window.headerSearch) { var searchCurrentIndex = 0; var searchNextIndex = 0; var $input = $(window.headerSearch.termInput); var $inputContainer = $input.parent(); $input.on('keyup cut paste change', function() { clearTimeout($input.data('timer')); $input.data('timer', setTimeout(search, 200)); }); // auto focus on search box $input.trigger('focus'); function search() { var workingClasses = 'icon-spinner'; var newQuery = $input.val(); var currentQuery = getURLParam('q'); // only do the query if it has changed for trimmed queries // eg. 
" " === "" and "firstword " ==== "firstword" if (currentQuery.trim() !== newQuery.trim()) { $inputContainer.addClass(workingClasses); searchNextIndex++; var index = searchNextIndex; $.ajax({ url: window.headerSearch.url, data: {q: newQuery}, success: function(data, status) { if (index > searchCurrentIndex) { searchCurrentIndex = index; $(window.headerSearch.targetOutput).html(data).slideDown(800); window.history.replaceState(null, null, '?q=' + newQuery); } }, complete: function() { wagtail.ui.initDropDowns(); $inputContainer.removeClass(workingClasses); } }); } } function getURLParam(name) { var results = new RegExp('[\\?&]' + name + '=([^]*)').exec(window.location.search); if (results) { return results[1]; } return ''; } } /* Functions that need to run/rerun when active tabs are changed */ $(document).on('shown.bs.tab', function(e) { // Resize autosize textareas $('textarea[data-autosize-on]').each(function() { autosize.update($(this).get()); }); }); /* Debounce submission of long-running forms and add spinner to give sense of activity */ $(document).on('click', 'button.button-longrunning', function(e) { var $self = $(this); var $replacementElem = $('em', $self); var reEnableAfter = 30; var dataName = 'disabledtimeout'; window.cancelSpinner = function() { $self.prop('disabled', '').removeData(dataName).removeClass('button-longrunning-active'); if ($self.data('clicked-text')) { $replacementElem.text($self.data('original-text')); } }; // If client-side validation is active on this form, and is going to block submission of the // form, don't activate the spinner var form = $self.closest('form').get(0); if (form && form.checkValidity && !form.noValidate && (!form.checkValidity())) { return; } // Disabling a button prevents it submitting the form, so disabling // must occur on a brief timeout only after this function returns. var timeout = setTimeout(function() { if (!$self.data(dataName)) { // Button re-enables after a timeout to prevent button becoming // permanently un-usable $self.data(dataName, setTimeout(function() { clearTimeout($self.data(dataName)); cancelSpinner(); }, reEnableAfter * 1000)); if ($self.data('clicked-text') && $replacementElem.length) { // Save current button text $self.data('original-text', $replacementElem.text()); $replacementElem.text($self.data('clicked-text')); } // Disabling button must be done last: disabled buttons can't be // modified in the normal way, it would seem. $self.addClass('button-longrunning-active').prop('disabled', 'true'); } clearTimeout(timeout); }, 10); }); }); // ============================================================================= // Wagtail global module, mainly useful for debugging. 
// ============================================================================= var wagtail = window.wagtail = null; // ============================================================================= // Inline dropdown module // ============================================================================= wagtail = (function(document, window, wagtail) { // Module pattern if (!wagtail) { wagtail = { ui: {} }; } // Constants var DROPDOWN_SELECTOR = '[data-dropdown]'; var LISTING_TITLE_SELECTOR = '[data-listing-page-title]'; var LISTING_ACTIVE_CLASS = 'listing__item--active'; var ICON_DOWN = 'icon-arrow-down'; var ICON_UP = 'icon-arrow-up'; var IS_OPEN = 'is-open'; var clickEvent = 'click'; var TOGGLE_SELECTOR = '[data-dropdown-toggle]'; var ARIA = 'aria-hidden'; var keys = { ESC: 27, ENTER: 13, SPACE: 32 }; /** * Singleton controller and registry for DropDown components. * * Mostly used to maintain open/closed state of components and easily * toggle them when the focus changes. */ var DropDownController = { _dropDowns: [], closeAllExcept: function(dropDown) { var index = this._dropDowns.indexOf(dropDown); this._dropDowns.forEach(function(item, i) { if (i !== index && item.state.isOpen) { item.closeDropDown(); } }); }, add: function(dropDown) { this._dropDowns.push(dropDown); }, get: function() { return this._dropDowns; }, getByIndex: function(index) { return this._dropDowns[index]; }, getOpenDropDown: function() { var needle = null; this._dropDowns.forEach(function(item) { if (item.state.isOpen) { needle = item; } }); return needle; } }; /** * DropDown component * * Template: _button_with_dropdown.html * * Can contain a list of links * Controllable via a toggle class or the keyboard. */ function DropDown(el, registry) { if (!el || !registry ) { if ('error' in console) { console.error('A dropdown was created without an element or the DropDownController.\nMake sure to pass both to your component.'); return; } } this.el = el; this.$parent = $(el).parents(LISTING_TITLE_SELECTOR); this.state = { isOpen: false }; this.registry = registry; this.clickOutsideDropDown = this._clickOutsideDropDown.bind(this); this.closeDropDown = this._closeDropDown.bind(this); this.openDropDown = this._openDropDown.bind(this); this.handleClick = this._handleClick.bind(this); this.handleKeyEvent = this._handleKeyEvent.bind(this); el.addEventListener(clickEvent, this.handleClick); el.addEventListener('keydown', this.handleKeyEvent); this.$parent.data('close', this.closeDropDown); } DropDown.prototype = { _handleKeyEvent: function(e) { var validTriggers = [keys.SPACE, keys.ENTER]; if (validTriggers.indexOf(e.which) > -1) { e.preventDefault(); this.handleClick(e); } }, _handleClick: function(e) { if (!this.state.isOpen) { this.openDropDown(e); } else { this.closeDropDown(e); } }, _openDropDown: function(e) { e.stopPropagation(); e.preventDefault(); var el = this.el; var $parent = this.$parent; var toggle = el.querySelector(TOGGLE_SELECTOR); this.state.isOpen = true; this.registry.closeAllExcept(this); el.classList.add(IS_OPEN); el.setAttribute(ARIA, false); toggle.classList.remove(ICON_DOWN); toggle.classList.add(ICON_UP); document.addEventListener(clickEvent, this.clickOutsideDropDown, false); $parent.addClass(LISTING_ACTIVE_CLASS); }, _closeDropDown: function(e) { this.state.isOpen = false; var el = this.el; var $parent = this.$parent; var toggle = el.querySelector(TOGGLE_SELECTOR); document.removeEventListener(clickEvent, this.clickOutsideDropDown, false); el.classList.remove(IS_OPEN); toggle.classList.add(ICON_DOWN); 
toggle.classList.remove(ICON_UP); el.setAttribute(ARIA, true); $parent.removeClass(LISTING_ACTIVE_CLASS); }, _clickOutsideDropDown: function(e) { var el = this.el; var relTarget = e.relatedTarget || e.toElement; if (!$(relTarget).parents().is(el)) { this.closeDropDown(); } } }; function initDropDown() { var dropDown = new DropDown(this, DropDownController) DropDownController.add(dropDown); } function handleKeyPress(e) { if (e.which === keys.ESC) { var open = DropDownController.getOpenDropDown(); if (open) { open.closeDropDown(); } } } function initDropDowns() { $(DROPDOWN_SELECTOR).each(initDropDown); $(document).on("keydown", handleKeyPress); } $(document).ready(initDropDowns); wagtail.ui.initDropDowns = initDropDowns; wagtail.ui.DropDownController = DropDownController; // provide a workaround for NodeList#forEach not being available in IE 11 function qsa(element, selector) { return [].slice.call(element.querySelectorAll(selector)); } // Initialise button selectors function initButtonSelects() { qsa(document, '.button-select').forEach(function(element) { var inputElement = element.querySelector('input[type="hidden"]'); qsa(element, '.button-select__option').forEach(function(buttonElement) { buttonElement.addEventListener('click', function(e) { e.preventDefault(); inputElement.value = buttonElement.value; qsa(element, '.button-select__option--selected').forEach(function(selectedButtonElement) { selectedButtonElement.classList.remove('button-select__option--selected'); }); buttonElement.classList.add('button-select__option--selected'); }); }); }); } $(document).ready(initButtonSelects); return wagtail; })(document, window, wagtail);
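// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original file).
// initDropDowns() binds a DropDown instance to every element matching
// [data-dropdown]. The markup below is an assumption of what the
// _button_with_dropdown.html template mentioned above roughly renders, not
// the template's actual contents:
//
//   <div data-dropdown aria-hidden="true">
//     <button data-dropdown-toggle class="icon-arrow-down">Actions</button>
//     <ul>
//       <li><a href="#">Edit</a></li>
//     </ul>
//   </div>
//
// After injecting markup like this dynamically, wagtail.ui.initDropDowns()
// can be called again to register the new dropdowns -- the header search
// "complete" callback earlier in this file already does exactly that.
// ---------------------------------------------------------------------------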
PypiClean
/odoo12_addon_l10n_br_resource-12.0.1.1.0-py3-none-any.whl/odoo/addons/l10n_br_resource/tools/brazil_all_holidays_set.py
import re from workalendar.america import Brazil, BrazilBankCalendar from workalendar.america.brazil import IBGE_REGISTER class BrazilianHoliday: def __init__(self, nome, data, estado_ibge, municipio_ibge, abrangencia, tipo): self.estado_ibge = estado_ibge self.municipio_ibge = municipio_ibge self.municipio_nome = '' self.abrangencia = abrangencia self.tipo = tipo self.nome = nome self.data = data # Commemorative holidays list COMMEMORATIVE_HOLIDAYS = [ 'Consciência Negra', ] def brazil_all_holidays_set(year): """Returns all holidays in brazil with their respective type and coverage""" holidays_set = [] # Get brazilian national holidays cal = Brazil() for national_holidays in cal.holidays(year): holiday_name = national_holidays[1] holiday_date = national_holidays[0] if national_holidays[1] in COMMEMORATIVE_HOLIDAYS: tipo_feriado = 'C' else: tipo_feriado = 'F' holiday_obj = BrazilianHoliday(holiday_name, holiday_date, None, None, 'N', tipo_feriado) if not any(x.nome == holiday_obj.nome for x in holidays_set): holidays_set.append(holiday_obj) # Get brazilian bank holidays cal = BrazilBankCalendar() for bank_holidays in cal.holidays(year): holiday_name = bank_holidays[1] holiday_date = bank_holidays[0] holiday_obj = BrazilianHoliday(holiday_name, holiday_date, None, None, 'N', 'B') if not any(x.nome == holiday_obj.nome for x in holidays_set): holidays_set.append(holiday_obj) # Get holidays from brazilian state for register in IBGE_REGISTER.items(): estado_ibge = re.sub("BR-IBGE-", "", register[0]) if len(estado_ibge) == 2: cal_state = IBGE_REGISTER[register[0]]() for state_holidays in cal_state.holidays(year): holiday_name = state_holidays[1] holiday_date = state_holidays[0] holiday_obj = BrazilianHoliday(holiday_name, holiday_date, estado_ibge, None, 'E', 'F') # Check if is just a state holiday if not any((x.nome == holiday_obj.nome and not x.estado_ibge) for x in holidays_set): holidays_set.append(holiday_obj) # Get brazilian municipal holidays for register in IBGE_REGISTER.items(): municipio_ibge = re.sub("BR-IBGE-", "", register[0]) estado_ibge = municipio_ibge[0:2] if len(municipio_ibge) > 2: cal_city = IBGE_REGISTER[register[0]]() for city_holiday in cal_city.holidays(year): holiday_name = city_holiday[1] holiday_date = city_holiday[0] holiday_obj = BrazilianHoliday(holiday_name, holiday_date, estado_ibge, municipio_ibge, 'M', 'F') # Check if is just a municipal holiday if not any((x.nome == holiday_obj.nome and not x.municipio_ibge) for x in holidays_set): holidays_set.append(holiday_obj) return holidays_set
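# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# brazil_all_holidays_set() only needs a year; the year and the print format
# below are arbitrary assumptions. The coverage/type codes are the ones set
# by the code above: abrangencia 'N'/'E'/'M' (national/state/municipal) and
# tipo 'F'/'B'/'C' (holiday/bank/commemorative).
if __name__ == '__main__':
    for holiday in brazil_all_holidays_set(2024):
        print(holiday.data, holiday.nome, holiday.abrangencia, holiday.tipo)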
PypiClean
/gaia_oc_amd-0.2.1.tar.gz/gaia_oc_amd-0.2.1/gaia_oc_amd/io.py
import os import json import numpy as np import pandas as pd from astropy.io.votable import parse from torch import load, save from gaia_oc_amd.machine_learning.deepsets_zaheer import D5 def cluster_list(cluster_list_source): """Function for creating a list of cluster names. If the input of this function is a file path, return a list of the lines in the file. If it is a string, return a list with only the input string. Args: cluster_list_source (str, list): Either the name of a cluster, the name of a file containing cluster names or a list of cluster names. Returns: cluster_names (str, list): List of cluster names. """ if type(cluster_list_source) == str: if os.path.exists(cluster_list_source): # cluster_list_source should be a path to a file with cluster names with open(cluster_list_source, 'r') as f: cluster_names = f.read().splitlines() return cluster_names else: # cluster_list_source should be a single cluster name cluster_names = [cluster_list_source] return cluster_names elif type(cluster_list_source) == list: # cluster_list_source should be a list of cluster names cluster_names = cluster_list_source return cluster_names else: raise TypeError(f'Wrong input type: {type(cluster_list_source)}, use a string or list.') def load_cluster_parameters(cluster_parameters_path, cluster_name, new_column_names=None): """Loads in the data of the cluster parameters and returns those of a single cluster. Args: cluster_parameters_path (str): Path to the (.csv) file where the cluster parameters are saved. cluster_name (str): Name of the cluster new_column_names (dict): Dictionary for renaming the dataframe columns in the format {'old_name': 'new_name', etc...} Returns: params (dict): Dictionary containing the parameters of a single cluster. """ cluster_params = pd.read_csv(cluster_parameters_path, index_col=0).dropna() if new_column_names is not None: cluster_params = cluster_params.rename(columns=new_column_names) if cluster_name in cluster_params['name'].values: params = cluster_params[cluster_params['name'] == cluster_name].to_dict(orient='list') params = {key: params[key][0] for key in params} return params else: return None def load_cone(save_dir): """Loads in the data of the stars found in the cone search. Args: save_dir (str): Path to the directory where the cone search data is saved. Returns: cone (Dataframe): Dataframe containing the data of the sources in the cone search. """ votable_path = os.path.join(save_dir, 'cone.vot.gz') csv_path = os.path.join(save_dir, 'cone.csv') if os.path.exists(votable_path): cone = parse(votable_path) cone = cone.get_first_table().to_table(use_names_over_ids=True) cone = cone.to_pandas() elif os.path.exists(csv_path): cone = pd.read_csv(csv_path) else: raise IOError(f"No cone data file ('cone.vot.gz' or 'cone.csv') found in directory {save_dir}") return cone def load_members(members_path, cluster_name): """Loads in the data of a membership list and selects the members of a single cluster. Args: members_path (str): Path to the (.csv) file where the member data is saved. cluster_name (str): Name of the cluster Returns: cluster_members (Dataframe): Dataframe containing the data of the cluster member sources. """ all_members = pd.read_csv(members_path, index_col=0) cluster_members = all_members[all_members['cluster'] == cluster_name] cluster_members = cluster_members.reset_index(drop=True)[['source_id', 'PMemb']] return cluster_members def load_isochrone(isochrone_path, age): """Loads in the data of the isochrones and selects the isochrone of a particular age. 
Args: isochrone_path (str): Path to the (.dat) file where the isochrone data saved. age (float): log(age) of the isochrone Returns: isochrone (Dataframe): Dataframe containing isochrone data points. """ isochrones = pd.read_csv(isochrone_path, names=['Zini', 'MH', 'logAge', 'Mini', 'int_IMF', 'Mass', 'logL', 'logTe', 'logg', 'label', 'McoreTP', 'C_O', 'period0', 'period1', 'period2', 'period3', 'period4', 'pmode', 'Mloss', 'tau1m', 'X', 'Y', 'Xc', 'Xn', 'Xo', 'Cexcess', 'Z', 'mbolmag', 'phot_g_mean_mag', 'G_BPmag', 'G_RPmag'], delim_whitespace=True, comment='#') # Select the isochrone with the age of the cluster ages = np.array(list(set(isochrones['logAge'].values))) closest_age = ages[np.argmin(np.abs(ages - age))] age_error = np.abs(closest_age - age) if age_error > 0.02: raise UserWarning(f'The isochrone in the data file with age {closest_age} is closest to the supplied age ' f'{age}. A large difference might result in an isochrone that poorly fits the member ' f'distribution.') isochrone = isochrones[isochrones['logAge'] == closest_age] return isochrone def save_sets(save_dir, train_members=None, candidates=None, non_members=None, comparison_members=None): """Saves the sets of sources in .csv files. Args: save_dir (str): Directory where the sets are saved train_members (Dataframe): Dataframe containing train member sources candidates (Dataframe): Dataframe containing candidate sources non_members (Dataframe): Dataframe containing non_member sources comparison_members (Dataframe): Dataframe containing comparison member sources """ for subset, subset_name in zip([train_members, candidates, non_members, comparison_members], ['train_members', 'candidates', 'non_members', 'comparison_members']): if subset is not None: subset.to_csv(os.path.join(save_dir, subset_name + '.csv')) def load_sets(save_dir): """Loads the sets of sources from .csv files. Args: save_dir (str): Directory where the sets are loaded from Returns: train_members (Dataframe): Dataframe containing train member sources candidates (Dataframe): Dataframe containing candidate sources non_members (Dataframe): Dataframe containing non-member sources comparison_members (Dataframe): Dataframe containing comparison member sources """ files = ['train_members.csv', 'candidates.csv', 'non_members.csv', 'comparison_members.csv'] sources = [] for file in files: path = os.path.join(save_dir, file) if os.path.exists(path): sources.append(pd.read_csv(path, index_col=0)) else: sources.append(None) train_members, candidates, non_members, comparison_members = tuple(sources) return train_members, candidates, non_members, comparison_members def save_cluster(save_dir, cluster): """Saves the cluster parameters. Args: save_dir (str): Directory where the sets are saved cluster (Cluster): Cluster object """ # with open(os.path.join(save_dir, 'cluster'), 'wb') as cluster_file: # pickle.dump(cluster, cluster_file) with open(os.path.join(save_dir, 'cluster'), 'w') as cluster_params_file: cluster_params = vars(cluster).copy() if cluster_params['isochrone'] is not None: cluster_params['isochrone'] = cluster_params['isochrone'].to_numpy().T.tolist() json.dump(cluster_params, cluster_params_file) def load_cluster(save_dir): """Loads the cluster parameters. 
Args: save_dir (str): Directory where the sets are saved Returns: cluster (Cluster): Cluster object """ with open(os.path.join(save_dir, 'cluster'), 'r') as cluster_file: cluster_params = json.load(cluster_file) if cluster_params['isochrone'] is not None: cluster_params['isochrone'] = pd.DataFrame({cluster_params['isochrone_colour']: cluster_params['isochrone'][0], 'phot_g_mean_mag': cluster_params['isochrone'][1]}) return cluster_params def save_hyper_parameters(model_save_dir, hyper_parameters): """Saves the hyperparameters of a certain model. Args: model_save_dir (str): Directory where the hyperparameters corresponding to a specific model will be saved hyper_parameters (dict): Dictionary containing the data and training hyperparameters """ with open(os.path.join(model_save_dir, 'hyper_parameters'), 'w') as f: json.dump(hyper_parameters, f) def load_hyper_parameters(model_save_dir): """Loads the hyperparameters of a certain model. Args: model_save_dir (str): Directory where the hyperparameters corresponding to a specific model are stored Returns: hyper_parameters (dict): Dictionary containing the data and training hyperparameters """ with open(os.path.join(model_save_dir, 'hyper_parameters'), 'r') as f: hyper_parameters = json.load(f) return hyper_parameters def save_metrics(model_save_dir, metrics_dict): """Saves the metrics of a certain model. Args: model_save_dir (str): Directory where the hyperparameters corresponding to a specific model will be saved metrics_dict (dict): Dictionary containing and training hyperparameters """ with open(os.path.join(model_save_dir, 'metrics'), 'w') as f: json.dump(metrics_dict, f) def load_metrics(model_save_dir): """Loads the metrics of a certain model. Args: model_save_dir (str): Directory where the metrics corresponding to a specific model are stored Returns: metrics (dict): Dictionary containing and training metrics """ with open(os.path.join(model_save_dir, 'metrics'), 'r') as f: metrics_dict = json.load(f) return metrics_dict def save_model(model_save_dir, model): """Saves the (trained) parameters of a deep sets model. Args: model_save_dir (str): Directory where the parameters corresponding to a specific model will be saved model (D5): Deep sets model with (trained) parameters """ save(model.state_dict(), os.path.join(model_save_dir, 'model_parameters')) def load_model(model_save_dir): """Loads a pretrained model with the (hyper)parameters in model_save_dir. Args: model_save_dir (str): Directory where the model data is stored Returns: model (D5): Model with pretrained parameters """ hyper_parameters = load_hyper_parameters(model_save_dir) source_features = hyper_parameters['source_features'] cluster_features = hyper_parameters['cluster_features'] hidden_size = hyper_parameters['hidden_size'] model = D5(hidden_size, x_dim=2 * len(source_features) + len(cluster_features), pool='mean', out_dim=2) model.load_state_dict(load(os.path.join(model_save_dir, 'model_parameters'))) return model
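# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# cluster_list() accepts a single name, a file of names, or a list of names.
# The file-based loaders are shown commented out because the paths are
# hypothetical placeholders for directories produced by save_sets(),
# save_hyper_parameters() and save_model().
if __name__ == '__main__':
    print(cluster_list('NGC_752'))                # -> ['NGC_752']
    print(cluster_list(['NGC_752', 'NGC_2509']))  # -> ['NGC_752', 'NGC_2509']
    # train, candidates, non_members, comparison = load_sets('path/to/cluster_dir')
    # model = load_model('path/to/model_dir')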
PypiClean
/hacs-frontend-20230630112320.tar.gz/hacs-frontend-20230630112320/hacs_frontend/frontend_latest/1106--L8MEXUaKBc.js
export const id=1106;export const ids=[1106];export const modules={63335:(t,e,i)=>{i.d(e,{F:()=>d});var s=i(43204),n=i(79932),o=i(58417),r=i(39274);let a=class extends o.A{};a.styles=[r.W],a=(0,s.__decorate)([(0,n.Mo)("mwc-checkbox")],a);var l=i(68144),h=i(83448),c=i(61092);class d extends c.K{constructor(){super(...arguments),this.left=!1,this.graphic="control"}render(){const t={"mdc-deprecated-list-item__graphic":this.left,"mdc-deprecated-list-item__meta":!this.left},e=this.renderText(),i=this.graphic&&"control"!==this.graphic&&!this.left?this.renderGraphic():l.dy``,s=this.hasMeta&&this.left?this.renderMeta():l.dy``,n=this.renderRipple();return l.dy` ${n} ${i} ${this.left?"":e} <span class="${(0,h.$)(t)}"> <mwc-checkbox reducedTouchTarget tabindex="${this.tabindex}" .checked="${this.selected}" ?disabled="${this.disabled}" @change="${this.onChange}"> </mwc-checkbox> </span> ${this.left?e:""} ${s}`}async onChange(t){const e=t.target;this.selected===e.checked||(this._skipPropRequest=!0,this.selected=e.checked,await this.updateComplete,this._skipPropRequest=!1)}}(0,s.__decorate)([(0,n.IO)("slot")],d.prototype,"slotElement",void 0),(0,s.__decorate)([(0,n.IO)("mwc-checkbox")],d.prototype,"checkboxElement",void 0),(0,s.__decorate)([(0,n.Cb)({type:Boolean})],d.prototype,"left",void 0),(0,s.__decorate)([(0,n.Cb)({type:String,reflect:!0})],d.prototype,"graphic",void 0)},21270:(t,e,i)=>{i.d(e,{W:()=>s});const s=i(68144).iv`:host(:not([twoline])){height:56px}:host(:not([left])) .mdc-deprecated-list-item__meta{height:40px;width:40px}`},30515:(t,e,i)=>{i(39975),i(65660);var s=i(71132),n=i(69491),o=i(50856),r=i(74460),a=new Set;const l={properties:{_parentResizable:{type:Object,observer:"_parentResizableChanged"},_notifyingDescendant:{type:Boolean,value:!1}},listeners:{"iron-request-resize-notifications":"_onIronRequestResizeNotifications"},created:function(){this._interestedResizables=[],this._boundNotifyResize=this.notifyResize.bind(this),this._boundOnDescendantIronResize=this._onDescendantIronResize.bind(this)},attached:function(){this._requestResizeNotifications()},detached:function(){this._parentResizable?this._parentResizable.stopResizeNotificationsFor(this):(a.delete(this),window.removeEventListener("resize",this._boundNotifyResize)),this._parentResizable=null},notifyResize:function(){this.isAttached&&(this._interestedResizables.forEach((function(t){this.resizerShouldNotify(t)&&this._notifyDescendant(t)}),this),this._fireResize())},assignParentResizable:function(t){this._parentResizable&&this._parentResizable.stopResizeNotificationsFor(this),this._parentResizable=t,t&&-1===t._interestedResizables.indexOf(this)&&(t._interestedResizables.push(this),t._subscribeIronResize(this))},stopResizeNotificationsFor:function(t){var e=this._interestedResizables.indexOf(t);e>-1&&(this._interestedResizables.splice(e,1),this._unsubscribeIronResize(t))},_subscribeIronResize:function(t){t.addEventListener("iron-resize",this._boundOnDescendantIronResize)},_unsubscribeIronResize:function(t){t.removeEventListener("iron-resize",this._boundOnDescendantIronResize)},resizerShouldNotify:function(t){return!0},_onDescendantIronResize:function(t){this._notifyingDescendant?t.stopPropagation():r.my||this._fireResize()},_fireResize:function(){this.fire("iron-resize",null,{node:this,bubbles:!1})},_onIronRequestResizeNotifications:function(t){var 
e=(0,n.vz)(t).rootTarget;e!==this&&(e.assignParentResizable(this),this._notifyDescendant(e),t.stopPropagation())},_parentResizableChanged:function(t){t&&window.removeEventListener("resize",this._boundNotifyResize)},_notifyDescendant:function(t){this.isAttached&&(this._notifyingDescendant=!0,t.notifyResize(),this._notifyingDescendant=!1)},_requestResizeNotifications:function(){if(this.isAttached)if("loading"===document.readyState){var t=this._requestResizeNotifications.bind(this);document.addEventListener("readystatechange",(function e(){document.removeEventListener("readystatechange",e),t()}))}else this._findParent(),this._parentResizable?this._parentResizable._interestedResizables.forEach((function(t){t!==this&&t._findParent()}),this):(a.forEach((function(t){t!==this&&t._findParent()}),this),window.addEventListener("resize",this._boundNotifyResize),this.notifyResize())},_findParent:function(){this.assignParentResizable(null),this.fire("iron-request-resize-notifications",null,{node:this,bubbles:!0,cancelable:!0}),this._parentResizable?a.delete(this):a.add(this)}};var h=i(21683),c=i(78956),d=i(93252);const u=[l,{listeners:{"app-reset-layout":"_appResetLayoutHandler","iron-resize":"resetLayout"},attached:function(){this.fire("app-reset-layout")},_appResetLayoutHandler:function(t){(0,n.vz)(t).path[0]!==this&&(this.resetLayout(),t.stopPropagation())},_updateLayoutStates:function(){console.error("unimplemented")},resetLayout:function(){var t=this._updateLayoutStates.bind(this);this._layoutDebouncer=c.dx.debounce(this._layoutDebouncer,h.rs,t),(0,d.E)(this._layoutDebouncer),this._notifyDescendantResize()},_notifyLayoutChanged:function(){var t=this;requestAnimationFrame((function(){t.fire("app-reset-layout")}))},_notifyDescendantResize:function(){this.isAttached&&this._interestedResizables.forEach((function(t){this.resizerShouldNotify(t)&&this._notifyDescendant(t)}),this)}}],f={properties:{scrollTarget:{type:HTMLElement,value:function(){return this._defaultScrollTarget}}},observers:["_scrollTargetChanged(scrollTarget, isAttached)"],_shouldHaveListener:!0,_scrollTargetChanged:function(t,e){if(this._oldScrollTarget&&(this._toggleScrollListener(!1,this._oldScrollTarget),this._oldScrollTarget=null),e)if("document"===t)this.scrollTarget=this._doc;else if("string"==typeof t){var i=this.domHost;this.scrollTarget=i&&i.$?i.$[t]:(0,n.vz)(this.ownerDocument).querySelector("#"+t)}else this._isValidScrollTarget()&&(this._oldScrollTarget=t,this._toggleScrollListener(this._shouldHaveListener,t))},_scrollHandler:function(){},get _defaultScrollTarget(){return this._doc},get _doc(){return this.ownerDocument.documentElement},get _scrollTop(){return this._isValidScrollTarget()?this.scrollTarget===this._doc?window.pageYOffset:this.scrollTarget.scrollTop:0},get _scrollLeft(){return this._isValidScrollTarget()?this.scrollTarget===this._doc?window.pageXOffset:this.scrollTarget.scrollLeft:0},set _scrollTop(t){this.scrollTarget===this._doc?window.scrollTo(window.pageXOffset,t):this._isValidScrollTarget()&&(this.scrollTarget.scrollTop=t)},set _scrollLeft(t){this.scrollTarget===this._doc?window.scrollTo(t,window.pageYOffset):this._isValidScrollTarget()&&(this.scrollTarget.scrollLeft=t)},scroll:function(t,e){var i;"object"==typeof t?(i=t.left,e=t.top):i=t,i=i||0,e=e||0,this.scrollTarget===this._doc?window.scrollTo(i,e):this._isValidScrollTarget()&&(this.scrollTarget.scrollLeft=i,this.scrollTarget.scrollTop=e)},get _scrollTargetWidth(){return 
this._isValidScrollTarget()?this.scrollTarget===this._doc?window.innerWidth:this.scrollTarget.offsetWidth:0},get _scrollTargetHeight(){return this._isValidScrollTarget()?this.scrollTarget===this._doc?window.innerHeight:this.scrollTarget.offsetHeight:0},_isValidScrollTarget:function(){return this.scrollTarget instanceof HTMLElement},_toggleScrollListener:function(t,e){var i=e===this._doc?window:e;t?this._boundScrollHandler||(this._boundScrollHandler=this._scrollHandler.bind(this),i.addEventListener("scroll",this._boundScrollHandler)):this._boundScrollHandler&&(i.removeEventListener("scroll",this._boundScrollHandler),this._boundScrollHandler=null)},toggleScrollListener:function(t){this._shouldHaveListener=t,this._toggleScrollListener(t,this.scrollTarget)}},p={};const _=[f,{properties:{effects:{type:String},effectsConfig:{type:Object,value:function(){return{}}},disabled:{type:Boolean,reflectToAttribute:!0,value:!1},threshold:{type:Number,value:0},thresholdTriggered:{type:Boolean,notify:!0,readOnly:!0,reflectToAttribute:!0}},observers:["_effectsChanged(effects, effectsConfig, isAttached)"],_updateScrollState:function(t){},isOnScreen:function(){return!1},isContentBelow:function(){return!1},_effectsRunFn:null,_effects:null,get _clampedScrollTop(){return Math.max(0,this._scrollTop)},attached:function(){this._scrollStateChanged()},detached:function(){this._tearDownEffects()},createEffect:function(t,e){var i=p[t];if(!i)throw new ReferenceError(this._getUndefinedMsg(t));var s=this._boundEffect(i,e||{});return s.setUp(),s},_effectsChanged:function(t,e,i){this._tearDownEffects(),t&&i&&(t.split(" ").forEach((function(t){var i;""!==t&&((i=p[t])?this._effects.push(this._boundEffect(i,e[t])):console.warn(this._getUndefinedMsg(t)))}),this),this._setUpEffect())},_layoutIfDirty:function(){return this.offsetWidth},_boundEffect:function(t,e){e=e||{};var i=parseFloat(e.startsAt||0),s=parseFloat(e.endsAt||1),n=s-i,o=function(){},r=0===i&&1===s?t.run:function(e,s){t.run.call(this,Math.max(0,(e-i)/n),s)};return{setUp:t.setUp?t.setUp.bind(this,e):o,run:t.run?r.bind(this):o,tearDown:t.tearDown?t.tearDown.bind(this):o}},_setUpEffect:function(){this.isAttached&&this._effects&&(this._effectsRunFn=[],this._effects.forEach((function(t){!1!==t.setUp()&&this._effectsRunFn.push(t.run)}),this))},_tearDownEffects:function(){this._effects&&this._effects.forEach((function(t){t.tearDown()})),this._effectsRunFn=[],this._effects=[]},_runEffects:function(t,e){this._effectsRunFn&&this._effectsRunFn.forEach((function(i){i(t,e)}))},_scrollHandler:function(){this._scrollStateChanged()},_scrollStateChanged:function(){if(!this.disabled){var t=this._clampedScrollTop;this._updateScrollState(t),this.threshold>0&&this._setThresholdTriggered(t>=this.threshold)}},_getDOMRef:function(t){console.warn("_getDOMRef","`"+t+"` is undefined")},_getUndefinedMsg:function(t){return"Scroll effect `"+t+"` is undefined. 
Did you forget to import app-layout/app-scroll-effects/effects/"+t+".html ?"}}];(0,s.k)({_template:o.d` <style> :host { position: relative; display: block; transition-timing-function: linear; transition-property: -webkit-transform; transition-property: transform; } :host::before { position: absolute; right: 0px; bottom: -5px; left: 0px; width: 100%; height: 5px; content: ""; transition: opacity 0.4s; pointer-events: none; opacity: 0; box-shadow: inset 0px 5px 6px -3px rgba(0, 0, 0, 0.4); will-change: opacity; @apply --app-header-shadow; } :host([shadow])::before { opacity: 1; } #background { @apply --layout-fit; overflow: hidden; } #backgroundFrontLayer, #backgroundRearLayer { @apply --layout-fit; height: 100%; pointer-events: none; background-size: cover; } #backgroundFrontLayer { @apply --app-header-background-front-layer; } #backgroundRearLayer { opacity: 0; @apply --app-header-background-rear-layer; } #contentContainer { position: relative; width: 100%; height: 100%; } :host([disabled]), :host([disabled])::after, :host([disabled]) #backgroundFrontLayer, :host([disabled]) #backgroundRearLayer, /* Silent scrolling should not run CSS transitions */ :host([silent-scroll]), :host([silent-scroll])::after, :host([silent-scroll]) #backgroundFrontLayer, :host([silent-scroll]) #backgroundRearLayer { transition: none !important; } :host([disabled]) ::slotted(app-toolbar:first-of-type), :host([disabled]) ::slotted([sticky]), /* Silent scrolling should not run CSS transitions */ :host([silent-scroll]) ::slotted(app-toolbar:first-of-type), :host([silent-scroll]) ::slotted([sticky]) { transition: none !important; } </style> <div id="contentContainer"> <slot id="slot"></slot> </div> `,is:"app-header",behaviors:[_,u],properties:{condenses:{type:Boolean,value:!1},fixed:{type:Boolean,value:!1},reveals:{type:Boolean,value:!1},shadow:{type:Boolean,reflectToAttribute:!0,value:!1}},observers:["_configChanged(isAttached, condenses, fixed)"],_height:0,_dHeight:0,_stickyElTop:0,_stickyElRef:null,_top:0,_progress:0,_wasScrollingDown:!1,_initScrollTop:0,_initTimestamp:0,_lastTimestamp:0,_lastScrollTop:0,get _maxHeaderTop(){return this.fixed?this._dHeight:this._height+5},get _stickyEl(){if(this._stickyElRef)return this._stickyElRef;for(var t,e=(0,n.vz)(this.$.slot).getDistributedNodes(),i=0;t=e[i];i++)if(t.nodeType===Node.ELEMENT_NODE){if(t.hasAttribute("sticky")){this._stickyElRef=t;break}this._stickyElRef||(this._stickyElRef=t)}return this._stickyElRef},_configChanged:function(){this.resetLayout(),this._notifyLayoutChanged()},_updateLayoutStates:function(){if(0!==this.offsetWidth||0!==this.offsetHeight){var t=this._clampedScrollTop,e=0===this._height||0===t,i=this.disabled;this._height=this.offsetHeight,this._stickyElRef=null,this.disabled=!0,e||this._updateScrollState(0,!0),this._mayMove()?this._dHeight=this._stickyEl?this._height-this._stickyEl.offsetHeight:0:this._dHeight=0,this._stickyElTop=this._stickyEl?this._stickyEl.offsetTop:0,this._setUpEffect(),e?this._updateScrollState(t,!0):(this._updateScrollState(this._lastScrollTop,!0),this._layoutIfDirty()),this.disabled=i}},_updateScrollState:function(t,e){if(0!==this._height){var 
i=0,s=0,n=this._top,o=(this._lastScrollTop,this._maxHeaderTop),r=t-this._lastScrollTop,a=Math.abs(r),l=t>this._lastScrollTop,h=performance.now();if(this._mayMove()&&(s=this._clamp(this.reveals?n+r:t,0,o)),t>=this._dHeight&&(s=this.condenses&&!this.fixed?Math.max(this._dHeight,s):s,this.style.transitionDuration="0ms"),this.reveals&&!this.disabled&&a<100&&((h-this._initTimestamp>300||this._wasScrollingDown!==l)&&(this._initScrollTop=t,this._initTimestamp=h),t>=o))if(Math.abs(this._initScrollTop-t)>30||a>10){l&&t>=o?s=o:!l&&t>=this._dHeight&&(s=this.condenses&&!this.fixed?this._dHeight:0);var c=r/(h-this._lastTimestamp);this.style.transitionDuration=this._clamp((s-n)/c,0,300)+"ms"}else s=this._top;i=0===this._dHeight?t>0?1:0:s/this._dHeight,e||(this._lastScrollTop=t,this._top=s,this._wasScrollingDown=l,this._lastTimestamp=h),(e||i!==this._progress||n!==s||0===t)&&(this._progress=i,this._runEffects(i,s),this._transformHeader(s))}},_mayMove:function(){return this.condenses||!this.fixed},willCondense:function(){return this._dHeight>0&&this.condenses},isOnScreen:function(){return 0!==this._height&&this._top<this._height},isContentBelow:function(){return 0===this._top?this._clampedScrollTop>0:this._clampedScrollTop-this._maxHeaderTop>=0},_transformHeader:function(t){this.translate3d(0,-t+"px",0),this._stickyEl&&this.translate3d(0,this.condenses&&t>=this._stickyElTop?Math.min(t,this._dHeight)-this._stickyElTop+"px":0,0,this._stickyEl)},_clamp:function(t,e,i){return Math.min(i,Math.max(e,t))},_ensureBgContainers:function(){this._bgContainer||(this._bgContainer=document.createElement("div"),this._bgContainer.id="background",this._bgRear=document.createElement("div"),this._bgRear.id="backgroundRearLayer",this._bgContainer.appendChild(this._bgRear),this._bgFront=document.createElement("div"),this._bgFront.id="backgroundFrontLayer",this._bgContainer.appendChild(this._bgFront),(0,n.vz)(this.root).insertBefore(this._bgContainer,this.$.contentContainer))},_getDOMRef:function(t){switch(t){case"backgroundFrontLayer":return this._ensureBgContainers(),this._bgFront;case"backgroundRearLayer":return this._ensureBgContainers(),this._bgRear;case"background":return this._ensureBgContainers(),this._bgContainer;case"mainTitle":return(0,n.vz)(this).querySelector("[main-title]");case"condensedTitle":return(0,n.vz)(this).querySelector("[condensed-title]")}return null},getScrollState:function(){return{progress:this._progress,top:this._top}}})},12730:(t,e,i)=>{i(39975),i(65660);var s=i(71132),n=i(50856);(0,s.k)({_template:n.d` <style> :host { @apply --layout-horizontal; @apply --layout-center; position: relative; height: 64px; padding: 0 16px; pointer-events: none; font-size: var(--app-toolbar-font-size, 20px); } :host ::slotted(*) { pointer-events: auto; } :host ::slotted(paper-icon-button) { /* paper-icon-button/issues/33 */ font-size: 0; } :host ::slotted([main-title]), :host ::slotted([condensed-title]) { pointer-events: none; @apply --layout-flex; } :host ::slotted([bottom-item]) { position: absolute; right: 0; bottom: 0; left: 0; } :host ::slotted([top-item]) { position: absolute; top: 0; right: 0; left: 0; } :host ::slotted([spacer]) { margin-left: 64px; } </style> <slot></slot> `,is:"app-toolbar"})},55020:(t,e,i)=>{i.d(e,{j:()=>n});var s={};function n(){return s}},5763:(t,e,i)=>{function s(t){var e=new Date(Date.UTC(t.getFullYear(),t.getMonth(),t.getDate(),t.getHours(),t.getMinutes(),t.getSeconds(),t.getMilliseconds()));return 
e.setUTCFullYear(t.getFullYear()),t.getTime()-e.getTime()}i.d(e,{Z:()=>s})},23682:(t,e,i)=>{function s(t,e){if(e.length<t)throw new TypeError(t+" argument"+(t>1?"s":"")+" required, but only "+e.length+" present")}i.d(e,{Z:()=>s})},90394:(t,e,i)=>{function s(t){if(null===t||!0===t||!1===t)return NaN;var e=Number(t);return isNaN(e)?e:e<0?Math.ceil(e):Math.floor(e)}i.d(e,{Z:()=>s})},62308:(t,e,i)=>{i.d(e,{Z:()=>h});var s=i(34327),n=i(5763),o=i(23682);function r(t){(0,o.Z)(1,arguments);var e=(0,s.Z)(t);return e.setHours(0,0,0,0),e}var a=864e5;function l(t,e){var i=t.getFullYear()-e.getFullYear()||t.getMonth()-e.getMonth()||t.getDate()-e.getDate()||t.getHours()-e.getHours()||t.getMinutes()-e.getMinutes()||t.getSeconds()-e.getSeconds()||t.getMilliseconds()-e.getMilliseconds();return i<0?-1:i>0?1:i}function h(t,e){(0,o.Z)(2,arguments);var i=(0,s.Z)(t),h=(0,s.Z)(e),c=l(i,h),d=Math.abs(function(t,e){(0,o.Z)(2,arguments);var i=r(t),s=r(e),l=i.getTime()-(0,n.Z)(i),h=s.getTime()-(0,n.Z)(s);return Math.round((l-h)/a)}(i,h));i.setDate(i.getDate()-c*d);var u=c*(d-Number(l(i,h)===-c));return 0===u?0:u}},27296:(t,e,i)=>{i.d(e,{Z:()=>a});var s=i(62308),n=i(23682),o={ceil:Math.ceil,round:Math.round,floor:Math.floor,trunc:function(t){return t<0?Math.ceil(t):Math.floor(t)}},r="trunc";function a(t,e,i){(0,n.Z)(2,arguments);var a,l=(0,s.Z)(t,e)/7;return((a=null==i?void 0:i.roundingMethod)?o[a]:o[r])(l)}},59401:(t,e,i)=>{i.d(e,{Z:()=>a});var s=i(34327),n=i(90394),o=i(23682),r=i(55020);function a(t,e){var i,a,l,h,c,d,u,f;(0,o.Z)(1,arguments);var p=(0,r.j)(),_=(0,n.Z)(null!==(i=null!==(a=null!==(l=null!==(h=null==e?void 0:e.weekStartsOn)&&void 0!==h?h:null==e||null===(c=e.locale)||void 0===c||null===(d=c.options)||void 0===d?void 0:d.weekStartsOn)&&void 0!==l?l:p.weekStartsOn)&&void 0!==a?a:null===(u=p.locale)||void 0===u||null===(f=u.options)||void 0===f?void 0:f.weekStartsOn)&&void 0!==i?i:0);if(!(_>=0&&_<=6))throw new RangeError("weekStartsOn must be between 0 and 6 inclusively");var g=(0,s.Z)(t),b=g.getDay(),y=(b<_?7:0)+b-_;return g.setDate(g.getDate()-y),g.setHours(0,0,0,0),g}},34327:(t,e,i)=>{i.d(e,{Z:()=>o});var s=i(76775),n=i(23682);function o(t){(0,n.Z)(1,arguments);var e=Object.prototype.toString.call(t);return t instanceof Date||"object"===(0,s.Z)(t)&&"[object Date]"===e?new Date(t.getTime()):"number"==typeof t||"[object Number]"===e?new Date(t):("string"!=typeof t&&"[object String]"!==e||"undefined"==typeof console||(console.warn("Starting with v2.0.0-beta.1 date-fns doesn't accept strings as date arguments. Please use `parseISO` to parse strings. 
See: https://github.com/date-fns/date-fns/blob/master/docs/upgradeGuide.md#string-arguments"),console.warn((new Error).stack)),new Date(NaN))}},3239:(t,e,i)=>{function s(t){if(!t||"object"!=typeof t)return t;if("[object Date]"==Object.prototype.toString.call(t))return new Date(t.getTime());if(Array.isArray(t))return t.map(s);var e={};return Object.keys(t).forEach((function(i){e[i]=s(t[i])})),e}i.d(e,{Z:()=>s})},22075:(t,e,i)=>{i.d(e,{L:()=>o});const s={en:"US",hi:"IN",deva:"IN",te:"IN",mr:"IN",ta:"IN",gu:"IN",kn:"IN",or:"IN",ml:"IN",pa:"IN",bho:"IN",awa:"IN",as:"IN",mwr:"IN",mai:"IN",mag:"IN",bgc:"IN",hne:"IN",dcc:"IN",bn:"BD",beng:"BD",rkt:"BD",dz:"BT",tibt:"BT",tn:"BW",am:"ET",ethi:"ET",om:"ET",quc:"GT",id:"ID",jv:"ID",su:"ID",mad:"ID",ms_arab:"ID",he:"IL",hebr:"IL",jam:"JM",ja:"JP",jpan:"JP",km:"KH",khmr:"KH",ko:"KR",kore:"KR",lo:"LA",laoo:"LA",mh:"MH",my:"MM",mymr:"MM",mt:"MT",ne:"NP",fil:"PH",ceb:"PH",ilo:"PH",ur:"PK",pa_arab:"PK",lah:"PK",ps:"PK",sd:"PK",skr:"PK",gn:"PY",th:"TH",thai:"TH",tts:"TH",zh_hant:"TW",hant:"TW",sm:"WS",zu:"ZA",sn:"ZW",arq:"DZ",ar:"EG",arab:"EG",arz:"EG",fa:"IR",az_arab:"IR",dv:"MV",thaa:"MV"};const n={AG:0,ATG:0,28:0,AS:0,ASM:0,16:0,BD:0,BGD:0,50:0,BR:0,BRA:0,76:0,BS:0,BHS:0,44:0,BT:0,BTN:0,64:0,BW:0,BWA:0,72:0,BZ:0,BLZ:0,84:0,CA:0,CAN:0,124:0,CO:0,COL:0,170:0,DM:0,DMA:0,212:0,DO:0,DOM:0,214:0,ET:0,ETH:0,231:0,GT:0,GTM:0,320:0,GU:0,GUM:0,316:0,HK:0,HKG:0,344:0,HN:0,HND:0,340:0,ID:0,IDN:0,360:0,IL:0,ISR:0,376:0,IN:0,IND:0,356:0,JM:0,JAM:0,388:0,JP:0,JPN:0,392:0,KE:0,KEN:0,404:0,KH:0,KHM:0,116:0,KR:0,KOR:0,410:0,LA:0,LA0:0,418:0,MH:0,MHL:0,584:0,MM:0,MMR:0,104:0,MO:0,MAC:0,446:0,MT:0,MLT:0,470:0,MX:0,MEX:0,484:0,MZ:0,MOZ:0,508:0,NI:0,NIC:0,558:0,NP:0,NPL:0,524:0,PA:0,PAN:0,591:0,PE:0,PER:0,604:0,PH:0,PHL:0,608:0,PK:0,PAK:0,586:0,PR:0,PRI:0,630:0,PT:0,PRT:0,620:0,PY:0,PRY:0,600:0,SA:0,SAU:0,682:0,SG:0,SGP:0,702:0,SV:0,SLV:0,222:0,TH:0,THA:0,764:0,TT:0,TTO:0,780:0,TW:0,TWN:0,158:0,UM:0,UMI:0,581:0,US:0,USA:0,840:0,VE:0,VEN:0,862:0,VI:0,VIR:0,850:0,WS:0,WSM:0,882:0,YE:0,YEM:0,887:0,ZA:0,ZAF:0,710:0,ZW:0,ZWE:0,716:0,AE:6,ARE:6,784:6,AF:6,AFG:6,4:6,BH:6,BHR:6,48:6,DJ:6,DJI:6,262:6,DZ:6,DZA:6,12:6,EG:6,EGY:6,818:6,IQ:6,IRQ:6,368:6,IR:6,IRN:6,364:6,JO:6,JOR:6,400:6,KW:6,KWT:6,414:6,LY:6,LBY:6,434:6,OM:6,OMN:6,512:6,QA:6,QAT:6,634:6,SD:6,SDN:6,729:6,SY:6,SYR:6,760:6,MV:5,MDV:5,462:5};function o(t){return function(t,e,i){if(t){var s,n=t.toLowerCase().split(/[-_]/),o=n[0],r=o;if(n[1]&&4===n[1].length?(r+="_"+n[1],s=n[2]):s=n[1],s||(s=e[r]||e[o]),s)return function(t,e){var i=e["string"==typeof t?t.toUpperCase():t];return"number"==typeof i?i:1}(s.match(/^\d+$/)?Number(s):s,i)}return 1}(t,s,n)}},57835:(t,e,i)=>{i.d(e,{XM:()=>s.XM,Xe:()=>s.Xe,pX:()=>s.pX});var s=i(38941)}}; //# sourceMappingURL=1106--L8MEXUaKBc.js.map
PypiClean
/compare_my_stocks-1.0.5-py3-none-any.whl/compare_my_stocks/engine/compareengine.py
import logging import threading from config import config from common.common import NoDataException, MySignal, Types, UniteType, InputSourceType from common.simpleexceptioncontext import simple_exception_handling from engine.compareengineinterface import CompareEngineInterface from engine.symbolshandler import SymbolsHandler from input.ibsource import get_ib_source from input.inputsource import InputSourceInterface from input.investpysource import InvestPySource from processing.datagenerator import DataGenerator from graph.graphgenerator import GraphGenerator from input.inputprocessor import InputProcessor from engine.parameters import Parameters from transactions.transactionhandlermanager import TransactionHandlerManager class InternalCompareEngine(SymbolsHandler,CompareEngineInterface): statusChanges = MySignal(str) finishedGeneration = MySignal(int) minMaxChanged = MySignal(tuple) namesChanged = MySignal(int) @staticmethod @simple_exception_handling(err_description='Input source initialization failed. ',never_throw=True) def get_input_source(input_type : InputSourceType = None): if input_type is None: input_type =config.Input.INPUTSOURCE if input_type is None: return None if input_type == InputSourceType.IB: return get_ib_source() # IBSource() elif input_type == InputSourceType.InvestPy: return InvestPySource() def __init__(self, axes=None): SymbolsHandler.__init__(self) input_source= self.get_input_source() self._tr = TransactionHandlerManager(None) self._inp = InputProcessor(self, self._tr,input_source) self._tr._inp = self._inp # double redirection. self._datagen: DataGenerator = DataGenerator(self) self._generator: GraphGenerator = GraphGenerator(self, axes) self._annotation = [] self._cache_date = None self.read_groups_from_file() self._datagenlock = threading.Lock() def required_syms(self, include_ext=True, want_portfolio_if_needed=False, want_unite_symbols=False,only_unite=False): #the want it all is in the case of populating dict selected = set() if want_unite_symbols and (self.used_type & Types.COMPARE and self.params.compare_with): #notice that based on params type and not real type selected.update(set([self.params.compare_with])) if want_portfolio_if_needed and (self.params.unite_by_group & UniteType.ADDPROT): selected=set(self.transaction_handler.get_portfolio_stocks()) if self.to_use_ext and include_ext: selected.update(set(self.params.ext)) if (self.used_unitetype & ~UniteType.ADDTOTALS) and want_unite_symbols: if only_unite: #it is a bit of cheating but we don't need to specify require data symbols in that case return selected if self.params.use_groups: return selected.union(self.get_options_from_groups(self.params.groups)) else: return selected.union(self.params.selected_stocks) def gen_graph(self, params: Parameters, just_upd=0, reprocess=1): if just_upd and self.params: self.params.update_from(params) else: self.params = params self.params._baseclass = self self.to_use_ext = self.params.use_ext self.used_unitetype = self.params.unite_by_group requried_syms = self.required_syms(True, True) if self._inp.usable_symbols and (not (set(requried_syms) <= self._inp.usable_symbols)): symbols_needed = set(requried_syms) - self._inp.usable_symbols - self._inp._bad_symbols - set( config.Symbols.IGNORED_SYMBOLS) #TODO::make bad symbols property if len(symbols_needed) > 0: reprocess = 1 logging.debug((f'should add stocks {symbols_needed}')) else: reprocess = 0 else: symbols_needed = set() # process all... 
adjust_date = False if reprocess: self._inp.process(symbols_needed) adjust_date = True if hasattr(self.params,'adjust_date'): adjust_date = adjust_date or self.params.adjust_date else: self.params.adjust_date=0 with self._datagenlock: res= self.call_data_generator() if res==2: adjust_date = True if res: df = self._datagen.df type = self._datagen.type before_act = self._datagen.df_before_act self.call_graph_generator(df, just_upd,type,before_act , adjust_date= adjust_date ) @simple_exception_handling(err_description="Exception in generation") def call_data_generator(self,auto_reprocess=True): b=0 for tries in range(2): if not self._datagen.verify_conditions(): self.statusChanges.emit(f'Graph Invalid! Check parameters') return False try: self._datagen.generate_data() return 1+b except NoDataException: if auto_reprocess: logging.debug("No data first try. reprocessing") self._inp.process(self.required_syms(True, True)) b=1 continue else: self.statusChanges.emit(f'No Data For Graph!') logging.debug(('no data')) return False except Exception as e: self.statusChanges.emit(f'Exception in generation: {e}') raise def call_graph_generator(self, df, just_upd, type,orig_data,adjust_date=False): if df.empty: self.statusChanges.emit(f'No Data For Graph!') return def upd(msg,err=False): self.statusChanges.emit(msg) self._inp.failed_to_get_new_data=None #reset it if err: logging.error(msg) else: logging.info(msg) plot_data = {} if ((Types.PRECENTAGE | Types.DIFF | Types.COMPARE) & type) == 0 and self.params.unite_by_group & (UniteType.SUM | UniteType.AVG)==0: try: plot_data= self._tr.get_data_for_graph(list(df.columns),df.index[0],df.index[-1]) except: logging.error("failed to get transaction data for graph") try: self._generator.gen_actual_graph(list(df.columns), df, self.params.isline, self.params.starthidden, just_upd, type,orig_data,adjust_date=adjust_date,plot_data=plot_data) if self._inp.failed_to_get_new_data: upd(f"Generated Graph with old data ( Query failed :() ") else: upd("Generated Graph :)") self.finishedGeneration.emit(1) except TypeError as e: upd(f"failed generating graph {e}",err=True) raise # makes the entire graph from the default attributes. def update_graph(self, params: Parameters = Parameters()): reprocess = 1 if (not self.input_processor._alldates) else 0 params.increase_fig = False self.gen_graph(params, just_upd=1, reprocess=reprocess) class CompareEngine(InternalCompareEngine): ''' Here we just add the proxy methods. 
''' def serialized_data(self): return self._datagen.serialized_data() @property def adjust_date(self): return self._generator.adjust_date @adjust_date.setter def adjust_date(self, value): self._generator.adjust_date = value @property def input_processor(self): return self._inp @property def transaction_handler(self): return self._tr @property def colswithoutext(self): return self._datagen.colswithoutext @property def minValue(self): return self._datagen.minValue @property def maxValue(self): return self._datagen.maxValue @property def maxdate(self): if self._inp: return self._inp.maxdate else: return None @property def mindate(self): if self._inp: return self._inp.mindate else: return None @property def to_use_ext(self): """doc""" return self._datagen.to_use_ext @to_use_ext.setter def to_use_ext(self, value): self._datagen.to_use_ext = value @property def used_type(self): return self._datagen.used_type @property def used_unitetype(self): """doc""" return self._datagen.used_unitetype @used_unitetype.setter def used_unitetype(self, value): self._datagen.used_unitetype = value @property def inputsource(self) -> InputSourceInterface: return self._inp.inputsource @property def usable_symbols(self): return self._inp.usable_symbols @property def visible_columns(self): return self._generator.get_visible_cols() @property def final_columns(self): return self._datagen.finalcols def show_hide(self,val): return self._generator.show_hide(val) def process(self, *args,**kwargs): return self._inp.process(*args,**kwargs) def get_portfolio_stocks(self): return self.transaction_handler.get_portfolio_stocks()
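# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# CompareEngine wires the InputProcessor, TransactionHandlerManager,
# DataGenerator and GraphGenerator together and re-exposes their state through
# the read-only properties above. The driving sequence below is an assumption
# and is commented out because it relies on a fully populated `config`
# (input source, symbol groups, transaction data):
#
# engine = CompareEngine(axes=None)
# engine.gen_graph(Parameters())      # full run: process input, build data, draw graph
# engine.update_graph(Parameters())   # later updates reuse already-processed data
# print(engine.visible_columns, engine.minValue, engine.maxValue)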
PypiClean
/tensorflow_aarch64-2.14.0rc0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/tensorflow/python/ops/structured/structured_tensor.py
"""Structured Tensors.""" import re from typing import Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import extension_type from tensorflow.python.framework import ops from tensorflow.python.framework import tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import type_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.ragged import dynamic_ragged_shape from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.ops.ragged.row_partition import RowPartition from tensorflow.python.util import compat from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export # Each field may contain one of the following types of Tensors. _FieldValue = Union[ tensor.Tensor, ragged_tensor.RaggedTensor, 'StructuredTensor', extension_type.ExtensionType ] # Function that takes a FieldValue as input and returns the transformed # FieldValue. _FieldFn = Callable[[_FieldValue], _FieldValue] @tf_export('experimental.StructuredTensor') class StructuredTensor(extension_type.BatchableExtensionType): """A multidimensional collection of structures with the same schema. A **`StructuredTensor`** is a multi-dimensional collection of ***structures*** with the same ***schema***, where: * A ***schema*** is a collection of fields, each of which has a name and type. * A ***structure*** maps each field in the schema to a tensor value (which could be a nested StructuredTensor). As an important special case, a 1D `StructuredTensor` encodes a 2D table, where columns are heterogeneous `Tensor`s, and rows are the aligned elements in each of those `Tensor`s. Internally, StructuredTensors use a "field-major" encoding: for each leaf field, there is a single tensor that stores the value of that field for all structures in the `StructuredTensor`. ### Examples >>> # A scalar StructuredTensor describing a single person. >>> s1 = tf.experimental.StructuredTensor.from_pyval( ... {"age": 82, "nicknames": ["Bob", "Bobby"]}) >>> s1.shape TensorShape([]) >>> s1["age"] <tf.Tensor: shape=(), dtype=int32, numpy=82> >>> # A vector StructuredTensor describing three people. >>> s2 = tf.experimental.StructuredTensor.from_pyval([ ... {"age": 12, "nicknames": ["Josaphine"]}, ... {"age": 82, "nicknames": ["Bob", "Bobby"]}, ... {"age": 42, "nicknames": ["Elmo"]}]) >>> s2.shape TensorShape([3]) >>> s2[0]["age"] <tf.Tensor: shape=(), dtype=int32, numpy=12> ### Field Paths A *field path* is a tuple of field names, specifying the path to a nested field. """ _fields: Mapping[str, _FieldValue] _ragged_shape: dynamic_ragged_shape.DynamicRaggedShape __name__ = 'tf.StructuredTensor' #============================================================================= # Common Types #============================================================================= # pylint: disable=invalid-name # Field names work as key, and they can be a sequence to refer to the # sub-levels (embedded) StructuredTensor's. 
FieldName = Union[str, Sequence[str]] # pylint: enable=invalid-name #============================================================================= # Constructor & Factory Methods #============================================================================= def __init__(self, fields: Mapping[str, _FieldValue], ragged_shape: dynamic_ragged_shape.DynamicRaggedShape): self._fields = fields self._ragged_shape = ragged_shape @classmethod def _old_init(cls, fields, shape, nrows, row_partitions, internal=False): """Private constructor -- use factory methods to create StructuredTensors. This constructor builds a `StructuredTensor` from the given attributes, performing minimal validation. Args: fields: A dictionary mapping from string to `Tensor`, `RaggedTensor`, or `StructuredTensor`. (This dict is not copied, so the caller must ensure that it does not get mutated via leaked references.) shape: `tf.TensorShape` with statically known rank. nrows: scalar integer `tf.Tensor`, or `None` if `shape.rank==0`. row_partitions: tuple of `RowPartition`s, with length `shape.rank-1`. internal: ignored argument. Returns: a StructuredTensor. """ assert isinstance(fields, dict), fields assert isinstance(shape, tensor_shape.TensorShape), shape assert nrows is None or isinstance(nrows, tensor.Tensor), nrows assert row_partitions is None or isinstance(row_partitions, tuple), row_partitions return StructuredTensor( fields=fields, ragged_shape=_dynamic_ragged_shape_init(fields, shape, nrows, row_partitions)) @classmethod def from_shape( cls, ragged_shape: dynamic_ragged_shape.DynamicRaggedShape ) -> 'StructuredTensor': """Creates a `StructuredTensor` with no fields and ragged_shape. Args: ragged_shape: the shape of the structured tensor. Returns: a StructuredTensor with no fields and ragged_shape. """ return StructuredTensor(fields={}, ragged_shape=ragged_shape) @classmethod def from_fields(cls, fields, shape=(), nrows=None, row_partitions=None, validate=False): """Creates a `StructuredTensor` from a dictionary of fields. Args: fields: A dictionary mapping from string to `Tensor`, `RaggedTensor`, or `StructuredTensor`, providing the values for individual fields in each structure. If `shape.rank > 0`, then every tensor in `fields` must have the same shape in the first `shape.rank` dimensions; and that shape must be compatible with `shape`; and `result[i1...iN][key] = fields[key][i1...iN]` (where `N==shape.rank`). shape: A `TensorShape`: static information about the shape of the `StructuredTensor`. Must have a known `rank`. Defaults to scalar shape (i.e. `rank=0`). nrows: scalar integer tensor containing the number of rows in this `StructuredTensor`. Should only be specified if `shape.rank > 0`. Default value is inferred from the `fields` values. If `fields` is empty, then this must be specified. row_partitions: A list of `RowPartition`s describing the (possibly ragged) shape of this `StructuredTensor`. Should only be specified if `shape.rank > 1`. Default value is inferred from the `fields` values. If `fields` is empty, then this must be specified. validate: If true, then add runtime validation ops that check that the field values all have compatible shapes in the outer `shape.rank` dimensions. Returns: A `StructuredTensor`. Examples: >>> tf.experimental.StructuredTensor.from_fields({'x': 1, 'y': [1, 2, 3]}) <StructuredTensor( fields={ "x": tf.Tensor(1, shape=(), dtype=int32), "y": tf.Tensor([1 2 3], shape=(3,), dtype=int32)}, shape=())> >>> tf.experimental.StructuredTensor.from_fields( ... 
{'foo': [1, 2], 'bar': [3, 4]}, shape=[2]) <StructuredTensor( fields={ "bar": tf.Tensor([3 4], shape=(2,), dtype=int32), "foo": tf.Tensor([1 2], shape=(2,), dtype=int32)}, shape=(2,))> """ shape = tensor_shape.as_shape(shape) rank = shape.rank if rank is None: raise ValueError("StructuredTensor's shape must have known rank.") if not isinstance(fields, dict): raise TypeError('fields must be a dictionary, got %s' % type(fields).__name__) if rank < 2 and row_partitions: raise ValueError('row_partitions must be None or [] if shape.rank<2') if rank == 0 and nrows is not None: raise ValueError('nrows must be None if shape.rank==0') if row_partitions is not None: row_partitions = tuple(row_partitions) if len(row_partitions) != max(0, rank - 1): raise ValueError('len(row_partitions) must be shape.rank-1') elif rank < 2: row_partitions = () fields = dict(fields) # Make a private copy. with ops.name_scope(None, 'StructuredTensor', fields.values()): # TODO(martinz): Make this have better errors. shape = _dynamic_ragged_shape_init(fields, shape, nrows, row_partitions) # TODO(martinz): This may not need to be done if all fields are dense. if shape.rank > 1: shape = shape._with_num_row_partitions(shape.rank - 1) # Validate keys and convert field values to tensors. for key, value in fields.items(): if not isinstance(key, str): raise TypeError(f'Unexpected type for key in `fields`: {key}') if not _FIELD_NAME_RE.match(key): raise ValueError('Field name %r is not currently allowed.' % key) fields[key] = _convert_to_structured_field_value(value) fields = dict([(k, _replace_row_partitions(v, row_partitions)) for (k, v) in fields.items()]) return cls(fields=fields, ragged_shape=shape) @classmethod def from_fields_and_rank( cls, fields: Mapping[str, _FieldValue], rank: int, validate: bool = False, dtype: Optional[dtypes.DType] = None) -> 'StructuredTensor': """Creates a `StructuredTensor` from a nonempty dictionary of fields. Note that if the shape dtype is not specified, the shape dtype will be inferred from any fields that have a shape dtype. If fields differ, then int64 will be preferred to int32, because coercing from int32 to int64 is safer than coercing from int64 to int32. If there are no ragged fields, then it will be int64 by default, but this will be changed to int32 in the future. Args: fields: A dictionary mapping from string to `Tensor`, `RaggedTensor`, or `StructuredTensor`, providing the values for individual fields in each structure. If `rank > 0`, then every tensor in `fields` must have the same shape in the first `rank` dimensions. Cannot be empty. rank: The rank of the resulting structured tensor. validate: If true, then add runtime validation ops that check that the field values all have compatible shapes in the outer `rank` dimensions. dtype: If specified, then forces dtype of the shape to be this. Returns: A `StructuredTensor`. Examples: >>> tf.experimental.StructuredTensor.from_fields_and_rank( ... {'x': 1, 'y': [1, 2, 3]}, 0) <StructuredTensor( fields={ "x": tf.Tensor(1, shape=(), dtype=int32), "y": tf.Tensor([1 2 3], shape=(3,), dtype=int32)}, shape=())> >>> StructuredTensor.from_fields_and_rank({'foo': [1, 2], 'bar': [3, 4]}, ... 
1) <StructuredTensor( fields={ "bar": tf.Tensor([3 4], shape=(2,), dtype=int32), "foo": tf.Tensor([1 2], shape=(2,), dtype=int32)}, shape=(2,))> """ if not fields: raise ValueError('Must provide at least one field') if not isinstance(rank, int): raise ValueError('rank must be an integer') if rank < 0: raise ValueError('rank must be nonnegative') fields = { k: _convert_to_structured_field_value(v) for (k, v) in fields.items() } if dtype is None: dtype = _find_shape_dtype(fields, None, None) fields = _fields_with_dtype(fields, dtype) shape = _shape_from_fields(fields, rank, dtype) if rank > 1: shape = shape._with_num_row_partitions(rank - 1) new_rp = shape._row_partitions # pylint: disable=protected-access fields = { k: _replace_row_partitions(v, new_rp) for (k, v) in fields.items() } return StructuredTensor(fields=fields, ragged_shape=shape) def with_updates(self, updates: Dict[FieldName, Union[_FieldValue, _FieldFn, None]], validate: bool = False) -> 'StructuredTensor': """Creates a new `StructuredTensor` with the updated fields. If this `StructuredTensor` is a scalar, and `k` is the `FieldName` being updated and `v` the new value, then: ``` result[k] = v # If (k, v) is in updates and v is a FieldValue result[k] = f(self[k]) # If (k, f) is in updates and f is a FieldFn result[k] = self[k] # If k is in self.field_names but not in updates ``` If this `StructuredTensor` has rank `N` and shape `[D1...DN]`, then each FieldValue `v` in `updates` must have shape `[D1...DN, ...]`, that is, prefixed with the same shape as the `StructuredTensor`. Then the resulting `StructuredTensor` will have: ``` result[i1...iN][k] = v[i1...iN] # (k, v) in updates result[i1...iN][k] = f(self.field_value(k))[i1...iN] # (k, f) in updates result[i1...iN][k] = self[i1...iN][k] # k not in updates ``` Note that `result.shape` is always equal to `self.shape` (but the shapes of nested StructuredTensors may be changed if they are updated with new values). Args: updates: A dictionary mapping `FieldName` to either a `FieldValue` to be used to update, or a `FieldFn` that will transform the value for the given `FieldName`. `FieldName` can be a string for a direct field, or a sequence of strings to refer to a nested sub-field. `FieldFn` is a function that takes a `FieldValue` as input and should return a `FieldValue`. All other fields are copied over to the new `StructuredTensor`. New `FieldName` can be given (to add new fields), but only to existing `StructuredTensor`, it won't automatically create new nested structures -- but one can create a whole `StructureTensor` sub-structure and set that into an existing structure. If the new value is set to `None`, it is removed. validate: If true, then add runtime validation ops that check that the field values all have compatible shapes in the outer `shape.rank` dimensions. Returns: A `StructuredTensor`. Raises: `ValueError`: If the any of the `FieldName` keys points to non-existent sub-structures, if parent and child nodes are updated, if shapes change, if a delete update is given for a non-existent field, or if a `FieldFn` transforming function is given for a `FieldName` that doesn't yet exist. Examples: >>> shoes_us = tf.experimental.StructuredTensor.from_pyval([ ... {"age": 12, "nicknames": ["Josaphine"], ... "shoes": {"sizes": [8.0, 7.5, 7.5]}}, ... {"age": 82, "nicknames": ["Bob", "Bobby"], ... "shoes": {"sizes": [11.0, 11.5, 12.0]}}, ... {"age": 42, "nicknames": ["Elmo"], ... "shoes": {"sizes": [9.0, 9.5, 10.0]}}]) >>> def us_to_europe(t): ... 
return tf.round(t * 2.54 + 17.0) # Rough approximation. >>> shoe_sizes_key = ("shoes", "sizes") >>> shoes_eu = shoes_us.with_updates({shoe_sizes_key: us_to_europe}) >>> shoes_eu.field_value(shoe_sizes_key) <tf.RaggedTensor [[37.0, 36.0, 36.0], [45.0, 46.0, 47.0], [40.0, 41.0, 42.0]]> """ updates_items = [(_normalize_field_name_to_tuple(name), value) for name, value in updates.items()] # Sort by keys and check for updates of both parent and child nodes. updates_items = sorted(updates_items) for i in range(1, len(updates_items)): # Parent of a node would precede node in the sorted order. name = updates_items[i][0] # item[0] is the name, item[1] is the value. prev_name = updates_items[i - 1][0] if name[:len(prev_name)] == prev_name: raise ValueError( '`StructuredTensor.with_updates` does not allow both parent and ' 'child nodes to be updated: parent={}, child={}. If needed you can ' 'update child nodes in the parent update value.'.format( prev_name, name)) return self._with_updates_impl((), updates_items, validate) def _with_updates_impl(self, error_prefix: Tuple[str, ...], updates: List[Tuple[FieldName, Union[_FieldValue, _FieldFn]]], validate: bool) -> 'StructuredTensor': """Recursive part of `with_updates` implementation.""" # Get current fields. new_fields = dict(self._fields) # Convert field name to string with full path for error messages. def name_fullpath(name: Sequence[str]) -> str: return str(error_prefix + (name,)) # Apply value if a function or the value itself. def apply_value(name: str, value: Union[_FieldValue, _FieldFn]) -> _FieldValue: if callable(value): # `value` is actually a transforming function. if name not in new_fields: raise ValueError( '`StructuredTensor.with_updates` cannot update the field {} ' 'because a transforming function was given, but that field ' 'does not already exist.'.format(name_fullpath(name))) value = value(new_fields[name]) return value # Merge updates. for name, value in updates: if not name or not name[0]: raise ValueError( '`StructuredTensor.with_updates` does not allow empty names ' '{}.'.format(name_fullpath(name))) if len(name) == 1: name = name[0] if value is None: if name not in new_fields: raise ValueError( '`StructuredTensor.with_updates` cannot delete field ' '{} because it is not present.'.format(name_fullpath(name))) new_fields.pop(name) else: new_fields[name] = apply_value(name, value) else: # Recursive prefix = name[0] suffix = name[1:] if prefix not in new_fields: raise ValueError( '`StructuredTensor.with_updates` cannot create new sub-field ' '{} if parent field {} is not set.'.format( error_prefix + tuple(name), name_fullpath(prefix))) current_value = new_fields[prefix] if not isinstance(current_value, StructuredTensor): raise ValueError( '`StructuredTensor.with_updates` cannot create new sub-field ' '{} if parent structure {} is not a `StructuredTensor` that ' 'can contain sub-structures -- it is a `{}`.'.format( error_prefix + tuple(name), name_fullpath(prefix), type(current_value))) one_update = [(suffix, value)] # Accessing protected member in recursion. # FutureWork: optimize by aggregating the recursions, instead of # calling one at a time. # pylint: disable=protected-access value = current_value._with_updates_impl(error_prefix + (prefix,), one_update, validate) # pylint: enable=protected-access new_fields[prefix] = value # TODO(edloper): When validate=True, only validate the modified fields. 
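    # The merged field dict is reassembled below with the original shape, nrows
    # and row partitions, so `with_updates` changes field values but never the
    # outer shape; any failure is re-raised with the field-path prefix attached.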
try: return StructuredTensor.from_fields( new_fields, shape=self.shape, row_partitions=self.row_partitions, nrows=self.nrows(), validate=validate) except ValueError as e: msg = '`StructuredTensor.with_updates` failed' if error_prefix: msg = '{} for field {}'.format(msg, error_prefix) raise ValueError(msg) from e def _promote_helper(self, source_path, new_parent_path): """Creates a promoted field without adding it to the structure. Args: source_path: the source path in the structured tensor. new_parent_path: the new parent path. Must be a prefix of source_path. Returns: a composite tensor of source_path promoted. Raises: ValueError: if the shape of the field is unknown and the right strategy cannot be determined. """ current_field = self.field_value(source_path) new_parent_rank = self.field_value(new_parent_path).rank parent_rank = self.field_value(source_path[:-1]).rank if new_parent_rank == parent_rank: return current_field current_field_rank = current_field.shape.rank if current_field_rank is None: raise ValueError('Cannot determine if dimensions should be merged.') inner_dim = min(parent_rank, current_field_rank - 1) if inner_dim <= new_parent_rank: return current_field return _merge_dims_generic(current_field, new_parent_rank, inner_dim) def promote(self, source_path, new_name): """Promotes a field, merging dimensions between grandparent and parent. >>> d = [ ... {'docs': [{'tokens':[1, 2]}, {'tokens':[3]}]}, ... {'docs': [{'tokens':[7]}]}] >>> st = tf.experimental.StructuredTensor.from_pyval(d) >>> st2 =st.promote(('docs','tokens'), 'docs_tokens') >>> st2[0]['docs_tokens'] <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 2, 3], dtype=int32)> >>> st2[1]['docs_tokens'] <tf.Tensor: shape=(1,), dtype=int32, numpy=array([7], dtype=int32)> Args: source_path: the path of the field or substructure to promote; must have length at least 2. new_name: the name of the new field (must be a string). Returns: a modified structured tensor with the new field as a child of the grandparent of the source_path. Raises: ValueError: if source_path is not a list or a tuple or has a length less than two, or new_name is not a string, or the rank of source_path is unknown and it is needed. """ if not isinstance(new_name, str): raise ValueError('new_name is not a string') if not isinstance(source_path, (list, tuple)): raise ValueError('source_path must be a list or tuple') if len(source_path) < 2: raise ValueError('source_path must have length at least two') grandparent_path = source_path[:-2] new_field = self._promote_helper(source_path, grandparent_path) new_path = grandparent_path + (new_name,) return self.with_updates({new_path: new_field}) #============================================================================= # Properties #============================================================================= @property def rank(self): """The rank of this StructuredTensor. Guaranteed not to be `None`.""" return self._ragged_shape.rank @property def shape(self): """The static shape of this StructuredTensor. The returned `TensorShape` is guaranteed to have a known rank, but the individual dimension sizes may be unknown. Returns: `tf.TensorShape` """ return self._ragged_shape._to_tensor_shape() # pylint: disable=protected-access # TODO(martinz): for backwards compatibility @property def _row_partitions(self): """Deprecated form of row_partitions.""" return self.row_partitions # TODO(edloper): Make this a func instead of a property? Or make nrows # a property instead of a func? 
Seems like these should be consistent. @property def row_partitions(self): """A tuple of `RowPartition`s defining the shape of this `StructuredTensor`. When `self.rank <= 1`, this tuple will be empty. When `self.rank > 1`, these `RowPartitions` define the shape of the `StructuredTensor` by describing how a flat (1D) list of structures can be repeatedly partitioned to form a higher-dimensional object. In particular, the flat list is first partitioned into sublists using `row_partitions[-1]`, and then those sublists are further partitioned using `row_partitions[-2]`, etc. The following examples show the row partitions used to describe several different `StructuredTensor`, each of which contains 8 copies of the same structure (`x`): >>> x = {'a': 1, 'b': ['foo', 'bar', 'baz']} # shape = [] (scalar) >>> s1 = [[x, x, x, x], [x, x, x, x]] # shape = [2, 4] >>> tf.experimental.StructuredTensor.from_pyval(s1).row_partitions (tf.RowPartition(row_splits=[0 4 8]),) >>> s2 = [[x, x], [x, x], [x, x], [x, x]] # shape = [4, 2] >>> tf.experimental.StructuredTensor.from_pyval(s2).row_partitions (tf.RowPartition(row_splits=[0 2 4 6 8]),) >>> s3 = [[x, x, x], [], [x, x, x, x], [x]] # shape = [2, None] >>> tf.experimental.StructuredTensor.from_pyval(s3).row_partitions (tf.RowPartition(row_splits=[0 3 3 7 8]),) >>> s4 = [[[x, x], [x, x]], [[x, x], [x, x]]] # shape = [2, 2, 2] >>> tf.experimental.StructuredTensor.from_pyval(s4).row_partitions (tf.RowPartition(row_splits=[0 2 4]), tf.RowPartition(row_splits=[0 2 4 6 8])) >>> s5 = [[[x, x], [x]], [[x, x]], [[x, x], [x]]] # shape = [3, None, None] >>> tf.experimental.StructuredTensor.from_pyval(s5).row_partitions (tf.RowPartition(row_splits=[0 2 3 5]), tf.RowPartition(row_splits=[0 2 3 5 7 8])) Note that shapes for nested fields (such as `x['b']` in the above example) are not considered part of the shape of a `StructuredTensor`, and are not included in `row_partitions`. If this `StructuredTensor` has a ragged shape (i.e., if any of the `row_partitions` is not uniform in size), then all fields will be encoded as either `RaggedTensor`s or `StructuredTensor`s with these `RowPartition`s used to define their outermost `self.rank` dimensions. Returns: A `tuple` of `RowPartition` objects with length `self.rank - 1` (or `0` if `self.rank < 2`) """ if self.rank < 2: return () return self._ragged_shape._as_row_partitions() # pylint:disable=protected-access def nrows(self): """The number of rows in this StructuredTensor (if rank>0). This means the length of the outer-most dimension of the StructuredTensor. Notice that if `self.rank > 1`, then this equals the number of rows of the first row partition. That is, `self.nrows() == self.row_partitions[0].nrows()`. Otherwise `self.nrows()` will be the first dimension of the field values. Returns: A scalar integer `Tensor` (or `None` if `self.rank == 0`). 
""" if self.rank == 0: return None return self._ragged_shape[0] def with_shape_dtype(self, dtype: dtypes.DType) -> 'StructuredTensor': if dtype == self._ragged_shape.dtype: return self return StructuredTensor( fields=_fields_with_dtype(self._fields, dtype), ragged_shape=self._ragged_shape.with_dtype(dtype)) def _is_eager(self): """True if all fields are composed of eager tensors.""" tensors = nest.flatten(self, expand_composites=True) return all(isinstance(t, ops.EagerTensor) for t in tensors) #============================================================================= # Encoding #============================================================================= def field_names(self): """Returns the string field names for this `StructuredTensor`.""" return tuple(self._fields.keys()) def field_value(self, field_name): """Returns the tensor value for the specified field or path. If `field_name` is a `string`, then it names a field directly owned by this `StructuredTensor`. If this `StructuredTensor` has shape `[D1...DN]`, then the returned tensor will have shape `[D1...DN, V1...VM]`, where the slice `result[d1...dN]` contains the field value for the structure at `self[d1...dN]`. If `field_name` is a `tuple` of `string`, then it specifies a path to a field owned by nested `StructuredTensor`. In particular, `struct.field_value((f1, f2, ..., fN))` is equivalent to `struct.field_value(f1).field_value(f2)....field_value(fN)` Args: field_name: `string` or `tuple` of `string`: The field whose values should be returned. Returns: `Tensor`, `StructuredTensor`, or `RaggedTensor`. Raises: KeyError: If the given field_name is not found. """ if isinstance(field_name, (list, tuple)): value = self for f in field_name: if not isinstance(value, StructuredTensor): raise KeyError('Field path {} not found in {}'.format( field_name, self)) value = value.field_value(f) return value return self._fields[field_name] #============================================================================= # Operators #============================================================================= # TODO(edloper): Add support for ellipsis and/or newaxis? def __getitem__(self, key): """Returns the specified piece of this StructuredTensor. * If `struct_tensor` is scalar (i.e., a single structure), then `struct_tensor[f]` returns the value of field `f` (where `f` must be a string). * If `struct_tensor` is non-scalar (i.e., a vector or higher-dimensional tensor of structures), `struct_tensor[i]` selects an element or slice of the tensor using standard Python semantics (e.g., negative values index from the end). `i` may have any of the following types: * `int` constant * `string` constant * scalar integer `Tensor` * `slice` containing integer constants and/or scalar integer `Tensor`s #### Multidimensional indexing `StructuredTensor` supports multidimensional indexing. I.e., `key` may be a `tuple` of values, indexing or slicing multiple dimensions at once. For example, if `people` is a vector of structures, each of which has a vector- valued `names` field, then `people[3, 'names', 0]` is equivalent to `people[3]['names'][0]`; and `people[:, 'names', :]` will return a (possibly ragged) matrix of names, with shape `[num_people, num_names_per_person]`. Args: key: Indicates which piece of the StructuredTensor to return. Returns: A `Tensor`, `StructuredTensor`, or `RaggedTensor`. 
""" if isinstance(key, list): key = tuple(key) elif not isinstance(key, tuple): key = (key,) if not key: return self if self.rank == 0: return self._scalar_getitem(key) else: return self._tensor_getitem(key) def _scalar_getitem(self, key): if (isinstance(key[0], slice) and key[0].start is None and key[0].stop is None and key[0].step is None): fields = dict((field_name, field_value.__getitem__(key[1:])) for (field_name, field_value) in self._fields.items()) return StructuredTensor.from_fields(fields, self.shape) elif not isinstance(key[0], compat.bytes_or_text_types): raise ValueError('Key for indexing a StructuredTensor must be a ' "string or a full slice (':')") return self._fields[key[0]].__getitem__(key[1:]) def _tensor_getitem(self, key): rank = self.rank if len(key) <= rank: new_fields = dict((field_name, field_value.__getitem__(key)) for (field_name, field_value) in self._fields.items()) result_shape = self.shape.as_list() for d, k in enumerate(key): if isinstance(k, slice): if not (k.start is None and k.stop is None and k.step is None): # TODO(edloper): Better static shape analysis here. result_shape[d] = None elif isinstance(k, (int, tensor.Tensor)): result_shape[d] = -1 # mark for deletion elif k is None: raise ValueError('Slicing not supported for tf.newaxis') else: # Ellipsis, tf.newaxis: raise ValueError('Slicing not supported for %r' % k) result_shape = [d for d in result_shape if d != -1] return StructuredTensor.from_fields(new_fields, result_shape) else: if not isinstance(key[rank], compat.bytes_or_text_types): # TODO(edloper): Also support full slice here? raise ValueError('Key for indexing a StructuredTensor must be a string') return self._fields[key[rank]].__getitem__(key[:rank] + key[rank + 1:]) def __repr__(self): fields = sorted(self._fields.items()) fields = ((k, str(v).replace('\n', '\n ')) for k, v in fields) fields = ('"{}": {}'.format(k, v) for k, v in fields) dict_repr = ',\n '.join(fields) return ('<StructuredTensor(\n' ' fields={\n' ' %s},\n' ' shape=%s)>' % (dict_repr, self.shape)) #============================================================================= # Conversion #============================================================================= def to_pyval(self): """Returns this StructuredTensor as a nested Python dict or list of dicts. Converts this `StructuredTensor` to a nested python value: * `StructTensors` with `rank=0` are converted into a dictionary, with an entry for each field. Field names are used as keys and field values are converted to python values. In particular: * Scalar Tensor fields are converted to simple values (such as `int` or `float` or `string`) * Non-scalar Tensor fields and RaggedTensor fields are converted to nested lists of simple values. * StructuredTensor fields are converted recursively using `to_pyval`. * `StructTensors` with `rank>0` are converted to nested python `list`s, containing one dictionary for each structure (where each structure's dictionary is defined as described above). Requires that all fields are Eager tensors. >>> tf.experimental.StructuredTensor.from_fields( ... {'a': [1, 2, 3]}, [3]).to_pyval() [{'a': 1}, {'a': 2}, {'a': 3}] Note that `StructuredTensor.from_pyval(pyval).to_pyval() == pyval`. Returns: A nested Python dict or list of dicts. """ if not self._is_eager(): raise ValueError( 'StructuredTensor.to_pyval() is only supported in eager mode.') # Convert each field value to a nested list. 
result = {} for (key, value) in self._fields.items(): if isinstance(value, ops.EagerTensor): value = value.numpy() if isinstance(value, np.ndarray): value = value.tolist() elif isinstance(value, ragged_tensor.RaggedTensor): value = value.to_list() elif isinstance(value, StructuredTensor): value = value.to_pyval() # TODO(edloper): Throw an exception if value is an unexpected type. result[key] = value # If rank>0, then re-group each value from dict-of-list to list-of-dict. if len(self.shape) > 0: # pylint: disable=g-explicit-length-test if not result: # special-case for StructuredTensors w/ no fields. return _empty_dict_pylist_from_row_partitions(self.row_partitions, self.nrows()) return _pyval_field_major_to_node_major( list(result.keys()), list(result.values()), self.rank) else: return result @classmethod def from_pyval(cls, pyval, typespec=None): """Constructs a StructuredTensor from a nested Python structure. >>> tf.experimental.StructuredTensor.from_pyval( ... {'a': [1, 2, 3], 'b': [[4, 5], [6, 7]]}) <StructuredTensor( fields={ "a": tf.Tensor([1 2 3], shape=(3,), dtype=int32), "b": <tf.RaggedTensor [[4, 5], [6, 7]]>}, shape=())> Note that `StructuredTensor.from_pyval(pyval).to_pyval() == pyval`. Args: pyval: The nested Python structure that should be used to create the new `StructuredTensor`. typespec: A `StructuredTensor.Spec` specifying the expected type for each field. If not specified, then all nested dictionaries are turned into StructuredTensors, and all nested lists are turned into Tensors (if rank<2) or RaggedTensors (if rank>=2). Returns: A `StructuredTensor`. """ return cls._from_pyval(pyval, typespec, ()) @classmethod def _from_pyval(cls, pyval, typespec, path_so_far): """Helper function for from_pyval. Args: pyval: The nested Python structure that should be used to create the new `StructuredTensor`. typespec: A `StructuredTensor.Spec` specifying the expected type for each field. If not specified, then all nested dictionaries are turned into StructuredTensors, and all nested lists are turned into Tensors (if rank<2) or RaggedTensors (if rank>=2). path_so_far: the path of fields that led here (for error messages). Returns: A `StructuredTensor`. 
""" if isinstance(pyval, dict): return cls._from_pydict(pyval, typespec, path_so_far) elif isinstance(pyval, (list, tuple)): keys = set() rank = _pyval_find_struct_keys_and_depth(pyval, keys) if rank is not None: return cls._from_pylist_of_dict(pyval, keys, rank, typespec, path_so_far) else: return cls._from_pylist_of_value(pyval, typespec, path_so_far) else: return cls._from_pyscalar(pyval, typespec, path_so_far) @classmethod def _from_pydict(cls, pyval, typespec, path_so_far): """Converts python dictionary `pyval` to a StructuredTensor with rank=0.""" if typespec is None: fields = dict((k, cls._from_pyval(v, None, path_so_far + (k,))) for (k, v) in pyval.items()) else: spec_shape = typespec._shape # pylint: disable=protected-access field_specs = typespec._field_specs # pylint: disable=protected-access if not (isinstance(typespec, StructuredTensor.Spec) and spec_shape.rank == 0 and set(pyval) == set(field_specs)): raise ValueError('Value at %r does not match typespec: %r vs %r' % (path_so_far, pyval, typespec)) fields = dict((k, cls._from_pyval(v, field_specs[k], path_so_far + (k,))) for (k, v) in pyval.items()) return StructuredTensor.from_fields(fields=fields, shape=(), validate=False) @classmethod def _from_pylist_of_dict(cls, pyval, keys, rank, typespec, path_so_far): """Converts python list `pyval` to a StructuredTensor with rank>1.""" fields = dict((key, []) for key in keys) for child in pyval: _pyval_update_fields(child, fields, 1) if typespec is None: shape = tensor_shape.TensorShape([None] * rank) for (key, target) in fields.items(): fields[key] = cls._from_pyval(target, None, path_so_far + (key,)) else: field_specs = typespec._fields # pylint: disable=protected-access if ((not isinstance(typespec, StructuredTensor.Spec)) or # pylint: disable=superfluous-parens (set(fields) - set(field_specs))): raise ValueError('Value at %r does not match typespec: %r vs %r' % (path_so_far, pyval, typespec)) shape = typespec._shape if shape.rank < rank: raise ValueError('Value at %r does not match typespec (rank mismatch): ' '%r vs %r' % (path_so_far, pyval, typespec)) for (key, spec) in field_specs.items(): fields[key] = cls._from_pyval( fields.get(key, []), spec, path_so_far + (key,)) try: if not fields and typespec is None: # TODO(b/183245576): handle cases where the typespec is known # but the dictionary is empty. 
return StructuredTensor._from_pylist_of_empty_dict(pyval, rank) return StructuredTensor.from_fields( fields=fields, shape=shape, validate=False) except Exception as exc: raise ValueError('Error parsing path %r' % (path_so_far,)) from exc @classmethod def _from_pylist_of_empty_dict(cls, pyval, rank): """Converts a pylist of empty dictionaries to StructuredTensors.""" if rank == 0: return StructuredTensor.from_fields(fields={}, shape=(), validate=False) elif rank == 1: nrows = len(pyval) shape = (nrows,) return StructuredTensor.from_fields(fields={}, shape=shape, nrows=nrows) elif rank > 1: ragged_zeros = ragged_factory_ops.constant(_dicts_to_zeros(pyval)) nrows = len(pyval) shape = tensor_shape.TensorShape([len(pyval)] + ([None] * (rank - 1))) return StructuredTensor.from_fields( fields={}, shape=shape, row_partitions=ragged_zeros._nested_row_partitions, # pylint:disable=protected-access nrows=nrows) @classmethod def _from_pylist_of_value(cls, pyval, typespec, path_so_far): """Converts python list `pyval` to a Tensor or RaggedTensor with rank>1.""" if typespec is None: try: return ragged_factory_ops.constant(pyval) except Exception as exc: raise ValueError('Error parsing path %r' % (path_so_far,)) from exc elif isinstance(typespec, tensor.TensorSpec): try: result = constant_op.constant(pyval, typespec.dtype) except Exception as exc: raise ValueError('Error parsing path %r' % (path_so_far,)) from exc if not typespec.shape.is_compatible_with(result.shape): raise ValueError('Value at %r does not match typespec: %r vs %r' % (path_so_far, typespec, pyval)) return result elif isinstance(typespec, ragged_tensor.RaggedTensorSpec): # pylint: disable=protected-access try: return ragged_factory_ops.constant( pyval, dtype=typespec._dtype, ragged_rank=typespec._ragged_rank, row_splits_dtype=typespec._row_splits_dtype, inner_shape=typespec._shape[typespec._ragged_rank + 1:]) except Exception as exc: raise ValueError('Error parsing path %r' % (path_so_far,)) from exc elif isinstance(typespec, StructuredTensor.Spec): empty_rank = _pyval_empty_list_depth(pyval) if empty_rank is None: raise ValueError('Value at %r does not match typespec: %r vs %r' % (path_so_far, typespec, pyval)) else: return cls._from_pylist_of_dict(pyval, set(), empty_rank, typespec, path_so_far) else: raise ValueError('Value at %r does not match typespec: %r vs %r' % (path_so_far, typespec, pyval)) @classmethod def _from_pyscalar(cls, pyval, typespec, path_so_far): """Converts python scalar value `pyval` to a Tensor.""" if typespec is None: try: return constant_op.constant(pyval) except Exception as exc: raise ValueError('Error parsing path %r' % (path_so_far,)) from exc else: if not (isinstance(typespec, tensor.TensorSpec) and typespec.shape.rank == 0): raise ValueError('Value at %r does not match typespec: %r vs %r' % (path_so_far, typespec, pyval)) # TODO(edloper): Check that typespec.shape matches. return constant_op.constant(pyval, typespec.dtype) #============================================================================= # Transforms #============================================================================= # TODO(edloper): Add a 'validate' option here? # TODO(edloper): Unify nomenclature with RaggedTensor. Should RaggedTensor # have a partition_outer_dimension method? def partition_outer_dimension(self, row_partition): """Partitions the outer dimension of this StructuredTensor. Returns a new `StructuredTensor` with the same values as `self`, where the outer dimension is partitioned into two (possibly ragged) dimensions. 
Requires that this StructuredTensor have an outer dimension (i.e., `self.shape.rank > 0`). >>> st = tf.experimental.StructuredTensor.from_pyval( ... [{'foo': 12}, {'foo': 33}, {'foo': 99}]) >>> partition = RowPartition.from_row_lengths([2, 0, 1]) >>> st.partition_outer_dimension(partition) <StructuredTensor( fields={ "foo": <tf.RaggedTensor [[12, 33], [], [99]]>}, shape=(3, None))> Args: row_partition: A `RowPartition`. Returns: A `StructuredTensor` with rank `values.rank + 1`. """ if not isinstance(row_partition, RowPartition): raise TypeError('row_partition must be a RowPartition.') if self.shape.rank == 0: raise ValueError('Shape %s must have rank at least 1' % self.shape) return _partition_outer_dimension(self, row_partition) def merge_dims(self, outer_axis, inner_axis): """Merges outer_axis...inner_axis into a single dimension. Returns a copy of this RaggedTensor with the specified range of dimensions flattened into a single dimension, with elements in row-major order. >>> st = tf.experimental.StructuredTensor.from_pyval( ... [[{'foo': 12}, {'foo': 33}], [], [{'foo': 99}]]) >>> st.merge_dims(0, 1) <StructuredTensor( fields={ "foo": tf.Tensor([12 33 99], shape=(3,), dtype=int32)}, shape=(3,))> Args: outer_axis: `int`: The first dimension in the range of dimensions to merge. May be negative (to index from the last dimension). inner_axis: `int`: The last dimension in the range of dimensions to merge. May be negative (to index from the last dimension). Returns: A copy of this tensor, with the specified dimensions merged into a single dimension. The shape of the returned tensor will be `self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N` is the total number of slices in the merged dimensions. """ outer_axis = array_ops.get_positive_axis( outer_axis, self.shape.rank, axis_name='outer_axis', ndims_name='rank(self)') inner_axis = array_ops.get_positive_axis( inner_axis, self.shape.rank, axis_name='inner_axis', ndims_name='rank(self)') if not outer_axis <= inner_axis: raise ValueError('Expected outer_axis (%d) to be less than or equal to ' 'inner_axis (%d)' % (outer_axis, inner_axis)) return _merge_dims(self, outer_axis, inner_axis) class Spec: """A spec for StructuredTensor.""" def __validate__(self): assert self._ragged_shape is not None @classmethod def _from_fields_and_rank(cls, fields, rank): """Creates a spec of a StructuredTensor with fields and rank.""" shape = None for (k, v) in fields.items(): field_shape_untruncated = _dynamic_ragged_shape_spec_from_spec(v) if field_shape_untruncated is None: raise ValueError(f'Cannot convert spec of {k}.') untruncated_rank = field_shape_untruncated.rank if (untruncated_rank is not None and untruncated_rank < rank): raise ValueError(f'Rank of field {k} is {untruncated_rank}, ' f'but must be at least {rank}.') field_shape = field_shape_untruncated._truncate(rank) # pylint: disable=protected-access if shape is None: shape = field_shape else: shape = shape._merge_with(field_shape) return StructuredTensor.Spec(_ragged_shape=shape, _fields=fields) @classmethod def _from_shape( cls, shape: dynamic_ragged_shape.DynamicRaggedShape ) -> 'StructuredTensor.Spec': """Creates the spec of an empty StructuredTensor.""" return StructuredTensor.Spec(_ragged_shape=shape, _fields={}) # For backwards compatibility @property def _shape(self) -> tensor_shape.TensorShape: return self._ragged_shape._to_tensor_shape() # pylint: disable=protected-access # For backwards compatibility @property def _field_specs(self) -> Dict[str, type_spec.TypeSpec]: return 
self._fields # For backwards compatibility @property def shape(self) -> tensor_shape.TensorShape: return self._shape # For backwards compatibility @property def rank(self): return self._ragged_shape.rank # Regular expression used to determine whether a string is a valid field name. # Note: we plan to relax (or possibly eliminate) this in the future; you # should not rely on the fact that some field names are currently disallowed. _FIELD_NAME_RE = re.compile('^[a-zA-Z][a-zA-Z0-9_]*$') #============================================================================= # Helper functions #============================================================================= # TODO(edloper): Move some of these helpers to row_partition.py? def _convert_to_structured_field_value(value): """Converts `value` to a Tensor, RaggedTensor, or StructuredTensor.""" if isinstance(value, (tensor.Tensor, ragged_tensor.RaggedTensor, StructuredTensor)): return value elif ragged_tensor.is_ragged(value): return ragged_tensor.convert_to_tensor_or_ragged_tensor(value) elif isinstance(value, extension_type.ExtensionType): return value else: try: return ops.convert_to_tensor(value) except (ValueError, TypeError) as e: raise TypeError('Unexpected type for value in `fields`: %r' % value) from e def _find_shape_dtype( fields: Mapping[str, _FieldValue], nrows: Optional[tensor.Tensor], row_partitions: Optional[Sequence[RowPartition]]) -> dtypes.DType: """Return a consistent dtype for fields, nrows, & row_partitions. In the future, the default will switch from int64 to int32, but for now, we stick with int64. Args: fields: the fields of the StructuredTensor. nrows: the nrows of the StructuredTensor row_partitions: the row_partitions of the StructuredTensor. Returns: If anything requires int64, then return int64. If int32 is explicitly specified, return int32. Otherwise, return int64. """ field_dtypes = [_field_shape_dtype(v) for v in fields.values()] nrows_dtypes = [nrows.dtype] if isinstance(nrows, tensor.Tensor) else [] rp_dtypes = [] if row_partitions is None else [ rp.dtype for rp in row_partitions ] all_dtypes = field_dtypes + nrows_dtypes + rp_dtypes if dtypes.int64 in all_dtypes: return dtypes.int64 if dtypes.int32 in all_dtypes: return dtypes.int32 # TODO(martinz): Eventually, shift this to tf.int32. return dtypes.int64 def _merge_nrows(nrows, static_nrows, value, dtype, validate): """Merges `nrows` with `nrows(value)`. Checks that `value` has the expected number of rows (`nrows`), and returns `nrows`. If `validate` is true, then add validation ops that check that the `nrows` values match. Args: nrows: scalar integer Tensor. static_nrows: tf.Dimension: static value of nrows, if known. value: Tensor or RaggedTensor or StructuredTensor dtype: dtype for `nrows`. validate: bool -- whether to add validation ops. Returns: A tuple `(nrows, static_nrows)`. """ static_value_nrows = tensor_shape.dimension_at_index(value.shape, 0) if isinstance(value, tensor.Tensor): value_nrows = array_ops.shape(value, out_type=dtype)[0] else: value_nrows = value.nrows() if nrows is None: nrows = value_nrows elif (static_value_nrows.value is not None and static_nrows.value is not None): if not static_value_nrows.is_compatible_with(static_nrows): raise ValueError('fields have incompatible nrows') nrows = value_nrows # No need to add an assertion op. 
elif validate: nrows = control_flow_ops.with_dependencies([ check_ops.assert_equal( nrows, value_nrows, message='fields have incompatible nrows') ], nrows) return nrows, static_nrows._merge_with(static_value_nrows) # pylint: disable=protected-access def _merge_row_partitions(row_partitions, value, rank, dtype, validate): """Merges `row_partitions` with `row_partitions(value)`.""" if isinstance(value, tensor.Tensor): value_row_partitions = _row_partitions_for_tensor(value, rank, dtype) elif isinstance(value, ragged_tensor.RaggedTensor): value_row_partitions = _row_partitions_for_ragged_tensor(value, rank, dtype) else: assert isinstance(value, StructuredTensor), type(value) value_row_partitions = value.row_partitions[:rank - 1] assert len(value_row_partitions) == rank - 1 if row_partitions is None: return tuple(value_row_partitions) else: return tuple([ p1._merge_precomputed_encodings(p2, validate) # pylint: disable=protected-access for (p1, p2) in zip(row_partitions, value_row_partitions) ]) def _row_partitions_for_tensor(value, rank, dtype): """Returns the row partitions for a tf.Tensor.""" shape = array_ops.shape(value, out_type=dtype) return _row_partitions_for_uniform_shape(shape, rank) def _row_partitions_for_ragged_tensor(value, rank, dtype): """Returns the row partitions for a tf.RaggedTensor.""" assert rank > 1 value_row_partitions = value._nested_row_partitions[:rank - 1] # pylint: disable=protected-access if len(value_row_partitions) < (rank - 1): value_row_partitions += _row_partitions_for_tensor( value.flat_values, rank - len(value_row_partitions), dtype) assert len(value_row_partitions) == rank - 1 return value_row_partitions def _row_partitions_for_uniform_shape(shape, rank): """Returns row partitions for the given shape Tensor. Args: shape: A vector describing a uniform shape. rank: The number of dimensions to generate row partitions for Returns: A list of (rank-1) `RowPartition`s with uniform row length. """ shape_cumprod = math_ops.cumprod(shape[:rank]) # pylint: disable=g-complex-comprehension return tuple([ RowPartition.from_uniform_row_length( uniform_row_length=shape[i + 1], nvals=shape_cumprod[i + 1], nrows=shape_cumprod[i]) for i in range(rank - 1) ]) def _pyval_field_major_to_node_major(keys, values, depth): """Regroup each field (k, v) from dict-of-list to list-of-dict. Given a "field-major" encoding of the StructuredTensor (which maps each key to a single nested list containing the values for all structs), return a corresponding "node-major" encoding, consisting of a nested list of dicts. Args: keys: The field names (list of string). Must not be empty. values: The field values (list of python values). Must have the same length as `keys`. depth: The list depth at which dictionaries should be created. Returns: A nested list of dict, with depth `depth`. """ assert keys if depth == 0: return dict(zip(keys, values)) nvals = len(values[0]) assert all(nvals == len(values[i]) for i in range(1, len(values))) return [ _pyval_field_major_to_node_major(keys, value_slice, depth - 1) for value_slice in zip(*values) ] def _empty_dict_pylist_from_row_partitions(row_partitions, nrows): """Returns a python list of empty dicts from the given row partitions. Args: row_partitions: The row-partitions describing the ragged shape of the result. nrows: The number of rows in the outermost row-partition. (Or if `len(row_partitions)==0`, then the number of empty dicts to return.) Returns: A nested python list whose leaves (if any) are empty python dicts. 
""" if not row_partitions: return [{} for _ in range(nrows)] else: values = _empty_dict_pylist_from_row_partitions( row_partitions[1:], row_partitions[0].row_splits()[-1]) splits = row_partitions[0].row_splits() return [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)] def _pyval_find_struct_keys_and_depth(pyval, keys): """Finds the keys & depth of nested dictionaries in `pyval`. Args: pyval: A nested structure of lists, tuples, and dictionaries. keys: (output parameter) A set, which will be updated with any keys that are found in the nested dictionaries. Returns: The nesting depth of dictionaries in `pyval`, or `None` if `pyval` does not contain any dictionaries. Raises: ValueError: If dictionaries have inconsistent depth. """ if isinstance(pyval, dict): keys.update(pyval.keys()) return 0 elif isinstance(pyval, (list, tuple)): depth = None for child in pyval: child_depth = _pyval_find_struct_keys_and_depth(child, keys) if child_depth is not None: if depth is None: depth = child_depth + 1 elif depth != child_depth + 1: raise ValueError('Inconsistent depth of dictionaries') return depth else: return None def _pyval_update_fields(pyval, fields, depth): """Append the field values from `pyval` to `fields`. Args: pyval: A python `dict`, or nested list/tuple of `dict`, whose value(s) should be appended to `fields`. fields: A dictionary mapping string keys to field values. Field values extracted from `pyval` are appended to this dictionary's values. depth: The depth at which `pyval` should be appended to the field values. """ if not isinstance(pyval, (dict, list, tuple)): raise ValueError('Expected dict or nested list/tuple of dict') for (key, target) in fields.items(): for _ in range(1, depth): target = target[-1] target.append(pyval[key] if isinstance(pyval, dict) else []) if isinstance(pyval, (list, tuple)): for child in pyval: _pyval_update_fields(child, fields, depth + 1) def _pyval_empty_list_depth(pyval): """Find the max depth for nested empty lists. Args: pyval: A nested python list. Returns: The maximum depth of empty lists in `pyval`, or None if `pyval` contains anything other than nested empty lists. """ if isinstance(pyval, list): if not pyval: return 1 depths = [_pyval_empty_list_depth(v) for v in pyval] if any(depth is None for depth in depths): return None else: return max(depths) + 1 else: return None def _replace_row_partitions(value, new_partitions): """Updates `value` to use `new_partitions` as its (outer) row partitions. This is used to ensure that all fields in a `StructuredTensor` use identical `RowPartition` objects for the shared dimensions. In particular, `StructuredTensor.from_fields` first merges all of the row partitions from any fields, and then replaces the outer row partitions of all fields with the merged row partitions (using this function). Args: value: A `Tensor`, `RaggedTensor`, or `StructuredTensor`. new_partitions: A list of row-partitions that should be used by `value`. Must be equivalent to `value`'s current row partitions. Returns: A value that is equivalent to `value`, where outer row partitions have been replaced by `new_partitions`. 
""" if isinstance(value, tensor.Tensor) or not new_partitions: return value elif isinstance(value, ragged_tensor.RaggedTensor): return ragged_tensor.RaggedTensor._from_row_partition( # pylint: disable=protected-access values=_replace_row_partitions(value.values, new_partitions[1:]), row_partition=new_partitions[0]) else: assert isinstance(value, StructuredTensor) new_fields = dict((k, _replace_row_partitions(v, new_partitions)) for (k, v) in value._fields.items()) return StructuredTensor._old_init( # pylint: disable=protected-access fields=new_fields, shape=value.shape, nrows=value.nrows(), row_partitions=tuple(new_partitions) + tuple(value.row_partitions[len(new_partitions):])) def _partition_outer_dimension(value, row_partition): """Partitions the outer dimension of `value` using `row_partitions`. Examples: >>> partition = RowPartition.from_row_lengths([2, 0, 1]) >>> _partition_outer_dimension(tf.constant([1, 2, 3]), partition) <tf.RaggedTensor [[1, 2], [], [3]]> >>> struct_value = tf.experimental.StructuredTensor.from_pyval( ... [{'x': 1}, {'x': 2}, {'x': 3}]) >>> _partition_outer_dimension(struct_value, partition) <StructuredTensor( fields={ "x": <tf.RaggedTensor [[1, 2], [], [3]]>}, shape=(3, None))> Args: value: Tensor, RaggedTensor, or StructuredTensor row_partition: RowPartition Returns: A value with the same type as `value`, where `result.rank = value.rank + 1`. """ is_ragged = row_partition.uniform_row_length() is None if isinstance(value, tensor.Tensor) and not is_ragged: new_shape = array_ops.concat( [[row_partition.nrows(), row_partition.uniform_row_length()], array_ops.shape(value, out_type=row_partition.dtype)[1:]], axis=0) return array_ops.reshape(value, new_shape) elif isinstance(value, (tensor.Tensor, ragged_tensor.RaggedTensor)): return ragged_tensor.RaggedTensor._from_row_partition( # pylint: disable=protected-access value, row_partition) else: assert isinstance(value, StructuredTensor) nrows = row_partition.static_nrows ncols = row_partition.static_uniform_row_length shape = tensor_shape.TensorShape([nrows, ncols]).concatenate(value.shape[1:]) fields = dict((k, _partition_outer_dimension(v, row_partition)) for (k, v) in value._fields.items()) return StructuredTensor._old_init( # pylint: disable=protected-access fields, shape, row_partition.nrows(), (row_partition,) + value.row_partitions) def _merge_dims(value, outer_axis, inner_axis): """Merges `outer_axis...inner_axis` of `value` into a single dimension.""" assert outer_axis < inner_axis if isinstance(value, (tensor.Tensor, ragged_tensor.RaggedTensor)): return ragged_tensor.merge_dims(value, outer_axis, inner_axis) else: assert isinstance(value, StructuredTensor) fields = dict((k, _merge_dims(v, outer_axis, inner_axis)) for (k, v) in value._fields.items()) ragged_shape = value._ragged_shape._merge_dims( # pylint: disable=protected-access outer_axis, inner_axis) return StructuredTensor(fields, ragged_shape) _structured_tensor_factory_key = object() # unique private object def _dynamic_ragged_shape_spec_from_spec( spec: Union[dynamic_ragged_shape.DynamicRaggedShape.Spec, ragged_tensor.RaggedTensorSpec, StructuredTensor.Spec, tensor.TensorSpec] ) -> dynamic_ragged_shape.DynamicRaggedShape.Spec: if isinstance(spec, StructuredTensor.Spec): return spec._ragged_shape # pylint: disable=protected-access else: return dynamic_ragged_shape.DynamicRaggedShape.Spec._from_spec(spec) # pylint: disable=protected-access def _normalize_field_name_to_tuple(name: 'FieldName') -> Sequence[str]: """FieldName can be given also as string, 
this normalizes it to a tuple."""
  if isinstance(name, str):
    return (name,)
  if isinstance(name, list):
    return tuple(name)
  assert isinstance(name, tuple)
  return name


def _dicts_to_zeros(pyval):
  """Replaces dictionaries with zeros in a pylist."""
  if isinstance(pyval, dict):
    return 0
  return [_dicts_to_zeros(x) for x in pyval]


def _merge_dims_generic(source, outer, inner):
  """Merges outer_axis...inner_axis into a single dimension.

  If outer == inner, this is a NOOP. If inner < outer, then this fails.
  If inner >= source.shape.rank, then the behavior is undefined.

  Args:
    source: a tensor, ragged tensor, or structured tensor.
    outer: a python int, indicating the first dimension to compress (must be
      nonnegative).
    inner: a python int, indicating the first dimension to keep (of the tail)
      (must be nonnegative).

  Returns:
    source with outer_axis...inner_axis merged into a single dimension.
  """
  if isinstance(source, StructuredTensor):
    return source.merge_dims(outer, inner)
  else:
    return ragged_tensor.merge_dims(source, outer, inner)


def _dynamic_ragged_shape_from_tensor(
    field, dtype=None) -> dynamic_ragged_shape.DynamicRaggedShape:
  """Extension of DynamicRaggedShape.from_tensor to support StructuredTensor."""
  if isinstance(field, StructuredTensor):
    return field._ragged_shape  # pylint: disable=protected-access
  shape = array_ops.shape_v2(field, out_type=dtype)
  if isinstance(shape, tensor.Tensor):
    return dynamic_ragged_shape.DynamicRaggedShape(
        row_partitions=[], inner_shape=shape)
  elif isinstance(shape, dynamic_ragged_shape.DynamicRaggedShape):
    return shape
  # TODO(martinz): add a test for the following line.
  raise TypeError(f'Expected shape tf.shape({field}) to return a Tensor or a '
                  f'DynamicRaggedShape. Instead, got: {shape}.')


def _merge_with_optional(
    a: Optional[dynamic_ragged_shape.DynamicRaggedShape],
    b: Optional[dynamic_ragged_shape.DynamicRaggedShape]
) -> Optional[dynamic_ragged_shape.DynamicRaggedShape]:
  if a is None:
    return b
  if b is None:
    return a
  return a._merge_with(b)  # pylint: disable=protected-access


def _shape_from_fields(
    fields, rank: int,
    dtype: dtypes.DType) -> Optional[dynamic_ragged_shape.DynamicRaggedShape]:
  """Given fields, rank, and dtype, create a shape."""
  field_shape = None
  for (k, field) in fields.items():
    try:
      next_field_shape_raw = _dynamic_ragged_shape_from_tensor(
          field, dtype=dtype)
      next_field_shape = next_field_shape_raw[:rank]
      field_shape = _merge_with_optional(field_shape, next_field_shape)
    except Exception as err:
      raise ValueError(f'Error in shape of {k}') from err
  return field_shape


def _field_shape_dtype(field: _FieldValue) -> Optional[dtypes.DType]:
  if isinstance(field, ragged_tensor.RaggedTensor):
    return field._row_partition.dtype  # pylint: disable=protected-access
  if isinstance(field, StructuredTensor):
    return field._ragged_shape.dtype  # pylint: disable=protected-access
  return None


def _field_with_shape_dtype(field: _FieldValue,
                            dtype: dtypes.DType) -> _FieldValue:
  if isinstance(field, ragged_tensor.RaggedTensor):
    return field.with_row_splits_dtype(dtype)
  if isinstance(field, StructuredTensor):
    return field.with_shape_dtype(dtype)
  return field


def _fields_with_dtype(fields: Mapping[str, _FieldValue],
                       dtype: dtypes.DType) -> Mapping[str, _FieldValue]:
  return {k: _field_with_shape_dtype(v, dtype) for (k, v) in fields.items()}


# pylint:disable=protected-access
def _dynamic_ragged_shape_init(fields, shape, nrows, row_partitions):
  """Produce a DynamicRaggedShape for StructuredTensor."""
  assert isinstance(fields, dict), fields
  assert isinstance(shape,
tensor_shape.TensorShape), shape assert nrows is None or isinstance(nrows, tensor.Tensor) or isinstance( nrows, int), nrows assert row_partitions is None or isinstance(row_partitions, tuple), row_partitions rank = shape.rank if rank is None: raise TypeError("StructuredTensor's shape must have known rank.") # TODO(martinz): figure out whether to validate. dtype = _find_shape_dtype(fields, nrows, row_partitions) fields = _fields_with_dtype(fields, dtype) result = None if shape.is_fully_defined(): result = dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape( shape.as_list(), dtype=dtype) if rank == 0: return dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape( array_ops.zeros((0,), dtype=dtype)) result = _merge_with_optional(result, _shape_from_fields(fields, rank, dtype)) if rank == 1: alt_value = tensor_shape.dimension_value(shape[0]) if alt_value is not None: nrows = alt_value if nrows is not None: result = _merge_with_optional( result, dynamic_ragged_shape.DynamicRaggedShape._from_inner_shape( [nrows], dtype=dtype)) if result is None: raise ValueError('Must specify `nrows`, a fully specified `shape`,' + ' or have `fields` if `rank=1`') return result if row_partitions: result = _merge_with_optional( result, dynamic_ragged_shape.DynamicRaggedShape.from_row_partitions( row_partitions, dtype=dtype)) if result is None: raise ValueError('Must specify row_partitions, a fully specified shape, ' + 'or have fields if rank > 1') return result # TODO(martinz): Drop this method or rename. def StructuredTensorSpec(shape, field_specs): # pylint:disable=invalid-name """A placeholder for the old StructuredTensorSpec.""" if not isinstance(field_specs, dict): raise TypeError('field_specs must be a dictionary.') for k in field_specs.keys(): if not isinstance(k, str): raise TypeError('field_specs must be a dictionary with string keys.') for v in field_specs.values(): if not isinstance(v, type_spec.TypeSpec): raise TypeError('field_specs must be a dictionary with TypeSpec values.') shape = dynamic_ragged_shape.DynamicRaggedShape.Spec._from_tensor_shape( tensor_shape.as_shape(shape), 0, dtypes.int32) rank = shape.rank if rank is None: raise TypeError("StructuredTensor's shape must have known rank.") for (k, v) in field_specs.items(): field_shape_untruncated = _dynamic_ragged_shape_spec_from_spec(v) if field_shape_untruncated is None: raise ValueError(f'Cannot convert spec of {k}.') untruncated_rank = field_shape_untruncated.rank if (untruncated_rank is not None and untruncated_rank < rank): raise ValueError(f'Rank of field {k} is {untruncated_rank},' f' but must be at least {rank}.') field_shape = field_shape_untruncated._truncate(rank) shape = shape._merge_with(field_shape) return StructuredTensor.Spec(_ragged_shape=shape, _fields=field_specs)
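A minimal usage sketch, not part of the module above: it exercises only calls that the docstrings above document (`from_pyval`, `from_fields`, `field_value`, `row_partitions`, `to_pyval`) to make the field / row-partition model concrete. The variable names and the `__main__` guard are illustrative additions.

if __name__ == '__main__':
  import tensorflow as tf

  # A ragged batch of two structures; the ragged field becomes a RaggedTensor.
  docs = tf.experimental.StructuredTensor.from_pyval(
      [{'tokens': [1, 2, 3]}, {'tokens': [4]}])
  print(docs.shape)                  # (2,)
  print(docs.field_value('tokens'))  # <tf.RaggedTensor [[1, 2, 3], [4]]>
  print(docs.row_partitions)         # () -- rank 1, so no row partitions

  # Fields sharing the leading dimension can be assembled directly, and
  # to_pyval() round-trips back to the list-of-dicts form.
  st = tf.experimental.StructuredTensor.from_fields(
      {'x': [1, 2], 'y': [10.0, 20.0]}, shape=[2])
  print(st.to_pyval())               # [{'x': 1, 'y': 10.0}, {'x': 2, 'y': 20.0}]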
PypiClean
/Slune-1.0.16.tar.gz/Slune-1.0.16/medoc.py
# Slune # Copyright (C) 2002-2003 Jean-Baptiste LAMY # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA from __future__ import generators import time, random, math import soya, soya.widget as widget, soya.opengl as soyaopengl import py2play.level as py2p_level, py2play.action as action, py2play.character as character import slune.character as slune_character import slune.controler import slune.globdef as globdef, slune.sound as sound, slune.level as slune_level class Medoc(soya.Volume): radius = 2.5 def __init__(self, parent = None): soya.Volume.__init__(self, parent, soya.Shape.get("medoc2")) def begin_round(self): for character in self.parent.characters: if isinstance(character, slune_character.Competitor) and (character.distance_to(self) < self.radius): sound.play("flag-1.wav", character) firework = soya.Smoke(character.level, nb_particles = 10, removable = 1) firework.move(self) firework.regenerate() if hasattr(character, "nb_medoc"): character.nb_medoc += 1 if character.nb_medoc >= self.nb_max: from py2play.idler import IDLER IDLER.level_completed(character, 1, _("__medoc_winner__")) else: character.nb_medoc = 1 #if character.vehicle == 2: # character.medoc = soya.Volume(character.internal, soya.Shape.get("medoc2")) self.parent.remove(self) def add_medocs(level, nb, minimap = 1): down = soya.Vector(level, 0.0, -1.0, 0.0) for i in range(nb): m = Medoc(level) m.nb_max = nb while 1: m.set_xyz(level.random.uniform(5.0, 200.0), 1000.0, level.random.uniform(5.0, 200.0)) r = level.raypick(m, down, -1, 3) if r: r[0].convert_to(level) m.y = r[0].y + 0.5 break from py2play.idler import IDLER if minimap: IDLER.no_blackbands_group.insert(0, MiniMap(level)) class MiniMap(widget.Widget): def __init__(self, level): self.level = level self.point = soya.Point() self.top = 10 self.width = 200 self.height = 200 self.left = soya.root_widget.width - self.width - 40 medocs = [x for x in level if isinstance(x, Medoc)] min_x = min_y = 10e1000 max_x = max_y = -10e1000 for medoc in medocs: if medoc.x < min_x: min_x = medoc.x if medoc.x > max_x: max_x = medoc.x if medoc.z < min_y: min_y = medoc.z if medoc.z > max_y: max_y = medoc.z f_x = self.width / (max_x - min_x) f_y = self.height / (max_y - min_y) if f_x < f_y: f = f_x left = self.left else: f = f_y left = self.left + ((max_y - min_y) - (max_x - min_x)) * f top = self.top self.f = f self.cleft = left self.ctop = top self.min_x = min_x self.min_y = min_y def resize(self, parent_left, parent_top, parent_width, parent_height): self.left = parent_width - self.width - 40 self.screen_width = parent_width self.screen_height = parent_height def render(self): soyaopengl.glColor4f(1.0, 1.0, 1.0, 1.0) soyaopengl.glBegin(soyaopengl.GL_QUADS) for medoc in [x for x in self.level if isinstance(x, Medoc)]: x = self.cleft + self.f * (medoc.x - self.min_x) y = self.ctop + self.f * (medoc.z - self.min_y) soyaopengl.glVertex2f(x - 4.0, y - 
4.0) soyaopengl.glVertex2f(x - 4.0, y + 4.0) soyaopengl.glVertex2f(x + 4.0, y + 4.0) soyaopengl.glVertex2f(x + 4.0, y - 4.0) soyaopengl.glEnd() for character in self.level.characters: if isinstance(character, slune_character.Competitor): x = self.cleft + self.f * (character.x - self.min_x) y = self.ctop + self.f * (character.z - self.min_y) self.point.__init__(character, 0.0, 0.0, -25.0) self.point.convert_to(self.level) x2 = self.cleft + self.f * (self.point.x - self.min_x) y2 = self.ctop + self.f * (self.point.z - self.min_y) soya.DEFAULT_MATERIAL.activate() soyaopengl.glBegin(soyaopengl.GL_LINES) soyaopengl.glVertex2f(x , y ) soyaopengl.glVertex2f(x2, y2) soyaopengl.glEnd() material = soya.Material.get("head_" + character.perso_name + "_1") material.activate() soyaopengl.glEnable(soyaopengl.GL_BLEND) soyaopengl.glBegin(soyaopengl.GL_QUADS) soyaopengl.glTexCoord2f(0.0, 0.0); soyaopengl.glVertex2f(x - 20.0, y - 20.0) soyaopengl.glTexCoord2f(0.0, 1.0); soyaopengl.glVertex2f(x - 20.0, y + 20.0) soyaopengl.glTexCoord2f(1.0, 1.0); soyaopengl.glVertex2f(x + 20.0, y + 20.0) soyaopengl.glTexCoord2f(1.0, 0.0); soyaopengl.glVertex2f(x + 20.0, y - 20.0) soyaopengl.glEnd() soyaopengl.glDisable(soyaopengl.GL_BLEND) soya.DEFAULT_MATERIAL.activate() class MedocWaiter(soya.Volume): def __init__(self, parent, x, y, z): soya.Volume.__init__(self, parent, soya.Shape.get("girafe1")) self.set_xyz(x, y, z) self.medoc_received = 0 def begin_round(self): if not self.medoc_received: from py2play.player import CURRENT_PLAYER if CURRENT_PLAYER.character.distance_to(self) < 10.0: self.medoc_received = 1 sound.play("flag-1.wav", self) GiveMedoc(self.parent, CURRENT_PLAYER.character, self) class GiveMedoc(soya.Volume): def __init__(self, parent, character, receiver): soya.Volume.__init__(self, parent, soya.Shape.get("medoc2")) self.move (soya.Point (character, 0.0, 0.5, 0.3)) self.look_at(soya.Vector(character, 0.0, 0.0, -1.0)) self.receiver = receiver def begin_round(self): if self.distance_to(self.receiver) < 1.0: for e in self.parent: if isinstance(e, MedocWaiter) and (e.medoc_received == 0): break else: import slune.videosequence as videosequence videosequence.Mission14Outro().start( tux = videosequence.PLAYER, gnu = videosequence.add_perso("gnu" , 1), shark = videosequence.add_perso("shark", 4), ) self.parent.remove(self) def advance_time(self, proportion): if self.parent: v = self >> self.receiver v.set_length(0.2 * proportion) self += v class MedocWaiterMiniMap(widget.Widget): def __init__(self, level): self.level = level self.point = soya.Point() self.top = 10 self.width = 200 self.height = 200 self.left = soya.root_widget.width - self.width - 40 flags = [e for e in level.children if isinstance(e, MedocWaiter)] min_x = min_y = 10e1000 max_x = max_y = -10e1000 for flag in flags: if flag.x < min_x: min_x = flag.x if flag.x > max_x: max_x = flag.x if flag.z < min_y: min_y = flag.z if flag.z > max_y: max_y = flag.z f_x = self.width / (max_x - min_x) f_y = self.height / (max_y - min_y) if f_x < f_y: f = f_x left = self.left else: f = f_y left = self.left + ((max_y - min_y) - (max_x - min_x)) * f top = self.top self.f = f self.cleft = left self.ctop = top self.min_x = min_x self.min_y = min_y self.flags_pos = [] self._calllist = soyaopengl.glGenList() soyaopengl.glNewList(self._calllist) soyaopengl.glColor4f(1.0, 1.0, 1.0, 1.0) soyaopengl.glBegin(soyaopengl.GL_QUADS) for flag in flags: x = left + f * (flag.x - min_x) y = top + f * (flag.z - min_y) soyaopengl.glVertex2f(x - 4.0, y - 4.0) soyaopengl.glVertex2f(x - 
4.0, y + 4.0) soyaopengl.glVertex2f(x + 4.0, y + 4.0) soyaopengl.glVertex2f(x + 4.0, y - 4.0) soyaopengl.glEnd() soyaopengl.glEndList() def resize(self, parent_left, parent_top, parent_width, parent_height): self.left = parent_width - self.width - 40 self.screen_width = parent_width self.screen_height = parent_height def render(self): soyaopengl.glCallList(self._calllist) for character in self.level.characters: if isinstance(character, slune_character.Competitor): x = self.cleft + self.f * (character.x - self.min_x) y = self.ctop + self.f * (character.z - self.min_y) self.point.__init__(character, 0.0, 0.0, -25.0) self.point.convert_to(self.level) x2 = self.cleft + self.f * (self.point.x - self.min_x) y2 = self.ctop + self.f * (self.point.z - self.min_y) soya.DEFAULT_MATERIAL.activate() soyaopengl.glBegin(soyaopengl.GL_LINES) soyaopengl.glVertex2f(x , y ) soyaopengl.glVertex2f(x2, y2) soyaopengl.glEnd() material = soya.Material.get("head_" + character.perso_name + "_1") material.activate() soyaopengl.glEnable(soyaopengl.GL_BLEND) soyaopengl.glBegin(soyaopengl.GL_QUADS) soyaopengl.glTexCoord2f(0.0, 0.0); soyaopengl.glVertex2f(x - 20.0, y - 20.0) soyaopengl.glTexCoord2f(0.0, 1.0); soyaopengl.glVertex2f(x - 20.0, y + 20.0) soyaopengl.glTexCoord2f(1.0, 1.0); soyaopengl.glVertex2f(x + 20.0, y + 20.0) soyaopengl.glTexCoord2f(1.0, 0.0); soyaopengl.glVertex2f(x + 20.0, y - 20.0) soyaopengl.glEnd() soyaopengl.glDisable(soyaopengl.GL_BLEND) soya.DEFAULT_MATERIAL.activate()
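Both minimap widgets above rely on the same fit-and-offset mapping from level coordinates (x, z) to widget pixels. Below is a minimal standalone sketch of that mapping with hypothetical names and data (no soya dependency), assuming the same "smallest scale factor wins" rule used in MiniMap.__init__:

def minimap_transform(points, left, top, width, height):
    """Return a function mapping level (x, z) to widget pixels, as in MiniMap."""
    min_x = min(p[0] for p in points)
    max_x = max(p[0] for p in points)
    min_y = min(p[1] for p in points)
    max_y = max(p[1] for p in points)
    f_x = width / (max_x - min_x)
    f_y = height / (max_y - min_y)
    if f_x < f_y:
        f, cleft = f_x, left
    else:
        # Keep the aspect ratio; shift right by the unused horizontal room.
        f, cleft = f_y, left + ((max_y - min_y) - (max_x - min_x)) * f_y
    return lambda x, z: (cleft + f * (x - min_x), top + f * (z - min_y))

# Hypothetical usage: three medoc positions in level coordinates.
to_screen = minimap_transform([(10.0, 40.0), (150.0, 60.0), (80.0, 190.0)],
                              left=600, top=10, width=200, height=200)
print(to_screen(80.0, 190.0))  # pixel position of the third medoc on the widget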
PypiClean
/gvec_to_python-1.1.3-py3-none-any.whl/gvec_to_python/hylife/utilities_FEEC/bsplines_kernels.py
# from pyccel.decorators import types from numpy import empty, zeros # ============================================================================== def scaling(t_d: 'float[:]', p_d: int, span_d: int, values: 'float[:]'): """ Scales local B-spline values to M-spline values Parameters ---------- knots : array_like Knots sequence. degree : int Polynomial degree of B-splines. span : int Knot span index. Returns ------- x : array_like Scaling vector with elements (p + 1)/(t[i + p + 1] - t[i]) """ for il in range(p_d + 1): i = span_d - il values[p_d - il] *= (p_d + 1)/(t_d[i + p_d + 1] - t_d[i]) # ============================================================================== def find_span(t: 'float[:]', p: int, eta: float): # Knot index at left/right boundary low = p high = 0 high = len(t) - 1 - p # Check if point is exactly on left/right boundary, or outside domain if eta <= t[low]: returnVal = low elif eta >= t[high]: returnVal = high - 1 else: # Perform binary search span = (low + high)//2 while eta < t[span] or eta >= t[span + 1]: if eta < t[span]: high = span else: low = span span = (low + high)//2 returnVal = span return returnVal # ============================================================================= def basis_funs(t: 'float[:]', p: int, eta: float, span: int, left: 'float[:]', right: 'float[:]', values: 'float[:]'): """ Parameters ---------- t : array_like Knots sequence. p : int Polynomial degree of B-splines. eta : double Evaluation point. span : int Knot span index. Returns ------- values : numpy.ndarray Values of p + 1 non-vanishing B-Splines at location eta. """ left[:] = 0. right[:] = 0. values[0] = 1. for j in range(p): left[j] = eta - t[span - j] right[j] = t[span + 1 + j] - eta saved = 0. for r in range(j + 1): temp = values[r]/(right[r] + left[j - r]) values[r] = saved + right[r] * temp saved = left[j - r] * temp values[j + 1] = saved # ============================================================================= def basis_funs_all(t: 'float[:]', p: int, eta: float, span: int, left: 'float[:]', right: 'float[:]', values: 'float[:, :]', diff: 'float[:]'): """ Parameters ---------- t : array_like Knots sequence. p : int Polynomial degree of B-splines. eta : double Evaluation point. span : int Knot span index. Returns ------- values : numpy.ndarray Values of (p + 1, p + 1) non-vanishing B-Splines at location eta. diff : np.ndarray Scaling array (p) for M-splines. """ left[:] = 0. right[:] = 0. values[:, :] = 0. values[0, 0] = 1. for j in range(p): left[j] = eta - t[span - j] right[j] = t[span + 1 + j] - eta saved = 0. for r in range(j + 1): diff[r] = 1. / (right[r] + left[j - r]) temp = values[j, r] * diff[r] values[j + 1, r] = saved + right[r] * temp saved = left[j - r] * temp values[j + 1, j + 1] = saved diff[:] = diff*p # ============================================================================= def basis_funs_and_der(t: 'float[:]', p: int, eta: float, span: int, left: 'float[:]', right: 'float[:]', values: 'float[:, :]', diff: 'float[:]', der: 'float[:]'): """ Parameters ---------- t : array_like Knots sequence. p : int Polynomial degree of B-splines. eta : double Evaluation point. span : int Knot span index. left : array_like p left values right : array_like p right values values_all : array_like Returns ------- values : numpy.ndarray Values of (2, p + 1) non-vanishing B-Splines and derivatives at location eta. """ left[:] = 0. right[:] = 0. values[:, :] = 0. values[0, 0] = 1. 
for j in range(p): left[j] = eta - t[span - j] right[j] = t[span + 1 + j] - eta saved = 0. for r in range(j + 1): diff[r] = 1. / (right[r] + left[j - r]) temp = values[j, r] * diff[r] values[j + 1, r] = saved + right[r] * temp saved = left[j - r] * temp values[j + 1, j + 1] = saved diff[:] = diff*p # compute derivatives # j = 0 saved = values[p - 1, 0]*diff[0] der[0] = -saved # j = 1, ... , p for j in range(1, p): temp = saved saved = values[p - 1, j]*diff[j] der[j] = temp - saved # j = p der[p] = saved # ============================================================================== def basis_funs_1st_der(t: 'float[:]', p: int, eta: float, span: int, left: 'float[:]', right: 'float[:]', values: 'float[:]'): """ Parameters ---------- t : array_like Knots sequence. p : int Polynomial degree of B-splines. eta : double Evaluation point. span : int Knot span index. Returns ------- values : numpy.ndarray Derivatives of p + 1 non-vanishing B-Splines at location eta. """ # Compute nonzero basis functions and knot differences for splines up to degree p - 1 values_b = empty(p + 1, dtype=float) basis_funs(t, p - 1, eta, span, left, right, values_b) # Compute derivatives at x using formula based on difference of splines of degree p - 1 # ------- # j = 0 saved = p * values_b[0] / (t[span + 1] - t[span + 1 - p]) values[0] = -saved # j = 1, ... , p - 1 for j in range(1, p): temp = saved saved = p * values_b[j] / (t[span + j + 1] - t[span + j + 1 - p]) values[j] = temp - saved # j = degree values[p] = saved # ============================================================================== def basis_funs_all_ders(knots: 'float[:]', degree: int, eta: float, span: int, left: 'float[:]', right: 'float[:]', n: int, ders: 'float[:, :]'): """ Evaluate value and n derivatives at eta of all basis functions with support in interval [x_{span-1}, x_{span}]. ders[i,j] = (d/deta)^i B_k(eta) with k=(span-degree+j), for 0 <= i <= n and 0 <= j <= degree+1. Parameters ---------- knots : array_like Knots sequence. degree : int Polynomial degree of B-splines. eta : float Evaluation point. span : int Knot span index. n : int Max derivative of interest. Results ------- ders : numpy.ndarray (n+1,degree+1) 2D array of n+1 (from 0-th to n-th) derivatives at eta of all (degree+1) non-vanishing basis functions in given span. Notes ----- The original Algorithm A2.3 in The NURBS Book [1] is here improved: - 'left' and 'right' arrays are 1 element shorter; - inverse of knot differences are saved to avoid unnecessary divisions; - innermost loops are replaced with vector operations on slices. """ #left = empty(degree) #right = empty(degree) ndu = empty((degree+1, degree+1)) a = empty((2, degree+1)) #ders = zeros((n+1, degree+1)) # output array # Number of derivatives that need to be effectively computed # Derivatives higher than degree are = 0. ne = min(n, degree) # Compute nonzero basis functions and knot differences for splines # up to degree, which are needed to compute derivatives. # Store values in 2D temporary array 'ndu' (square matrix). 
ndu[0, 0] = 1.0 for j in range(0, degree): left[j] = eta - knots[span-j] right[j] = knots[span+1+j] - eta saved = 0.0 for r in range(0, j+1): # compute inverse of knot differences and save them into lower triangular part of ndu ndu[j+1, r] = 1.0 / (right[r] + left[j-r]) # compute basis functions and save them into upper triangular part of ndu temp = ndu[r, j] * ndu[j+1, r] ndu[r, j+1] = saved + right[r] * temp saved = left[j-r] * temp ndu[j+1, j+1] = saved # Compute derivatives in 2D output array 'ders' ders[0, :] = ndu[:, degree] for r in range(0, degree+1): s1 = 0 s2 = 1 a[0, 0] = 1.0 for k in range(1, ne+1): d = 0.0 rk = r-k pk = degree-k if r >= k: a[s2, 0] = a[s1, 0] * ndu[pk+1, rk] d = a[s2, 0] * ndu[rk, pk] j1 = 1 if (rk > -1) else -rk j2 = k-1 if (r-1 <= pk) else degree-r a[s2, j1:j2+1] = (a[s1, j1:j2+1] - a[s1, j1-1:j2] ) * ndu[pk+1, rk+j1:rk+j2+1] for l in range(j2 + 1 - j1): d += a[s2, j1 + l] * ndu[rk + j1 + l, pk] #d += dot(a[s2, j1:j2+1], ndu[rk+j1:rk+j2+1, pk]) if r <= pk: a[s2, k] = - a[s1, k-1] * ndu[pk+1, r] d += a[s2, k] * ndu[r, pk] ders[k, r] = d j = s1 s1 = s2 s2 = j # Multiply derivatives by correct factors r = degree for k in range(1, ne+1): ders[k, :] = ders[k, :] * r r = r * (degree-k)
PypiClean
/django-districts-2023.2.tar.gz/django-districts-2023.2/districts/migrations/0001_initial.py
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='County',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('county', models.CharField(max_length=50)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'County/Municipality',
                'verbose_name_plural': 'County/Municipality',
            },
        ),
        migrations.CreateModel(
            name='Regions',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('region', models.CharField(max_length=20)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'Region',
                'verbose_name_plural': 'Region',
            },
        ),
        migrations.CreateModel(
            name='SubCounty',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('sub_county', models.CharField(max_length=50)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('county', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='districts.county')),
            ],
            options={
                'verbose_name': 'Sub-County/Town Council/Division',
                'verbose_name_plural': 'Sub-County/Town Council/Division',
            },
        ),
        migrations.CreateModel(
            name='Parish',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('parish', models.CharField(max_length=50)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('sub_county', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='districts.subcounty')),
            ],
            options={
                'verbose_name': 'Parish/Ward',
                'verbose_name_plural': 'Parish/Ward',
            },
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('parish', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='districts.parish')),
            ],
            options={
                'verbose_name': 'Location',
                'verbose_name_plural': 'Location',
            },
        ),
        migrations.CreateModel(
            name='Districts',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('district', models.CharField(max_length=50)),
                ('population', models.IntegerField()),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='districts.regions')),
            ],
            options={
                'verbose_name': 'District/City',
                'verbose_name_plural': 'District/City',
            },
        ),
        migrations.AddField(
            model_name='county',
            name='district',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='districts.districts'),
        ),
    ]
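A hypothetical usage sketch of the hierarchy this migration creates (Regions -> Districts -> County -> SubCounty -> Parish -> Location), assuming the app exposes matching model classes in districts.models with the same field names and Django's default reverse accessors; the query below is illustrative only.

from districts.models import Parish

# Walk one parish up to its region through the foreign keys declared above,
# and read the coordinates attached via the one-to-one Location model.
parish = Parish.objects.select_related("sub_county__county__district__region").first()
if parish is not None:
    region_name = parish.sub_county.county.district.region.region
    # Reverse one-to-one accessor; raises Location.DoesNotExist if no row is attached.
    coords = (parish.location.latitude, parish.location.longitude)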
PypiClean
/cic-helper-0.2.4.tar.gz/cic-helper-0.2.4/cic_helper/cli.py
import logging
import pathlib
import subprocess
import sys

import click

from cic_helper.constants import (
    DEFAULT_CHAIN_SPEC,
    DEFAULT_GAS_LIMIT,
    DEFAULT_RPC_PROVIDER,
)
from cic_helper.Person import load_people_from_csv, save_people_to_csv

log = logging.getLogger(__name__)
log_format = "%(message)s"


def set_log_level(level: int = 1):
    # ERROR, WARN, INFO, DEBUG
    if level == 1:
        logging.basicConfig(format=log_format, level=logging.INFO)
    else:
        logging.basicConfig(format=log_format, level=logging.DEBUG)


@click.group()
def cli():
    pass


@cli.command()
@click.argument("filename", type=click.Path(exists=True))
@click.option("-v", "--verbose", count=True, help="Verbosity Level (-v,-vv)")
@click.option(
    "--fee-limit",
    nargs=1,
    type=str,
    default="800000",
    show_default=True,
    help="Fee limit for each tx",
)
@click.option("-t", "--token", type=str, nargs=1, default=False, help="Token Address")
@click.option(
    "-p",
    "--rpc_provider",
    type=str,
    nargs=1,
    default=DEFAULT_RPC_PROVIDER,
    show_default=True,
    help="RPC Provider",
)
@click.option(
    "-i",
    "--chain_spec",
    type=str,
    nargs=1,
    default=DEFAULT_CHAIN_SPEC,
    show_default=True,
    help="Chain Spec",
)
def get_balances(filename, verbose, fee_limit, token, rpc_provider, chain_spec):
    set_log_level(verbose)
    people = load_people_from_csv(filename)
    for person in people:
        err = person.verify(user_address=True)
        if err:
            raise Exception(err)
    for person in people:
        person.get_balance(token, chain_spec, rpc_provider, fee_limit)
    save_people_to_csv(filename, people)


@cli.command()
@click.argument("filename", type=click.Path(exists=True))
@click.option("-v", "--verbose", count=True, help="Verbosity Level (-v,-vv)")
def verify_amount(filename, verbose):
    set_log_level(verbose)
    people = load_people_from_csv(filename)
    for person in people:
        err = person.verify(user_address=True, balance=True, contract_address=True)
        if err:
            log.error(err)


@cli.command()
@click.option("-c", "--config", type=str, help="Path to Kitabu Config Folder")
def run(config):
    base = pathlib.Path(__file__).parent.resolve()
    kitabu_path = config or base.joinpath("kitabu")
    result = subprocess.run(["bash", "run.sh"], stdout=subprocess.PIPE, cwd=kitabu_path)
    log.info(result)


@cli.command()
@click.argument("filename", type=click.Path(exists=True))
@click.option("-v", "--verbose", count=True, help="Verbosity Level (-v,-vv)")
def get_addresses(filename, verbose):
    set_log_level(verbose)
    people = load_people_from_csv(filename)
    log.info(f"Fetching Address for {len(people)} People")
    for idx, person in enumerate(people):
        log.info(f"[{idx}/{len(people)}] Fetching address for: {person.phone_number}")
        person.get_address()
        if person.user_address is None:
            log.error(f"Failed to get address for {person.phone_number}, so skipping")
    log.info(f"Saving to {filename}")
    save_people_to_csv(filename, people)
    log.info(f"Saved to {filename}")


@cli.command()
@click.argument("filename", type=click.Path(exists=True))
@click.argument("contract_address", type=str)
@click.option(
    "--fee-limit",
    nargs=1,
    type=str,
    default=str(DEFAULT_GAS_LIMIT),
    show_default=True,
    help="Fee limit for each tx",
)
@click.option("-v", "--verbose", count=True, help="Verbosity Level (-v,-vv)")
@click.option(
    "-p",
    "--rpc_provider",
    type=str,
    nargs=1,
    show_default=True,
    default=DEFAULT_RPC_PROVIDER,
    help="RPC Provider",
)
@click.option(
    "-i",
    "--chain_spec",
    type=str,
    nargs=1,
    show_default=True,
    default=DEFAULT_CHAIN_SPEC,
    help="Chain Spec",
)
@click.option(
    "-y",
    "--signer",
    type=str,
    required=True,
    help='Signer Keyfile Location (e.g "/home/sarafu//wor-deployer-wallet-keyfile")',
)
def send(
    filename, contract_address, fee_limit, rpc_provider, chain_spec, signer, verbose
):
    set_log_level(verbose)
    people = load_people_from_csv(filename)
    errors = []
    for person in people:
        person.contract_address = contract_address
        err = person.verify(user_address=True, contract_address=True)
        if err:
            errors.append(err)
    if len(errors) > 0:
        log.error(errors)
        sys.exit(1)
    save_people_to_csv(filename=filename, people=people)
    for person in people:
        person.send(
            contract_address, signer, chain_spec, rpc_provider, fee_limit=fee_limit
        )


def print_help_msg(command):
    with click.Context(command) as ctx:
        click.echo(command.get_help(ctx))


if __name__ == "__main__":
    cli()
PypiClean
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/request/AlipayMerchantIndirectSharetokenCreateRequest.py
import json

from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMerchantIndirectSharetokenCreateModel import AlipayMerchantIndirectSharetokenCreateModel


class AlipayMerchantIndirectSharetokenCreateRequest(object):

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        if isinstance(value, AlipayMerchantIndirectSharetokenCreateModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayMerchantIndirectSharetokenCreateModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        params = dict()
        params[P_METHOD] = 'alipay.merchant.indirect.sharetoken.create'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        multipart_params = dict()
        return multipart_params
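A hypothetical usage sketch (not part of the SDK file above): build the request, attach an extra text parameter, and inspect the parameter dict assembled by get_params(). The URL and parameter values are illustrative; P_METHOD is assumed importable by name from the same ParamConstants module the class star-imports.

from alipay.aop.api.constant.ParamConstants import P_METHOD

request = AlipayMerchantIndirectSharetokenCreateRequest()
request.notify_url = "https://example.com/alipay/notify"   # illustrative callback URL
request.add_other_text_param("trace_id", "demo-123")       # illustrative extra parameter

params = request.get_params()
assert params[P_METHOD] == "alipay.merchant.indirect.sharetoken.create"
assert params["notify_url"] == "https://example.com/alipay/notify"
assert params["trace_id"] == "demo-123"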
PypiClean
/Aksarantara-1.1.0-py3-none-any.whl/aksarantara/post_processing.py
import Map as GM, post_options from Roman import Avestan from Core import ( Ahom, Tamil, Malayalam, Gurmukhi, Oriya, Saurashtra, Sinhala, Urdu, Devanagari, Chakma, Limbu, Takri, TamilExtended, ) from East import Tibetan, Thai, PhagsPa, ZanabazarSquare, Burmese, KhamtiShan import fixer as CF import re import functools def default(Strng, langage=""): Strng = ( Strng.replace("\uF001", "") .replace("\u05CC", "") .replace("ʻʻ", "") .replace("\u05CD", "") ) return Strng def defaultPost(Strng): Strng = Strng.replace("\u034F", "") return Strng def AnusvaraAsN(Strng): Strng = Strng.replace("m\u034F", "n") return Strng def ShowSchwaHindi(Strng): import pre_processing as PreP Strng = PreP.RemoveSchwaHindi(Strng, True) return Strng def KannadaSpacingCandrabindu(Strng): Strng = Strng.replace("\u0C81", "\u0C80") return Strng def KannadaNotRepha(Strng): ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Kannada")) + ")" Strng = re.sub("ರ್(?=" + ListC + ")", "ರ‍್", Strng) return Strng def KannadaNakaraPollu(Strng): ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Kannada")) + ")" Strng = re.sub("ನ್(?!" + ListC + ")", "\u0CDD", Strng) return Strng def TeluguRemoveNukta(Strng): Strng = Strng.replace("\u0C3C", "") return Strng def TeluguRemoveAeAo(Strng): Strng = Strng.replace("\u0952\u200B", "") return Strng def TeluguNakaraPollu(Strng): ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Telugu")) + ")" Strng = re.sub("న్(?!" + ListC + ")", "\u0C5D", Strng) return Strng def syriacVowelsBelow(Strng): return Strng def syriacWesternOToU(Strng): return Strng def olddogra(Strng): return Strng def ISO259Target(Strng): replacements = [ ("b", "ḃ"), ("p", "ṗ"), ("k", "k̇"), ("ḵ", "k"), ("v", "b"), ("f", "p"), ("꞉", "\u0307"), ("š̪", "ś"), ("š̮", "š"), ("š", "s̀"), ("ā", "å"), ("e", "ȩ"), ("ō", "ŵ"), ("ū", "ẇ"), ("\u033D", "°"), ("ĕ", "ḝ"), ] for x, y in replacements: Strng = Strng.replace(x, y) Strng = Strng.replace("\u00B0\u0307", "\u0307\u00B0") return Strng def HebrewSBLTarget(Strng): replacements = [ ("v", "ḇ"), ("f", "p̄"), ("d", "ḏ"), ("ḏ꞉", "d"), ("g", "ḡ"), ("ḡ꞉", "g"), ("t", "ṯ"), ("ṯ꞉", "t"), ("š̪", "ś"), ("š̮", "š"), ("ō", "ô"), ("o", "ō"), ("ū", "û"), ("\u033D", "ĕ"), ] for x, y in replacements: Strng = Strng.replace(x, y) Strng = Strng.replace("ĕ꞉", "꞉ĕ") if "\u05CE" in Strng: Strng = ( Strng.replace("ḏ", "d") .replace("ṯ", "t") .replace("ḡ", "g") .replace("\u05CE", "") ) Strng = Strng.replace("\u00B0\u0307", "\u0307\u00B0") return Strng def removetddash(Strng): Strng += "\u05CE" Strng = Strng.replace("d", "d꞉").replace("t", "t꞉").replace("g", "g꞉") Strng = ( Strng.replace("ḏ", "d") .replace("ṯ", "t") .replace("ḡ", "g") .replace("\u05CE", "") ) return Strng def ISO259Source(Strng): return Strng def ISO233Source(Strng): return Strng def HebrewSBLSource(Strng): return Strng def PersianDMGSBLSource(Strng): return Strng def ISO233Target(Strng): replacements = [ ("j", "ǧ"), ("g", "ǧ"), ("ḧ", "ẗ"), ("ḫ", "ẖ"), ("a̮", "ỳ"), ("ˀ", "ˈ"), ("aⁿ", "á"), ("iⁿ", "í"), ("uⁿ", "ú"), ("ā̂", "ʾâ"), ("\u033D", ""), ] for x, y in replacements: Strng = Strng.replace(x, y) return Strng def inherentAO(Strng): Strng = Strng.replace("a", "ô") return Strng def BengaliOldRA(Strng): Strng = Strng.replace("র", "ৰ") return Strng def PersianDMGTarget(Strng): replacements = [ ("ḏ", "ẕ"), ("ḍ", "ż"), ("ṯ", "s̱"), ("j", "ǧ"), ("ˀ", "ʼ"), ("ʔ", "ʼ"), ("ȳ", "ye"), ("ā̂", "ā"), ("\u033D", ""), ] for x, y in replacements: Strng = Strng.replace(x, y) return Strng def arabizeLatn(Strng, target="semitic"): cons = "(" + 
"|".join(GM.SemiticConsonants) + ")" Strng = re.sub(cons + "(ʾ)", r"\1" + "ā", Strng) if target == "indic": Strng = Strng.replace("ʾā", "ā̂") if target != "indic": Strng = re.sub("ʾ", "a", Strng) else: Strng = re.sub("ʾ", "â", Strng) Strng = re.sub("(a̮|ā̮)", "ā", Strng) Strng = re.sub("ˀ?ā̮̂", "ʼā", Strng) if target != "indic": Strng = re.sub("[ˀʔ]", "ʼ", Strng) else: Strng = re.sub("[ˀ]", "", Strng) if target != "indic": Strng = LatnInitialVowels(Strng, "ʾ") if target != "indic": Strng = re.sub("ʼʾ", "ʼ", Strng) if target != "indic": Strng = re.sub("\u033d", "", Strng) Strng = re.sub("(ā)([iau])(ⁿ)", r"\2\3", Strng) return Strng def BengaliSwitchYaYYa(Strng): Strng = re.sub("(?<!\u09CD)য", "@#$", Strng) Strng = re.sub("য়", "য", Strng) Strng = Strng.replace("@#$", "য়") return Strng def AlephMaterLectionis(Strng, target="semitic"): cons = "(" + "|".join(GM.SemiticConsonants) + ")" Strng = re.sub(cons + "(ʾ)", r"\1" + "ā", Strng) return Strng def urduizeLatn(Strng, target="semitic"): cons = "(" + "|".join(GM.SemiticConsonants) + ")" Strng = re.sub(cons + "(ʾ)", r"\1" + "ā", Strng) if target == "indic": Strng = Strng.replace("ʾā", "ā̂") Strng = re.sub("ʾ", "â", Strng) Strng = re.sub("[ˀʔ]", "ʾ", Strng) Strng = re.sub("(a̮|ā̮)", "ā", Strng) Strng = re.sub("ˀ?ā̮̂", "ʼā", Strng) if target != "indic": Strng = re.sub("\u033d", "", Strng) if target != "indic": Strng = LatnInitialVowels(Strng) Strng = re.sub("(ā)([iau])(ⁿ)", r"\2\3", Strng) return Strng def syricizeLatn(Strng, target="semitic"): cons = "(" + "|".join(GM.SemiticConsonants) + ")" if target != "indic": Strng = re.sub("â", "ʾa", Strng) Strng = re.sub("ā̂", "ʾā", Strng) Strng = re.sub("ê", "ʾe", Strng) Strng = re.sub("ē̂", "ʾē", Strng) if target != "indic": Strng = LatnInitialVowels(Strng) return Strng def hebraizeLatn(Strng, target="semitic"): if target != "indic": Strng = LatnInitialVowels(Strng, "ʾ") return Strng def syriacRoman(Strng): Strng = ( Strng.replace("v", "ḇ").replace("ġ", "ḡ").replace("ḫ", "ḵ").replace("f", "p̄") ) return Strng def alephAyinLatnAlternate(Strng): Strng = Strng.replace("ʾ", "ʼ").replace("ʿ", "ʽ") return Strng def alephAyinLatnAlternate2(Strng): Strng = Strng.replace("ʾ", "ʔ").replace("ʿ", "ʕ") return Strng def ArabRemoveAdditions(Strng): Strng = Strng.replace("ڨ", "ج").replace("ڤ", "ف").replace("پ", "ف") return Strng def arabicRemoveAdditionsPhonetic(Strng): Strng = Strng.replace("ڨ", "غ").replace("ڤ", "ف").replace("پ", "ب") return Strng def removeSemiticLetters(Strng): Strng = ( Strng.replace("ṭ", "t") .replace("ḥ", "h") .replace("ḍ", "z") .replace("ḏ", "z") .replace("ẓ", "z") .replace("w", "v") .replace("ʿ", "ʾ") .replace("ṣ", "s") ) return Strng def removeNikkud(Strng): nikkuds = [ "\u05B7", "\u05B8", "\u05B4", "\u05B4י", "\u05BB", "\u05C2", "\u05C1", "\u05B6", "\u05B5", "\u05B9", "וֹ", "\u05B1", "\u05B2", "\u05B3", "\u05BC", "\u05B0", "\u05C7", ] for nikkud in nikkuds: Strng = Strng.replace(nikkud, "") return Strng def LatnInitialVowels(Strng, initLetter=""): initVow = "â ā̂ î ī̂ û ū̂ ê ē̂ âŷ ô ō̂ âŵ".split(" ") nonInitVow = "a ā i ī u ū e ē aŷ o ō aŵ".split(" ") for x, y in zip(initVow, nonInitVow): Strng = Strng.replace(x, initLetter + y) Strng = Strng.replace("\u0302", "") Strng = re.sub("\u033d", "", Strng) return Strng def removeMajliyana(Strng): Strng = Strng.replace("\u0330", "") return Strng def removeRukkaka(Strng): Strng = Strng.replace("\u0741", "") return Strng def removeQussaya(Strng): Strng = Strng.replace("\u0742", "") return Strng def removeVowelsSyriac(Strng): Strng = 
re.sub("[\u0732\u0735\u073C\u0738\u0739\u073F]", "", Strng) Strng = re.sub("[ّܰܶܺܽܳ]", "", Strng) return Strng def removeDiacriticsArabic(Strng): diacrtics = ["\u0652", "\u064E", "\u0650", "\u064F"] for diacritic in diacrtics: Strng = Strng.replace(diacritic, "") return Strng def removeSukunEnd(Strng): Strng = re.sub("(\u0652)(\W|$)", r"\2", Strng) return Strng def persianPaGaFaJa(Strng): Strng = Strng.replace("پ", "ف").replace("گ", "ج") return Strng def removeDiacriticsPersian(Strng): return Strng def removeDiacriticsSyriac(Strng): return Strng def useKtivMale(Strng): return Strng def PhoneticMapping(Strng): return Strng def ArabicGimelGaGha(Strng): Strng = Strng.replace("ج", "غ") return Strng def ArabicGimelPaBa(Strng): Strng = Strng.replace("ف", "ب") return Strng def IASTLOCBurmeseSource(Strng): Strng = Strng.replace("ʻ", "") yrvh = ( Burmese.ConsonantMap[25:27] + Burmese.ConsonantMap[28:29] + Burmese.ConsonantMap[32:33] ) yrvhPat = "".join(yrvh) Strng = re.sub(f"(\u103A)(\u1039)([{yrvhPat}])", r"\2\3", Strng) virsub = "\u1039" yrvhsub = ["\u103B", "\u103C", "\u103D", "\u103E"] for x, y in zip(yrvh, yrvhsub): Strng = Strng.replace(virsub + x, y) vowDep = "အော် အော အိ အီ အု အူ အေ".split(" ") vowIndep = "ဪ ဩ ဣ ဤ ဥ ဦ ဧ".split(" ") Strng = Strng.replace("ʼ", "’") for x, y in zip(vowDep, vowIndep): Strng = Strng.replace("’" + y, x) Strng = Strng.replace("\u102Fဣ", "\u102D\u102F") Strng = Strng.replace("’အ", "အ") Strng = Strng.replace("့်", "့်") return Strng def removeSegmentSpacesBurmese(Strng): import regex Strng = regex.sub("(\p{L}\p{M}*) (\p{L})", r"\1\2", Strng) Strng = regex.sub("(\p{L}\p{M}*) (\p{L})", r"\1\2", Strng) return Strng def IASTLOCBurmeseTarget(Strng): Strng = Strng.replace("˳", "ʹ") Strng = Strng.replace("auʻ", "oʻ") Strng = Strng.replace("ḥ", "ʺ") chars_misc = {"e*": "၏", "n*": "၌", "r*": "၍", "l*": "၎"} for lat, bur in chars_misc.items(): Strng = Strng.replace(bur, lat) return Strng def insertARomanSemitic(Strng): Strng = Strng.replace("\u02BD", "") consonantsAll = ( "(" + "|".join(sorted(GM.SemiticConsonants, key=len, reverse=True)) + ")" ) vowelsAll = "(" + "|".join(GM.SemiticVowels) + ")" Strng = re.sub(consonantsAll + "(?![꞉ʰ])(?!" + vowelsAll + ")", r"\1" + "a", Strng) Strng = re.sub("(꞉)(?!ʰ)(?!" 
+ vowelsAll + ")", r"\1" + "a", Strng) return Strng def FixSemiticRoman(Strng, Target): vir = "\u033D" Strng = re.sub("ō̂̄̂", "ō̂", Strng) if "Arab" in Target: consonantsAll = ( "(" + "|".join( sorted( GM.CrunchSymbols(GM.Consonants, "RomanSemitic"), key=len, reverse=True, ) ) + ")" ) Strng = re.sub(consonantsAll + vir + r"\1", r"\1" + "꞉", Strng) Strng = re.sub("âQ", "ʿ", Strng) Strng = re.sub("aQ", "Qa", Strng) SemiticIndic = [ ("ʾQā", "ā̂Q"), ("ʾQi", "îQ"), ("ʾQī", "ī̂Q"), ("ʾQu", "ûQ"), ("ʾQū", "ū̂Q"), ("ʾQe", "êQ"), ("ʾQē", "ē̂Q"), ("ʾQo", "ôQ"), ("ʾQō", "ō̂Q"), ("ṣ", "sQ"), ("ʿ", "ʾQ"), ("ṭ", "tQ"), ("ḥ", "hQ"), ("ḍ", "dQ"), ("p̣", "pQ"), ("ž", "šQ"), ("ž", "zQ"), ("ẓ", "jʰQ"), ("ḏ", "dʰQ"), ("ṯ", "tʰQ"), ("w", "vQ"), ] for s, i in SemiticIndic: Strng = Strng.replace(i, s) Strng = Strng.replace("\u033d\u033d", "\u033d") return Strng def ArabAtoAleph(Strng): Strng = Strng.replace("أ", "ا") return Strng def estrangelasyriac(Strng): return Strng def easternsyriac(Strng): return Strng def westernsyriac(Strng): return Strng def kawitan(Strng): return Strng def sundapura(Strng): return Strng def readableItrans(Strng): pairsReadable = [ ("R^i", "RRi"), ("R^I", "RRii"), ("", ""), ("", ""), ("A", "aa"), ("I", "ii"), ("U", "uu"), ("Ch", "chh"), ("kSh", "x"), ("M", ".m"), ] for x, y in pairsReadable: Strng = Strng.replace(x, y) return Strng def NasalTilde(Strng): Strng = re.sub("(m̐|ṃ|ṁ)", "\u0303", Strng) return Strng def verticalKana(Strng): return Strng def verticalSiddham(Strng): return Strng def vtobJapanese(txt): return txt def SogdReshAyin(Strng): Strng = Strng.replace("𐼽", "𐽀") return Strng def SogoReshAyinDaleth(Strng): Strng = Strng.replace("𐼓", "𐼘") return Strng def arabPaFa(Strng): return Strng.replace("پ", "ف") def arabChaSa(Strng): return Strng.replace("چ", "س") def gainGimel(Strng): return Strng.replace("עׄ", "ג") def tavTwodot(Strng): return Strng.replace("ת", "ת̈") def tavThreedot(Strng): return Strng.replace("תׄ", "ת֒") def gainGimel(Strng): return Strng.replace("ק", "ק̈") def tokushuon(txt): txt = txt.replace("si", "suxi").replace("zi", "zuxi") txt = txt.replace("yi", "ixi") txt = txt.replace("fy", "fux") txt = txt.replace("nye", "nixe") txt = re.sub("(?<![sc])hu", "hoxu", txt) txt = re.sub("(?<![sc])hye", "hixe", txt) return txt def JapanesePostProcess(src, tgt, txt, nativize, postoptions): from Other import kana2roman import pykakasi import convert txt = convert.convertScript(txt, src, "Telugu") txt = txt.replace("ˆ", "") txt = convert.convertScript(txt.lower(), "ISO", "Inter") txt = convert.convertScript(txt, "Telugu", "RomanKana") txt = re.sub("([aiueo])" + r"\1" + "H", r"\1" + r"\1" + "h" + r"\1", txt) txt = re.sub("([aiueo])H", r"\1" + "h" + r"\1", txt) txt = ( txt.replace("Gk", "nk") .replace("Gg", "ng") .replace("Jc", "nc") .replace("Jj", "nj") .replace("mb", "nb") .replace("mp", "np") ) txt = ( txt.replace("nn", "nnn") .replace("c", "ch") .replace("chch", "cch") .replace("shsh", "ssh") .replace("mm", "nm") ) txt = txt.replace(",", "、").replace("\uEA01", "。").replace("\uEA02", "。。") txt = txt.replace("JJ", "nnny") txt = txt.replace("J", "ny") if "vtobJapanese" in postoptions: txt = txt.replace("v", "b") txt = ( txt.replace("tr", "tor") .replace("dr", "dor") .replace("Dr", "dor") .replace("Tr", "tor") ) txt = ( txt.replace("tya", "tiya") .replace("dya", "diya") .replace("sya", "suya") .replace("shya", "shuya") .replace("chya", "chuya") ) txt = txt.replace("di", "dexi").replace("du", "doxu") txt = txt.replace("ti", "texi").replace("tu", "toxu") txt = txt.replace("mye", 
"mixe").replace("pye", "pixe").replace("bye", "bixe") txt = txt.replace("ye", "ixe") txt = txt.replace("vye", "vuxixe").replace("vy", "vuxy") txt = txt.replace("she", "shixe") if not nativize: txt = re.sub("(r)(r\u309A)", "rur\u309A", txt) txt = re.sub("(r\u309A)(r\u309A)", "rr" + "\u309A", txt) txt = re.sub("(k\u309A)(k\u309A)", "kk" + "\u309A", txt) txt = re.sub("([rk])(\u309A)([aieou])", r"\1\3\2", txt) txt = tokushuon(txt) else: txt = ( txt.replace("r\u309A", "r") .replace("k\u309Ak" + "\u309A", "ng") .replace("k\u309A", "ng") ) txt = txt.replace("yi", "i").replace("ye", "e").replace("wu", "u") txt = txt.replace("wo", "uxo") txt = txt.replace("she", "shie") if tgt == "Hiragana": txt = kana2roman.to_hiragana(txt) txt = re.sub( "(k|g|ch|j|p|b|m|y|r|w|sh|s|h|z|f)" + "(" + r"\1" + ")", r"\1" + "u", txt ) txt = re.sub("(d|t)" + "(" + r"\1" + ")", r"\1" + "o", txt) if not nativize: txt = tokushuon(txt) txt = kana2roman.to_hiragana(txt) txt = re.sub("(k|g|ch|j|p|b|m|y|r|sh|s|h|z|f|v)", r"\1" + "u", txt) txt = re.sub("(d|t)", r"\1" + "o", txt) if not nativize: txt = tokushuon(txt) txt = kana2roman.to_hiragana(txt) txt = txt.replace("う゛", "ゔ") if tgt == "Katakana": txt = ( txt.replace("aa", "a-") .replace("ii", "i-") .replace("ee", "e-") .replace("oo", "o-") .replace("uu", "u-") ) txt = ( txt.replace("a\u309Aa", "a\u309A-") .replace("i\u309Ai", "i\u309A-") .replace("e\u309Ae", "e\u309A-") .replace("o\u309Ao", "o\u309A-") .replace("u\u309Au", "u\u309A-") ) txt = kana2roman.to_katakana(txt) txt = re.sub( "(k|g|ch|j|p|b|m|y|r|sh|s|h|z|f|v)" + "(" + r"\1" + ")", r"\1" + "u", txt ) txt = re.sub("(d|t)" + "(" + r"\1" + ")", r"\1" + "o", txt) if not nativize: txt = tokushuon(txt) txt = kana2roman.to_katakana(txt) txt = re.sub("(k|g|ch|j|p|b|m|y|r|sh|s|h|z|f|v)", r"\1" + "u", txt) txt = re.sub("(d|t)", r"\1" + "o", txt) if not nativize: txt = tokushuon(txt) txt = kana2roman.to_katakana(txt) txt = convert.convertScript(txt, "Inter", "ISO") return txt def urduRemoveInherent(Strng): Strng = re.sub("\Ba", "", Strng) return Strng def HebrewVetVav(Strng): shortVowels = ( "(" + "|".join( [ "\u05B7", "\u05B8", "\u05B4", "\u05BB", "\u05B5", "\u05B6", "\u05B9", "\u05B0", ] ) + ")" ) Strng = re.sub(shortVowels + "(" + "ו" + ")" + "(?!\u05BC)", r"\1" + "ב", Strng) Strng = Strng.replace("בֺ", "בֹ") return Strng def devanagariuttara(Strng): return Strng def devanagarinepali(Strng): return Strng def devanagaribalbodh(Strng): return Strng def devanagarijain(Strng): return Strng def HiraganaaunotDipthong(Strng): return Strng def IASTISONasalTilde(Strng): return Strng def HeberewQoph(Strng): Strng = Strng.replace("כּ", "ק").replace("ךּ", "ק") return Strng def HebewShortO(Strng): Strng = re.sub("(?<!ו)\u05B9", "\u05C7", Strng) return Strng def HebrewKatevMalei(Strng): Strng = Strng.replace("ָ", "א") Strng = Strng.replace("ַ", "א") return Strng def HebrewnonFinalShort(Strng): finals = ["ך", "ם", "ן", "ף", "ץ", "ףּ", "ךּ"] finalCons = ["כ", "מ", "נ", "פ", "צ", "פּ", "כּ"] otherCons = "ב,ח,ע,צ,ש,ת".split(",") consonantsAll = ( "(" + "|".join( GM.CrunchSymbols(GM.Consonants, "Hebrew") + finals + ["׳", "י", "ו"] + otherCons ) + ")" ) shortVowels = [ "\u05B7", "\u05B8", "\u05B4", "\u05BB", "\u05B5", "\u05B6", "\u05B9", "\u05C7", ] shortVowelsR = ( "(" + "|".join( [ "\u05B7", "\u05B8", "\u05B4", "\u05BB", "\u05B5", "\u05B6", "\u05B9", "\u05C7", ] + ["׳"] ) + ")" ) for s in shortVowels: Strng = re.sub( "(" + s + ")" + "(׳?)" + "(?!" 
+ consonantsAll + ")", r"\1\2" + "ה" + "\u02BE", Strng, ) for f, c in zip(finals, finalCons): Strng = re.sub( "(" + f + ")" + shortVowelsR + "(׳?)" + "ה" + "\u02BE", c + r"\2\3" + "ה", Strng, ) for f in finals: Strng = Strng.replace(f + "\u05B0", f) Strng = Strng.replace("\u05B0" + "׳" + "ה" + "\u02BE", "\u05B0" + "׳") Strng = Strng.replace("וֹה" + "\u02BE", "וֹ") Strng = Strng.replace("\u02BE", "") uVowels = ["וֹ", "וּ"] return Strng def DevanagariAnusvara(Strng): return NasalToAnusvara(Strng, "Devanagari") def jainomDevangari(Strng): Strng = Strng.replace("ॐ", "ꣽ") return Strng def GurmukhiCandrabindu(Strng): Strng = Strng.replace("ਁ", "ਂ") return Strng def mDotAboveToBelow(Strng): Strng = Strng.replace("ṃ", "ṁ") return Strng def noLongEO(Strng): Strng = Strng.replace("ē", "e").replace("ō", "o") return Strng def TamilStyleUUCore(Strng): Strng = re.sub("([ഖഗഘഛഝഠഡഢഥദധഫബഭ])" + "([ുൂ])", r"\1" + "\u200D" + r"\2", Strng) return Strng def TamilStyleUUOther(Strng): Strng = re.sub("([ജശഷസഹ])" + "([ുൂ])", r"\1" + "\u200D" + r"\2", Strng) Strng = re.sub("(ശ്ര)" + "([ുൂ])", r"\1" + "\u200D" + r"\2", Strng) Strng = re.sub("(ശ്‍ര)" + "([ുൂ])", r"\1" + "\u200D" + r"\2", Strng) return Strng def ContextualLLa(Strng): ListVS = "|".join(GM.CrunchSymbols(GM.VowelSigns, "Tamil")) ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Tamil")) Strng = re.sub("(ஆவ|ாவ)" + "ல", r"\1" + "ள", Strng) Strng = re.sub("(்ரவா|்ரவ|ர|பவ|வி|ரா|ஷ்க|த⁴வ)" + "ல", r"\1" + "ள", Strng) Strng = re.sub("(யா|யாம|கோம)" + "ல", r"\1" + "ள", Strng) Strng = re.sub("(மௌ)" + "ல", r"\1" + "ள", Strng) Strng = re.sub("([\s^])(ந)" + "ல", r"\1" + "ள", Strng) Strng = Strng.replace("கலத்ர", "களத்ர") Strng = Strng.replace("ஶீதல", "ஶீதள") Strng = Strng.replace("ஸுதல", "ஸுதள") Strng = Strng.replace("காலி", "காளி") Strng = Strng.replace("காலீ", "காளீ") Strng = Strng.replace("கலேவர", "களேவர") Strng = Strng.replace("கலேவர", "களேவர") Strng = Strng.replace("ப³ஹுல", "ப³ஹுள") Strng = Strng.replace("கஶ்மல", "கஶ்மள") Strng = re.sub( "([கத])" + "(" + ListVS + ")?" 
+ "([³⁴])" + "ல", r"\1\2\3" + "ள", Strng ) Strng = re.sub("(ஜு)" + "ல", r"\1" + "ள", Strng) Strng = re.sub("(து)" + "லசி", r"\1" + "ளசி", Strng) Strng = re.sub("(ரிம)" + "ல", r"\1" + "ள", Strng) Strng = Strng.replace("ள்ய", "ல்ய") return Strng def FinalNNa(Strng): Strng = re.sub("ன", "ந", Strng) Strng = re.sub("ந்" + "([\.।॥,!-])", "ன்" + r"\1", Strng) Strng = re.sub("ந்" + "(\s)", "ன்" + r"\1", Strng) Strng = re.sub("ந்$", "ன்", Strng) return Strng def TamilpredictDentaNaExtended(Strng): listDentalNa = """ഩഖ ഩഗര ഩകുല ഩഗ്‌ഩ ഩക്ഷത്‌ര ഩടരാജ ഩടീ ഩദീ ഩന്‌ദഩ ഩപുംസക ഩഭ** ഩമ** ഩമശ്‌ ഩമസ്‌ ഩമാമ ഩമാമി ഩമാമോ ഩമുചി ഩമോ ഩമോനമ ഩമോനമോ ഩമോസ്‌തു ഩമോസ്‌തുതേ ഩമഃ ഩയഩ ഩര** ഩരക ഩര്‌തക ഩര്‌തഩ ഩര്‌മദ ഩല** ഩലിഩ ഩവ** ഩവീഩ ഩവ്‌യ ഩശ്‌** ഩഷ്‌ട ഩാരായണ ഩാഗ ഩാടക ഩാഡീ ഩാട്‌യ ഩാഡ്‌യ ഩാഥ ഩാദ ഩാരത ഩാഩാ*** ഩാഩ്‌യ** ഩാഩൃത ഩാഭ ഩാമ ഩായക ഩായികാ ഩാരദ ഩാരസിംഹ ഩാരി ഩാരീ ഩാവ*** ഩാശ ഩാസിക ഩിഗമ ഩികട ഩികര ഩികാമ ഩികായ ഩിഖില ഩികുഞ്‌ജ ഩിഘൂഩ ഩികേത ഩിഗ്‌രഹ ഩിഗൃഹ ഩികൃന്‌ത ഩിഗ്‌രന്‌ത ഩിക്ഷിപ ഩിക്ഷേപ ഩിഘ്‌ഩ ഩിജ ഩിദര്‌ശ ഩിതമ്‌ബ ഩിതര ഩിദാഘ ഩിദാഩ ഩിതാന്‌ത ഩിധാഩ ഩിധായ ഩിധ ഩിധേഹി ഩിദ്‌ര ഩിത്‌യ ഩിന്‌ദാ ഩിബദ്‌ധ ഩിബധ്‌ ഩിബന്‌ധഩ ഩിപട ഩിപതിത ഩിപത്‌യ ഩിപപാത ഩിപാതിത ഩിപാത്‌യ ഩിപുണ ഩിബോധ ഩിഭൃത ഩിമഗ്‌ഩ ഩിമിത്‌ത ഩിമിഷ ഩിയത ഩിയന്‌ത ഩിയന്‌ത്‌ര ഩിയമ ഩിയുക്‌ത ഩിയുജ്‌യ ഩിയോ ഩിര ഩിര്‌ ഩിലയ ഩിവര്‌ ഩിവസ ഩിവാര ഩിവാസ ഩിവിഷ്‌ട ഩിവേദ ഩിവേശ ഩിവൃ ഩിശ ഩിശ്‌ ഩിഷ ഩിഷ്‌ ഩിസ ഩിസ്‌ ഩിഹിത ഩിഃശ ഩിഃഷ ഩിഃസ ഩീച ഩീതി ഩീര ഩീല ഩൂതഩ ഩൂപുര ഩേത്‌ര ഩേയ** ഩൈമിത്‌ത ഩൈമിഷ ഩൈരാശ്‌യ ഩൈരൃത ഩൈവേദ്‌യ ഩൈഷ്‌ ഩ്‌യായ ഩ്‌യാസ ഩ്‌യൂഩ ഩൃ""".split( "\n" ) vir = Tamil.ViramaMap[0] for wordNna in listDentalNa: wordNa = re.sub("^ഩ", "ന", wordNna) if "²" in wordNna[-1] or "³" in wordNna[-1] or "⁴" in wordNna[-1]: number = wordNna[-1] wordNnaN = wordNna[:-1] wordNaN = wordNa[:-1] for vow in GM.CrunchSymbols(GM.VowelSigns, "Tamil"): Strng = Strng.replace(wordNnaN + vow + number, wordNaN + vow + number) Strng = Strng.replace(wordNna, wordNa) for wordNna in ["ഩാമ", "ഩര"]: wordNa = re.sub("^ഩ", "ന", wordNna) Strng = Strng.replace(wordNa + vir, wordNna + vir) Strng = Strng.replace("ഩ്‌ന", "ന്‌ന") return Strng def TamilpredictDentaNa(Strng): listDentalNa = """னக² னக³ர னகுல னக்³ன னக்ஷத்ர னடராஜ னடீ னதீ³ னந்த³ன னபும்ʼஸக னப⁴** னம** னமஶ் னமஸ் னமாம னமாமி னமாமோ னமுசி னமோ னமோநம னமோநமோ னமோஸ்து னமோஸ்துதே னம꞉ னயன னர** னரக னர்தக னர்தன னர்மத³ னல** னலின னவ** னவீன னவ்ய னஶ்** னஷ்ட னாராயண னாக³ னாடக னாடீ³ னாட்ய னாட்³ய னாத² னாத³ னாரத னானா*** னான்ய** னான்ருʼத னாப⁴ னாம னாயக னாயிகா னாரத³ னாரஸிம்ʼஹ னாரி னாரீ னாவ*** னாஶ னாஸிக னிக³ம னிகட னிகர னிகாம னிகாய னிகி²ல னிகுஞ்ஜ னிகூ⁴ன னிகேத னிக்³ரஹ னிக்³ருʼஹ னிக்ருʼந்த னிக்³ரந்த னிக்ஷிப னிக்ஷேப னிக்⁴ன னிஜ னித³ர்ஶ னிதம்ப³ னிதர னிதா³க⁴ னிதா³ன னிதாந்த னிதா⁴ன னிதா⁴ய னித⁴ னிதே⁴ஹி னித்³ர னித்ய னிந்தா³ னிப³த்³த⁴ னிப³த்⁴ னிப³ந்த⁴ன னிபட னிபதித னிபத்ய னிபபாத னிபாதித னிபாத்ய னிபுண னிபோ³த⁴ னிப்⁴ருʼத னிமக்³ன னிமித்த னிமிஷ னியத னியந்த னியந்த்ர னியம னியுக்த னியுஜ்ய னியோ னிர னிர் னிலய னிவர் னிவஸ னிவார னிவாஸ னிவிஷ்ட னிவேத³ னிவேஶ னிவ்ருʼ னிஶ னிஶ் னிஷ னிஷ் னிஸ னிஸ் னிஹித னி꞉ஶ னி꞉ஷ னி꞉ஸ னீச னீதி னீர னீல னூதன னூபுர னேத்ர னேய** னைமித்த னைமிஷ னைராஶ்ய னைர்ருʼத னைவேத்³ய னைஷ் ன்யாய ன்யாஸ ன்யூன ன்ருʼ""".split( "\n" ) vir = Tamil.ViramaMap[0] Tamillist = "²³⁴ஃஅஆஇஈஉஊஎஏஐஒஓஔகஙசஜஞடணதநனபமயரறலளழவஷஸஹாிீுூெேைொோௌ்ௗ" for wordNna in listDentalNa: wordNa = re.sub("^ன", "ந", wordNna) if "²" in wordNna[-1] or "³" in wordNna[-1] or "⁴" in wordNna[-1]: number = wordNna[-1] wordNnaN = wordNna[:-1] wordNaN = wordNa[:-1] for vow in GM.CrunchSymbols(GM.VowelSigns, "Tamil"): Strng = Strng.replace(wordNnaN + vow + number, wordNaN + vow + number) Strng = Strng.replace(wordNna, wordNa) for wordNna in ["னாம", "னர"]: wordNa = re.sub("^ன", "ந", wordNna) 
Strng = re.sub( "([" + Tamillist + "])(" + wordNa + vir + ")", r"\1" + wordNna + vir, Strng, ) Strng = Strng.replace("ன்ந", "ந்ந") Strng = Strng.replace("னாம்ன", "நாம்ன") return Strng def AhomClosed(Strng): vir = Ahom.ViramaMap[0] anu = Ahom.AyogavahaMap[1] Strng = Strng.replace("\U00011722", "\U00011723") Strng = re.sub("(\U00011723)(.)(" + vir + ")", "\U00011722" + r"\2\3", Strng) Strng = Strng.replace(anu + "\U00011723", anu + "\U00011722") Strng = Strng.replace("\U00011724", "\U00011725") Strng = re.sub("(\U00011725)(.)(" + vir + ")", "\U00011724" + r"\2\3", Strng) Strng = Strng.replace(anu + "\U00011725", anu + "\U00011724") Strng = re.sub( "(\U00011726\U00011727)(.)(" + vir + ")", "\U00011726" + r"\2\3", Strng ) Strng = Strng.replace("\U00011726\U0001172A\U00011727", anu + "\U00011727") Strng = re.sub( "(\U00011726\U00011721)(.)(" + vir + ")", "\U00011728" + r"\2\3", Strng ) Strng = Strng.replace("\U00011726\U0001172A\U00011721", anu + "\U00011728") return Strng def TeluguTamilZha(Strng): return Strng def TeluguTamilRra(Strng): Strng = Strng.replace("ఱ్ఱ", "ౘ్ౘ") Strng = Strng.replace("ట్ర", "ౘ్ౘ") Strng = Strng.replace("ండ్ర", "న్ఱ") return Strng def ThaiNativeConsonants(Strng): Strng = Strng.replace("ท", "ด") Strng = Strng.replace("พ", "บ") Strng = Strng.replace("\u0E36", "\u0E34\u0E4D") Strng = Strng.replace("ํ", "งฺ") Strng = re.sub("(\u0E3A)([ยรลวห])", "\u035C" + r"\2", Strng) Strng = Strng.replace("ห\u0E3A", "ห\u035C") Strng = re.sub("([ยรลวห])" + "\u035C" + r"\1", r"\1" + "\u0E3A" + r"\1", Strng) Strng = re.sub("(า)(.)(ฺ)", "็" + r"\1\2\3", Strng) Strng = re.sub("([เโ])(.)(.)(ฺ)", r"\1\2" + "็" + r"\3\4", Strng) Strng = ThaiTranscription(Strng, False) Strng = Strng.replace("ะ͜", "\u035C") Strng = Strng.replace("ะ็", "็") Strng = re.sub("([เโไ])(.)(\u035C)(.)([ะ\u0E31])", r"\1\2\3\4", Strng) Strng = Strng.replace("ค", "ก\u0325") Strng = Strng.replace("ช", "จ\u0325") Strng = Strng.replace("ํ", "ง") Strng = Strng.replace("ง", "งํ") Strng = Strng.replace("ะงํ\u035C", "\u0E31งํ") Strng = re.sub("([เโไ])(งํ)([าัะ])", r"\1" + "ง" + r"\2", Strng) Strng = re.sub("([เโไ])(งํ)", r"\1" + "ง", Strng) Strng = re.sub("(งํ)([าัะ])", "ง" + r"\2", Strng) return Strng def KhamiShanMyanmarNumerals(Strng): for x, y in zip(KhamtiShan.NumeralMap, Burmese.NumeralMap): Strng = Strng.replace(x, y) return Strng def KhamtiShanRa(Strng): Strng = Strng.replace("ရ", "ꩳ") return Strng def granthafinal(Strng): return Strng def Dot2Dandas(Strng): Strng = Strng.replace("..", "॥") Strng = Strng.replace(".", "।") return Strng def SaurastraHaaruColon(Strng): vir = Tamil.ViramaMap[0] ha = Tamil.ConsonantMap[-1] Strng = Strng.replace(vir + ha, ":") ListVS = "|".join(GM.CrunchSymbols(GM.VowelSigns, "Tamil")) Strng = re.sub("(:)" + "(" + ListVS + ")", r"\2\1", Strng) Strng = re.sub("(\s)(ன)", r"\1" + "ந", Strng) Strng = re.sub("^ன", "ந", Strng) return Strng def TamilExtendedNNA(Strng): na = TamilExtended.ConsonantMap[19] nna = TamilExtended.SouthConsonantMap[3] vir = TamilExtended.ViramaMap[0] ta = TamilExtended.ConsonantMap[15] ListV = "|".join( GM.CrunchSymbols(GM.Vowels + GM.VowelSigns + GM.Consonants, "TamilExtended") + [TamilExtended.SignMap[0]] ) Strng = re.sub( "(" + ListV + ")" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ")", r"\1\2" + nna, Strng, ) Strng = re.sub( "(" + ListV + ")" + GM.VedicSvaras + "(" + na + ")" + "(?!" 
+ vir + ")", r"\1\2" + nna, Strng, ) Strng = re.sub("(ന്‌)(?![തഥദധ])", "ഩ്‌", Strng) Strng = re.sub("(\s)ഩ്", r"\1" + "ന്‌", Strng) Strng = re.sub("^ഩ്", r"" + "ന്‌", Strng) Strng = TamilpredictDentaNaExtended(Strng) return Strng def TakriRemoveGemination(Strng): Strng = re.sub("(.)" + Takri.ViramaMap[0] + r"\1", r"\1", Strng) return Strng def MongolianSyllabize(Strng): vowels = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Mongolian") + ["\u1820"]) + ")" consonants = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Mongolian")) + ")" Strng = re.sub(consonants + "?" + vowels, r"\1\2" + " ", Strng) Strng = re.sub("(\u180E\u1820)" + consonants, r"\1 \2", Strng) Strng = re.sub("\u1820 ", "\u1820\u180B ", Strng) Strng = Strng.replace("ᠣᠸᠠ᠋", "ᠣᠸᠠ") Strng = Strng.replace("ᠣᠸᠸᠠ᠋", "ᠣᠸᠸᠠ") Strng = Strng.replace(" \u180E", "\u180E") Strng = Strng.replace(" " + "\u200B", "") Strng = Strng.replace(" ᢁ", "ᢁ") return Strng def TibetanSyllabize(Strng): vowels = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Tibetan")) + ")" consonants = ( "(" + "|".join( GM.CrunchSymbols(GM.Consonants, "Tibetan") + ["ཨ", "ཅ", "ཆ", "ཇ", "ཇྷ"] ) + ")" ) vowelsigns = ( "(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "Tibetan") + ["\u0F80"]) + ")" ) combiningSigns = ( "(" + "|".join(GM.CrunchSymbols(GM.CombiningSigns, "Tibetan") + ["\u0F82"]) + ")" ) ListSubC = ( "(" + "|".join([chr(x + 80) for x in range(0x0F40, 0x0F68)] + ["ྻ", "ྺ", "ྼ"]) + ")" ) Strng = re.sub(vowelsigns + combiningSigns + "?", r"\1\2་", Strng) Strng = re.sub(consonants, r"\1་", Strng) Strng = re.sub(ListSubC, r"\1་", Strng) Strng = re.sub("་" + vowelsigns, r"\1", Strng) Strng = re.sub("་" + ListSubC, r"\1", Strng) Strng = re.sub("་" + combiningSigns, r"\1", Strng) Strng = re.sub(combiningSigns, r"\1་", Strng) Strng = Strng.replace("་་", "་") return Strng def SoyomboSyllabize(Strng): vowels = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Soyombo")) + ")" consonants = ( "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Soyombo") + ["𑩐", "\U00011A83"]) + ")" ) vowelsigns = "(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "Soyombo")) + ")" combiningSigns = ( "(" + "|".join(GM.CrunchSymbols(GM.CombiningSigns, "Soyombo")) + ")" ) fin = ( "(" + "|".join( [ "\U00011A8A", "\U00011A8B", "\U00011A8C", "\U00011A8D", "\U00011A8E", "\U00011A8F", "\U00011A90", "\U00011A91", "\U00011A92", "\U00011A93", "\U00011A94", ] ) + ")" ) Strng = re.sub(vowelsigns + combiningSigns + "?", r"\1\2 ", Strng) Strng = re.sub(consonants, r"\1 ", Strng) Strng = re.sub(" " + vowelsigns, r"\1", Strng) Strng = re.sub(" " + combiningSigns, r"\1", Strng) Strng = re.sub("\U00011A99" + " ", "\U00011A99", Strng) Strng = re.sub(combiningSigns, r"\1 ", Strng) Strng = re.sub(" 𑪘", "\U00011A98", Strng) Strng = re.sub(fin, r"\1 ", Strng) Strng = re.sub("( )" + fin, r"\2 ", Strng) return Strng def TakriArchaicKha(Strng): return Strng.replace("𑚸", "𑚋") def TeluguReph(Strng): consonants = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Telugu")) + ")" Strng = re.sub("ర్" + consonants, "ర్‍" + r"\1", Strng) Strng = Strng.replace("\u0C4Dర్‍", "\u0C4Dర్") return Strng def PhagsPaTib(Strng): return Strng def PhagsPaSeal(Strng): return Strng def TamilExtendedAnusvara(Strng): Strng = AnusvaraToNasal(Strng, "TamilExtended") Strng = Strng.replace("\u0D02", "മ്‌") return Strng def RomanReadableLongEO(Strng): Strng = Strng.replace("o", "oa") Strng = Strng.replace("oa'", "o") Strng = Strng.replace("e", "ae") Strng = Strng.replace("ae'", "e") Strng = Strng.replace("aeae", "ee") Strng = Strng.replace("oaoa", "oo") return Strng 
def TeluguArasunnaChandrabindu(Strng): Strng = Strng.replace("ఀ", "ఁ") return Strng def MarchenSanskritPalatals(Strng): tsaSeries = ["\U00011C82", "\U00011C83", "\U00011C84"] jaSereis = ["\U00011C76", "\U00011C77", "\U00011C78"] for x, y in zip(tsaSeries, jaSereis): Strng = Strng.replace(x, y) return Strng def SoyomboSanskritPalatals(Strng): tsaSeries = ["𑩵", "𑩶", "𑩷"] caSeries = ["𑩡", "𑩢", "𑩣"] for x, y in zip(tsaSeries, caSeries): Strng = Strng.replace(x, y) return Strng def TibetanSanskritPalatals(Strng): caSeries = ["ཅ", "ཆ", "ཇ", "ཇྷ"] tsaSeries = ["ཙ", "ཚ", "ཛ", "ཛྷ"] for x, y in zip(tsaSeries, caSeries): Strng = Strng.replace(x, y) return Strng def ZanabazarSanskritPalatals(Strng): tsaSeries = ["𑨣", "𑨤", "𑨥"] caSeries = ["𑨐", "𑨑", "𑨒"] for x, y in zip(tsaSeries, caSeries): Strng = Strng.replace(x, y) return Strng def SoyomboFinals(Strng): return Strng def SoyomboInitials(Strng): viraCon = ["\U00011A7C\U00011A99", "\U00011A7D\U00011A99", "\U00011A81\U00011A99"] initial = ["\U00011A86", "\U00011A87", "\U00011A89"] for x, y in zip(viraCon, initial): Strng = Strng.replace(x, y) return Strng def ZanzabarSpaceTsheg(Strng): Strng = Strng.replace(" ", "\U00011A41") return Strng def SoyomboSpaceTscheg(Strng): Strng = Strng.replace(" ", "\U00011A9A") return Strng def AnusvaratoNasalASTISO(Strng): Strng = Strng.replace("ṁ", "ṃ") Strng = re.sub("(ṃ)(k|g)", "ṅ" + r"\2", Strng) Strng = re.sub("(ṃ)(c|j)", "ñ" + r"\2", Strng) Strng = re.sub("(ṃ)(ṭ|ḍ)", "ṇ" + r"\2", Strng) Strng = re.sub("(ṃ)(t|d)", "n" + r"\2", Strng) Strng = re.sub("(ṃ)(p|b)", "m" + r"\2", Strng) return Strng def removeDiacritics(Strng): diacritics = [ "\u0331", "\u0306", "\u0323", "\u035F", "\u0324", "\u035F", "\u0307", "\u0301", "\u0303", "\u0310", "\u0306", "\u0302", "\u0304", ] for dia in diacritics: Strng = Strng.replace(dia, "") vowelDia = ["а̄", "ӣ", "ӯ", "ӗ"] vowel = ["\u0430", "\u0438", "\u0443", "\u044D"] for x, y in zip(vowelDia, vowel): Strng = Strng.replace(x, y) return Strng def ranjanalantsa(Strng): Strng = Strng.replace("་", " ") return Strng def ranjanawartu(Strng): Strng = Strng.replace("་", "࿎ ") return Strng def TaiKuen(Strng): return Strng def TaiThamLao(Strng): return Strng def egrantamil(Strng): return Strng def tibetandbumed(Strng): return Strng def oldtamilortho(Strng): return Strng def nepaldevafont(Strng): return Strng def granthaserif(Strng): return Strng def ChakmaPali(Strng): listC = ( "(" + "|".join( sorted( GM.CrunchSymbols(GM.Consonants, "Chakma") + Chakma.VowelMap[:1], key=len, reverse=True, ) ) + ")" ) listV = ( "(" + "|".join( sorted( GM.CrunchSymbols(GM.VowelSigns, "Chakma") + Chakma.ViramaMap + ["\U00011133"], key=len, reverse=True, ) ) + ")" ) Strng = ChakmaGemination(Strng, reverse=True) Strng = Strng.replace("𑄤", "\U00011147") Strng = Strng.replace("𑄡", "𑄠") Strng = Strng.replace("\U00011127", "\u02BE") Strng = re.sub( "(" + listC + ")" + "(?!" 
+ listV + "|\u02BE" + ")", r"\1" "\U00011127", Strng ) Strng = Strng.replace("\u02BE", "") Strng = Strng.replace("\U00011127", "\U00011102") Strng = Strng.replace("\U00011133", "\U00011134") return Strng def ThaiSajjhayawithA(Strng): Strng = ThaiSajjhayaOrthography(Strng) Strng = Strng.replace("ัง", "ังฺ") Strng = ThaiTranscription(Strng, anusvaraChange=True) Strng = Strng.replace("ะํ", "ํ") Strng = Strng.replace("ะั", "ั") Strng = Strng.replace("ะ๎", "๎") Strng = re.sub("([เโไ])(.๎)([ยรลวศษสหฬ])ะ", r"\1\2\3", Strng) Strng = Strng.replace("\u0E32\u0E4D", "\u0E33").replace("\u0E34\u0E4D", "\u0E36") return Strng def LaoSajjhaya(Strng): Strng = ThaiSajjhayaOrthography(Strng, Script="LaoPali") Strng = re.sub("([ເໂໄ])(.)(\u0ECE)", r"\2\3\1", Strng) return Strng def LaoSajjhayawithA(Strng): Strng = LaoSajjhaya(Strng) Strng = Strng.replace("\u0ECE", "\u0E4E") Strng = Strng.replace("ັງ", "ັງ຺") Strng = CF.LaoPaliTranscribe(Strng, anusvaraChange=True) Strng = Strng.replace("ະໍ", "ໍ") Strng = Strng.replace("ະັ", "ັ") Strng = Strng.replace("ະ๎", "๎") Strng = Strng.replace("ະ໌", "໌") Strng = Strng.replace("ະົ", "ົ") Strng = re.sub("([ເໂໄ])(.๎)([ຍຣລວຨຩສຫຬ])ະ", r"\1\2\3", Strng) Strng = Strng.replace("າໍ", "ຳ") Strng = Strng.replace("\u0E4E", "\u0ECE") return Strng def UseAlternateVSU(Strng): Strng = Strng.replace("𑖲", "𑗜") return Strng def UseAlternateVSUU(Strng): Strng = Strng.replace("𑖳", "𑗝") return Strng def UseAlternateU(Strng): Strng = Strng.replace("𑖄", "𑗛") return Strng def UseAlternateI1(Strng): Strng = Strng.replace("𑖂", "𑗘") return Strng def UseAlternateI2(Strng): Strng = Strng.replace("𑖂", "𑗙") return Strng def UseAlternateII(Strng): Strng = Strng.replace("𑖃", "𑗚") return Strng def GranthaOldau(Strng): Strng = Strng.replace("𑍗", "𑍌") return Strng def DevanagariACandra(Strng): Strng = Strng.replace("ऍ", "ॲ") return Strng def WarangCitiModernOrthogaphy(Strng): Strng = re.sub( "([\U000118D4\U000118D5\U000118CC\U000118CB\U000118CF\U000118CE\U000118D2\U000118D1\U000118D5\U000118D4\U000118D8\U000118D7\U000118DB])(\u200D)(𑣙)", r"\1", Strng, ) Strng = Strng.replace("𑣝", "𑣞") Strng = Strng.replace("\u200D", "") return Strng def ChakmaEnableAllConjuncts(Strng): listC = ( "(" + "|".join( sorted( GM.CrunchSymbols(GM.Consonants, "Chakma") + Chakma.VowelMap[:1], key=len, reverse=True, ) ) + ")" ) Strng = re.sub("\U00011134" + "(" + listC + ")", "\U00011133" + r"\1", Strng) Strng = ChakmaGemination(Strng) return Strng def ChakmaGemination(Strng, reverse=False): ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Chakma")) + ")" virs = "([\U00011134\U00011133])" virExp = "\U00011134" virDep = "\U00011133" ListV = ( "(" + "|".join( sorted(GM.CrunchSymbols(GM.VowelSignsNV, "Chakma"), key=len, reverse=True) ) + ")" ) if not reverse: Strng = re.sub(ListC + virs + r"\1" + ListV, r"\1" + virExp + r"\3", Strng) Strng = re.sub( ListC + virExp + r"\1" + virDep + ListC, r"\1" + virExp + virDep + r"\2", Strng, ) Strng = re.sub( ListC + virDep + r"\1" + virDep + ListC, r"\1" + virExp + virDep + r"\2", Strng, ) Strng = re.sub( virDep + ListC + virExp + ListV, virExp + r"\1" + virExp + r"\2", Strng ) else: Strng = re.sub(ListC + virExp + ListV, r"\1" + virExp + r"\1" + r"\2", Strng) Strng = re.sub(ListC + virExp + virDep, r"\1" + virExp + r"\1" + virDep, Strng) return Strng def ChakmaVowelsIndependent(Strng): vowelDepA = ["𑄃𑄨", "𑄃𑄪", "𑄃𑄬"] vowelIndep = ["\U00011104", "\U00011105", "\U00011106"] for x, y in zip(vowelDepA, vowelIndep): Strng = Strng.replace(x, y) return Strng def MultaniAbjad(Strng): ListAll = ( "(" + 
"|".join(GM.CrunchSymbols(GM.Characters, "Multani") + ["𑊓", "𑊍"]) + ")" ) ListC = ( "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Multani") + ["𑊓", "𑊍"]) + ")" ) ListV = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Multani") + ["𑊓", "𑊍"]) + ")" Strng = re.sub(ListC + ListV + ListC, r"\1\3", Strng) Strng = re.sub("(" + ListC + "{2,})" + ListV, r"\1", Strng) Strng = re.sub(ListV + ListC + ListV, r"\1\2", Strng) return Strng def LaoNative(Strng): Strng = re.sub("ຕ([ເແໂໄ]?)ຕ", "ດ" + r"\1" + "ຕ", Strng) Strng = re.sub("ຕ([ເແໂໄ]?)ຖ", "ດ" + r"\1" + "ຖ", Strng) Strng = re.sub("ທ([ເແໂໄ]?)ທ", "ດ" + r"\1" + "ທ", Strng) Strng = re.sub("ສ([ເແໂໄ]?)ສ", "ດ" + r"\1" + "ສ", Strng) Strng = re.sub("ປ([ເແໂໄ]?)ປ", "ບ" + r"\1" + "ປ", Strng) Strng = re.sub("ພ([ເແໂໄ]?)ພ", "ບ" + r"\1" + "ພ", Strng) return Strng def SundaneseHistoricConjuncts(Strng, reverse=False): ListC = "|".join( GM.CrunchSymbols(GM.Consonants + GM.Vowels + GM.VowelSignsNV, "Sundanese") ) if not reverse: Strng = Strng.replace("᮪ᮙ", "\u1BAC") ListC = "|".join( GM.CrunchSymbols(GM.Consonants + GM.Vowels + GM.VowelSignsNV, "Sundanese") ) Strng = re.sub("(" + ListC + ")" + "ᮊ᮪", r"\1" + "ᮾ", Strng) Strng = re.sub("(" + ListC + ")" + "ᮙ᮪", r"\1" + "ᮿ", Strng) else: Strng = Strng.replace("\u1BAC", "᮪ᮙ") Strng = Strng.replace("\u1BAD", "᮪ᮝ") Strng = Strng.replace("ᮾ", "ᮊ᮪") Strng = Strng.replace("ᮿ", "ᮙ᮪") return Strng def LimbuSpellingSaI(Strng): vir = Limbu.ViramaMap[0] FCons = [ x + vir for x in [Limbu.ConsonantMap[x] for x in [0, 4, 15, 19, 20, 24, 26, 27]] ] FinalCons = [ "\u1930", "\u1931", "\u1933", "\u1934", "\u1935", "\u1936", "\u1937", "\u1938", ] for x, y in zip(FCons, FinalCons): Strng = Strng.replace("\u193A" + y, x) Strng = Strng.replace("\u193A\u1922" + y, "\u1922" + x) return Strng def siddhammukta(Strng): return Strng def tradOrtho(Strng): return Strng def siddhamap(Strng): return Strng def KhojkiRetainSpace(Strng): Strng = Strng.replace("\U0001123A", " ") return Strng def BhaiksukiRetainSpace(Strng): Strng = Strng.replace("𑱃", " ") return Strng def KaithiRetainSpace(Strng): Strng = Strng.replace("⸱", " ") return Strng def MedievalTamilOrthography(Strng): OldEO = ["எ்", "ெ்", "ஒ்", "ெ்ா", "எ", "ெ", "ஒ", "ொ"] NewEO = ["எ", "ெ", "ஒ", "ொ", "ஏ", "ே", "ஓ", "ோ"] for x, y in zip(NewEO, OldEO): Strng = Strng.replace(x, y) return Strng def AmbigousTamilOrthography(Strng): return Strng def NewaMurmurConsonants(Strng): murmur = ["𑐓", "𑐙", "𑐤", "𑐪", "𑐭", "𑐯"] connsh = ["𑐴𑑂𑐒", "𑐴𑑂𑐘", "𑐴𑑂𑐣", "𑐴𑑂𑐩", "𑐴𑑂𑐬", "𑐴𑑂𑐮"] for x, y in zip(murmur, connsh): Strng = Strng.replace(y, x) return Strng def ModiRemoveLong(Strng): Strng = Strng.replace("𑘂", "𑘃") Strng = Strng.replace("𑘅", "𑘄") Strng = Strng.replace("𑘱", "𑘲") Strng = Strng.replace("𑘴", "𑘳") Strng = Strng.replace("𑘆", "𑘨𑘲") Strng = Strng.replace("𑘇", "𑘨𑘲") Strng = Strng.replace("𑘈", "𑘩𑘲") Strng = Strng.replace("𑘉", "𑘩𑘲") Strng = Strng.replace("𑘵", "𑘿𑘨𑘲") Strng = Strng.replace("𑘶", "𑘿𑘨𑘲") Strng = Strng.replace("𑘷", "𑘿𑘩𑘲") Strng = Strng.replace("𑘸", "𑘿𑘩𑘲") return Strng def LimbuDevanagariConvention(Strng): Strng = Strng.replace("ऎ", "ए़") Strng = Strng.replace("ऒ", "ओ़") Strng = Strng.replace("ॆ", "े़") Strng = Strng.replace("ॊ", "ो़") Strng = Strng.replace("꞉", "ः") return Strng def NandinagariPrishtamatra(Strng, reverse=False): if not reverse: Strng = Strng.replace("𑧚", "𑧤") Strng = Strng.replace("𑧛", "𑧤𑧚") Strng = Strng.replace("𑧜", "𑧤𑧑") Strng = Strng.replace("𑧝", "𑧤𑧜") else: Strng = Strng.replace("𑧤𑧚", "𑧛") Strng = Strng.replace("𑧤𑧑", "𑧜") Strng = Strng.replace("𑧤𑧜", "𑧝") Strng = Strng.replace("𑧤", 
"𑧚") return Strng def DevanagariPrishtamatra(Strng, reverse=False): if not reverse: Strng = Strng.replace("े", "ॎ") Strng = Strng.replace("ै", "ॎे") Strng = Strng.replace("ो", "ॎा") Strng = Strng.replace("ौ", "ॎो") else: Strng = Strng.replace("ॎे", "ै") Strng = Strng.replace("ॎो", "ौ") Strng = Strng.replace("ॎा", "ो") Strng = Strng.replace("ॎ", "े") return Strng def ThaanaRemoveHistorical(Strng): return Strng.replace("ޱ", "ނ") def OriyaVaAlt(Strng): return Strng.replace("ୱ", "ଵ") def GurmukhiYakaash(Strng, reverse=False): if not reverse: Strng = Strng.replace("੍ਯ", "ੵ") else: Strng = Strng.replace("ੵ", "੍ਯ") return Strng def dotReph(Strng): ListC = "(" + "|".join(sorted(GM.CrunchSymbols(GM.Consonants, "Malayalam"))) + ")" Strng = re.sub("(?<!്)" + "ർ" + ListC, "ൎ" + r"\1", Strng) Strng = re.sub("(?<!്)" + "ര്" + ListC, "ൎ" + r"\1", Strng) return Strng def TamilGranthaVisarga(Strng): Strng = Strng.replace("꞉", "𑌃") return Strng def archaicAIAU(Strng): Strng = Strng.replace("ൗ", "ൌ") Strng = Strng.replace("ഈ", "ൟ") return Strng def MalayalamremoveHistorical(Strng): Strng = Strng.replace("\u0D29", "\u0D28") Strng = Strng.replace("ന‍്", "ൻ") return Strng def LimburemoveHistorical(Strng): removePairs = [("ᤉ", "ᤈ"), ("ᤊ", "ᤏ"), ("ᤚ", "ᤙ"), ("ᤲ", "ᤱ")] for x, y in removePairs: Strng = Strng.replace(x, y) return Strng def MalayalamPrakrit(Strng): Strng = Strng.replace("ം", "ഀ") Strng = InsertGeminationSign(Strng, "Malayalam") return Strng def GranthaPrakrit(Strng): Strng = Strng.replace("𑌂", "𑌀") Strng = InsertGeminationSign(Strng, "Grantha") pat = r"\s𑌂." Strng = functools.reduce( lambda s, m: s.replace(m, ReverseGeminationSign(m, "Grantha")), re.findall(pat, Strng), Strng, ) pat = r"𑍍𑌂." Strng = functools.reduce( lambda s, m: s.replace(m, ReverseGeminationSign(m, "Grantha")), re.findall(pat, Strng), Strng, ) return Strng def MeeteiMayekremoveHistorical(Strng): removePairs = [ ("ꫢ", "ꯆ"), ("ꫣ", "ꯅ"), ("ꫤ", "ꯇ"), ("ꫥ", "ꯊ"), ("ꫦ", "ꯗ"), ("ꫧ", "ꯙ"), ("ꫨ", "ꯅ"), ("ꫩ", "ꯁ"), ("ꫪ", "ꯁ"), ("\uAAF5", "ꯍ꯭"), ("ꯑꫫ", "ꯏ"), ("ꯑꫬ", "ꯎ"), ("ꫫ", "ꯤ"), ("ꫬ", "ꯨ"), ] for x, y in removePairs: Strng = Strng.replace(x, y) return Strng def TamilOmDisable(Strng): return Strng.replace("ௐ", "ஓம்") def TamilSHADisable(Strng): return Strng.replace("ஶ", "ஸ²") def TamilNaToNNa(Strng): na = Tamil.ConsonantMap[19] nna = Tamil.SouthConsonantMap[3] vir = Tamil.ViramaMap[0] ta = Tamil.ConsonantMap[15] ListV = "|".join( GM.CrunchSymbols(GM.Vowels + GM.VowelSigns + GM.Consonants, "Tamil") + [Tamil.SignMap[0].replace("(", "\(").replace(")", "\)")] ) Strng = re.sub( "(" + ListV + ")" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ta + ")", r"\1\2" + nna, Strng, ) Strng = re.sub( "(" + ListV + ")" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ta + ")", r"\1\2" + nna, Strng, ) Strng = re.sub( "(²|³|⁴)" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ta + ")", r"\1\2" + nna, Strng, ) Strng = re.sub( "(²|³|⁴)" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ta + ")", r"\1\2" + nna, Strng, ) Strng = re.sub("(?<=ஶ்ரீ)(ன)(?!" 
+ vir + ")", "ந", Strng) return Strng def MalayalamChillu(Strng, reverse=False, preserve=False): Chillus = ["\u0D7A", "\u0D7B", "\u0D7C", "\u0D7D", "\u0D7E", "ഩ‍്"] ListC = "(" + "|".join(GM.CrunchSymbols(GM.CharactersNV, "Malayalam") + ["ഽ"]) + ")" vir = Malayalam.ViramaMap[0] ConVir = [ Malayalam.ConsonantMap[14] + vir, Malayalam.ConsonantMap[19] + vir, Malayalam.ConsonantMap[26] + vir, Malayalam.ConsonantMap[27] + vir, Malayalam.SouthConsonantMap[0] + vir, "ഩ്", ] CList = [ Malayalam.ConsonantMap[10:15] + Malayalam.ConsonantMap[24:26] + Malayalam.ConsonantMap[28:29], Malayalam.ConsonantMap[15:20] + Malayalam.ConsonantMap[24:27] + Malayalam.ConsonantMap[28:29], Malayalam.ConsonantMap[25:27], Malayalam.ConsonantMap[20:21] + Malayalam.ConsonantMap[24:26] + Malayalam.ConsonantMap[27:29], Malayalam.SouthConsonantMap[0:1] + Malayalam.ConsonantMap[25:27], Malayalam.ConsonantMap[15:20] + Malayalam.ConsonantMap[24:27] + Malayalam.ConsonantMap[28:29], ] if not reverse: for i in range(len(Chillus)): Strng = re.sub( ListC + GM.VedicSvaras + "(" + ConVir[i] + ")" + "(?![" + "".join(CList[i]) + "])", r"\1\2" + Chillus[i], Strng, ) Strng = re.sub( ListC + GM.VedicSvaras + "(" + ConVir[i] + ")" + "(?=([" + "".join(CList[i]) + "])" + vir + r"\4" + ")", r"\1\2" + Chillus[i], Strng, ) Strng = re.sub("(?<!ത്)ˍ", "", Strng) else: if preserve: for x, y in zip(Chillus, ConVir): Strng = Strng.replace(x, y + "ˍ") else: for x, y in zip(Chillus, ConVir): Strng = Strng.replace(x, y) return Strng def RemoveSchwa(Strng, Target): vir = ( GM.CrunchSymbols(GM.VowelSigns, Target)[0] + GM.CrunchSymbols(GM.VowelSigns, Target)[0] ) ListC = "|".join(GM.CrunchSymbols(GM.Consonants, Target)) ListV = "|".join(GM.CrunchSymbols(GM.Vowels, Target)) ListVS = "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Target)) ListAll = "|".join( GM.CrunchSymbols( GM.Vowels + GM.VowelSigns + GM.Consonants + GM.CombiningSigns, Target ) ) Strng = re.sub( "(" + ListAll + ")" + "(" + ListC + ")" + "(?!" + ListAll + ")", r"\1\2" + vir, Strng, ) Strng = re.sub( "(" + ListAll + ")" + "(?<!" 
+ vir + ")" + "(" + ListC + ")" + "(" + ListC + ")" + "(" + ListVS + ")", r"\1\2" + vir + r"\3\4", Strng, ) return Strng def InsertGeminationSign(Strng, Target): vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0] ConUnAsp = [ GM.CrunchList("ConsonantMap", Target)[x] for x in [ 0, 2, 5, 7, 10, 12, 15, 17, 20, 22, 4, 9, 14, 19, 24, 25, 26, 27, 28, 29, 30, 31, 32, ] ] ConUnAsp = ( ConUnAsp + GM.CrunchList("SouthConsonantMap", Target) + GM.CrunchList("NuktaConsonantMap", Target) ) ConAsp = [ GM.CrunchList("ConsonantMap", Target)[x] for x in [1, 3, 6, 8, 11, 13, 16, 18, 21, 23] ] ConOthrs = [ GM.CrunchList("ConsonantMap", Target)[x] for x in [0, 2, 5, 7, 10, 12, 15, 17, 20, 22, 4, 9, 14, 19, 24] ] Strng = re.sub( "(" + "|".join(ConUnAsp) + ")" + "(" + vir + ")" + r"\1", GM.Gemination[Target] + r"\1", Strng, ) for i in range(len(ConAsp)): Strng = re.sub( "(" + ConUnAsp[i] + ")" + "(" + vir + ")" + "(" + ConAsp[i] + ")", GM.Gemination[Target] + r"\3", Strng, ) return Strng def ReverseGeminationSign(Strng, Target): vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0] ConUnAsp = [ GM.CrunchList("ConsonantMap", Target)[x] for x in [ 0, 2, 5, 7, 10, 12, 15, 17, 20, 22, 4, 9, 14, 19, 24, 25, 26, 27, 28, 29, 30, 31, 32, ] ] ConUnAsp = ( ConUnAsp + GM.CrunchList("SouthConsonantMap", Target) + GM.CrunchList("NuktaConsonantMap", Target) ) ConAsp = [ GM.CrunchList("ConsonantMap", Target)[x] for x in [1, 3, 6, 8, 11, 13, 16, 18, 21, 23] ] ConOthrs = [ GM.CrunchList("ConsonantMap", Target)[x] for x in [0, 2, 5, 7, 10, 12, 15, 17, 20, 22, 4, 9, 14, 19, 24] ] Strng = re.sub( "(" + GM.Gemination[Target] + ")" + "(" + "|".join(ConUnAsp) + ")", r"\2" + vir + r"\2", Strng, ) for i in range(len(ConAsp)): Strng = re.sub( "(" + GM.Gemination[Target] + ")" + "(" + ConAsp[i] + ")", ConUnAsp[i] + vir + r"\2", Strng, ) return Strng def GurmukhiTippiBindu(Strng): Bindi = Gurmukhi.AyogavahaMap[1] Tippi = "\u0A70" ListTippi = "|".join( GM.CrunchSymbols(GM.Consonants, "Gurmukhi") + [Gurmukhi.VowelMap[x] for x in [0, 2, 3, 4]] + [Gurmukhi.VowelSignMap[1]] + [Gurmukhi.VowelSignMap[3]] + [Gurmukhi.VowelSignMap[4]] ) Char = "|".join( GM.CrunchSymbols(GM.Consonants, "Gurmukhi") + GM.CrunchSymbols(GM.Vowels, "Gurmukhi") ) Strng = re.sub( "(" + Gurmukhi.VowelSignMap[4] + ")" + Bindi + "(?!" 
+ Char + ")", r"\1" + Tippi, Strng, ) Strng = re.sub("(" + ListTippi + ")" + "(" + Bindi + ")", r"\1" + Tippi, Strng) return Strng def GurmukhiTippiGemination(Strng): n = Gurmukhi.ConsonantMap[19] m = Gurmukhi.ConsonantMap[24] vir = Gurmukhi.ViramaMap[0] Addak = "ੱ" Tippi = "\u0A70" Strng = Strng.replace(Addak + m, Tippi + m) Strng = Strng.replace(Addak + n, Tippi + n) return Strng def BengaliConjunctVB(Strng): Strng = Strng.replace("\u09CD\u200C\u09AC", "\u09CD\u09AC") Strng = khandatabatova(Strng) return Strng def khandatabatova(Strng): Strng = Strng.replace("ৎব", "ত্ব") Strng = Strng.replace("ৎ\u200Cব", "ত্ব") return Strng def BengaliRaBa(Strng): Strng = ( Strng.replace("ব", "ৰ") .replace("ভ়", "ব") .replace("ৰু", "ৰ‌ু") .replace("ৰূ", "ৰ‌ূ") ) Strng = Strng.replace("\u09CD\u09F0", "\u09CD\u200C\u09F0") Strng = re.sub( "(\u09F0)(\u09CD)([\u09B0\u09AF])", r"\1" + "\u200D" + r"\2\3", Strng ) Strng = re.sub("(\u09F0)(\u09CD)", r"\1\2" + "\u200C", Strng) Strng = Strng.replace("র্‌ৰ", "ৰ্ৰ") return Strng def BengaliIntervocalicDDA(Strng): Target = "Bengali" ListC = "|".join( GM.CrunchSymbols(GM.Characters, Target) + [GM.CrunchList("SignMap", Target)[0]] + ["ৰ"] ) replacements = [("ড", "ড়"), ("ঢ", "ঢ়")] for x, y in replacements: Strng = re.sub("(" + ListC + ")" + GM.VedicSvaras + x, r"\1\2" + y, Strng) return Strng def KhandaTa(Strng, Target, reverse=False): ta = GM.CrunchSymbols(GM.Consonants, Target)[15] khandata = "\u09CE" vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0] ListC = "|".join( [ GM.CrunchList("ConsonantMap", Target)[x] for x in [15, 16, 19, 27, 24, 25, 26, 28] ] + ["ৰ", "য়"] ) if not reverse: Strng = re.sub( "(?<!" + vir + ")" + "(" + ta + ")" + "(" + vir + ")" + "(?!" + ListC + ")", khandata, Strng, ) Strng = Strng.replace("ৎˍ", "ৎ") else: Strng = Strng.replace(khandata, ta + vir) return Strng def NasalToAnusvara(Strng, Target): ListN = [GM.CrunchSymbols(GM.Consonants, Target)[x] for x in [4, 9, 14, 19, 24]] ListC = [ "|".join(GM.CrunchList("ConsonantMap", Target)[0:4]), "|".join(GM.CrunchList("ConsonantMap", Target)[5:9]), "|".join(GM.CrunchList("ConsonantMap", Target)[10:14]), "|".join(GM.CrunchList("ConsonantMap", Target)[15:19]), "|".join(GM.CrunchList("ConsonantMap", Target)[20:24]), ] ListCAll = "(" + "|".join(GM.CrunchSymbols(GM.Characters, Target)) + ")" vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0] Anu = GM.CrunchSymbols(GM.CombiningSigns, Target)[1] for i in range(len(ListN)): Strng = re.sub( ListCAll + GM.VedicSvaras + "(?<!" + vir + ")" + "(" + ListN[i] + ")" + "(" + vir + ")" + "(" + ListC[i] + ")", r"\1\2" + Anu + r"\5", Strng, ) Strng = re.sub( ListCAll + GM.VedicSvaras + "(?<!" 
+ vir + ")" + "(" + ListN[i] + ")" + "(" + vir + ")" + "(" + ListC[i] + ")", r"\1\2" + Anu + r"\5", Strng, ) for svara in GM.VedicSvarasList: Strng = Strng.replace(svara + Anu, Anu + svara) return Strng def AnusvaraToNasal(Strng, Target): ListN = [GM.CrunchSymbols(GM.Consonants, Target)[x] for x in [4, 9, 14, 19, 24]] ListC = [ "|".join(GM.CrunchList("ConsonantMap", Target)[0:4]), "|".join(GM.CrunchList("ConsonantMap", Target)[5:9]), "|".join(GM.CrunchList("ConsonantMap", Target)[10:14]), "|".join(GM.CrunchList("ConsonantMap", Target)[15:19]), "|".join(GM.CrunchList("ConsonantMap", Target)[20:24]), ] vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0] Anu = GM.CrunchSymbols(GM.CombiningSigns, Target)[1] for i in range(len(ListN)): Strng = re.sub( "(" + Anu + ")" + GM.VedicSvaras + "(" + ListC[i] + ")", ListN[i] + vir + r"\2\3", Strng, ) if Target == "Tamil": Strng = re.sub( "(ம்)" + GM.VedicSvaras + "(ʼ)" + "(" + ListC[i] + ")", ListN[i] + vir + r"\2\4", Strng, ) return Strng def MalayalamAnusvaraNasal(Strng): ListNNasal = [Malayalam.ConsonantMap[x] for x in [4, 9, 14, 19, 24]] ListCNasal = [ "|".join(Malayalam.ConsonantMap[0:1]), "|".join(Malayalam.ConsonantMap[5:8]), "|".join(Malayalam.ConsonantMap[10:14]), "|".join(Malayalam.ConsonantMap[15:19]), "|".join(Malayalam.ConsonantMap[20:21]), ] ListNAnu = [Malayalam.ConsonantMap[x] for x in [4, 24]] ListCAnu = [ "|".join(Malayalam.ConsonantMap[1:4]), "|".join(Malayalam.ConsonantMap[21:24]), ] vir = Malayalam.ViramaMap[0] Anu = Malayalam.AyogavahaMap[1] Chillus = ["\u0D7A", "\u0D7B", "\u0D7C", "\u0D7D", "\u0D7E", "ഩ‍്"] for i in range(len(ListNNasal)): Strng = re.sub( "(" + Anu + ")" + "(" + ListCNasal[i] + ")", ListNNasal[i] + vir + r"\2", Strng, ) for i in range(len(ListNAnu)): Strng = re.sub( "(?<![" + ".".join(Chillus) + "])" + "(" + ListNAnu[i] + ")" + "(" + vir + ")" + "(" + ListCAnu[i] + ")", Anu + r"\3", Strng, ) return Strng def MToAnusvara(Strng, Target): M = ( GM.CrunchList("ConsonantMap", Target)[24] + GM.CrunchList("ViramaMap", Target)[0] ) vir = GM.CrunchList("ViramaMap", Target)[0] Anusvara = GM.CrunchList("AyogavahaMap", Target)[1] ListC = "|".join(GM.CrunchSymbols(GM.Characters, Target)) Chillus = "|".join([vir, "\u0D7A", "\u0D7B", "\u0D7C", "\u0D7D", "\u0D7E"]) ListCAll = "(" + "|".join(GM.CrunchSymbols(GM.Characters, Target)) + ")" Strng = re.sub( ListCAll + GM.VedicSvaras + "(?<!" + vir + ")" + "(" + M + ")" + "(?!" 
+ ListC + ")", r"\1\2" + Anusvara, Strng, ) for svara in GM.VedicSvarasList: Strng = Strng.replace(svara + Anusvara, Anusvara + svara) return Strng def OriyaYYA(Strng): return YYAEverywhere(Strng, "Oriya") def BengaliYYA(Strng): return YYAEverywhere(Strng, "Bengali") def YYAEverywhere(Strng, Target): Ya = GM.CrunchList("ConsonantMap", Target)[25] YYa = GM.CrunchList("NuktaConsonantMap", Target)[7] Strng = Strng.replace(Ya, YYa) return Strng def YaToYYa(Strng, Target): YYa = GM.CrunchList("NuktaConsonantMap", Target)[7] ListC = "|".join( GM.CrunchSymbols(GM.Characters, Target) + [GM.CrunchList("SignMap", Target)[0]] + ["ৰ"] ) ListS = "(" + "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Target)) + ")" Ya = GM.CrunchList("ConsonantMap", Target)[25] vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0] ListVarga = "|".join(GM.CrunchList("ConsonantMap", Target)[0:25]) if Target in ["Assamese", "Bengali", "Oriya", "Chakma"]: Strng = re.sub("(" + ListC + ")" + GM.VedicSvaras + Ya, r"\1\2" + YYa, Strng) if Target in ["Assamese", "Bengali"]: Strng = Strng.replace(vir + YYa, vir + Ya) if Target == "Chakma": Strng = Strng.replace("𑄠𑄡", "𑄠𑄠") Strng = Strng.replace(vir + YYa, "\U00011133" + YYa) return Strng def VaToBa(Strng, Target): va = GM.CrunchSymbols(GM.Consonants, Target)[28] ba = GM.CrunchSymbols(GM.Consonants, Target)[22] if Target == "Bengali": pass Strng = Strng.replace(va, ba) return Strng def tbadiff(Strng, Target): Strng = Strng.replace("ৎব", "ত্ব") return Strng def RetainDandasIndic(Strng, Target, reverse=False): Dandas = GM.CrunchList("SignMap", Target)[1:3] if not reverse: Strng = Strng.replace("..", Dandas[1]) Strng = Strng.replace(".", Dandas[0]) else: Strng = Strng.replace(Dandas[0], ".") Strng = Strng.replace(Dandas[1], "..") return Strng def RetainIndicNumerals(Strng, Target, reverse=False): NativeNumerals = GM.CrunchList("NumeralMap", Target) ArabicNumerals = GM.CrunchList("NumeralMap", "ISO") if not reverse: for x, y in zip(ArabicNumerals, NativeNumerals): Strng = re.sub("(?<!h)" + x, y, Strng) else: for x, y in zip(NativeNumerals, ArabicNumerals): Strng = Strng.replace(x, y) return Strng def RetainRomanNumerals(Strng, Target, reverse=False): NativeNumerals = GM.CrunchList("NumeralMap", Target) ArabicNumerals = GM.CrunchList("NumeralMap", "ISO") if not reverse: for y, x in zip(ArabicNumerals, NativeNumerals): Strng = re.sub("(?<!h)" + x, y, Strng) else: for y, x in zip(NativeNumerals, ArabicNumerals): Strng = Strng.replace(x, y) return Strng def RetainTeluguDanda(Strng): return RetainDandasIndic(Strng, "Telugu") def RetainTeluguNumerals(Strng): return RetainIndicNumerals(Strng, "Telugu") def RetainTamilDanda(Strng): return RetainDandasIndic(Strng, "Tamil") def RetainTamilNumerals(Strng): return RetainIndicNumerals(Strng, "Tamil") def RetainKannadaDanda(Strng): return RetainDandasIndic(Strng, "Kannada") def RetainKannadaNumerals(Strng): return RetainIndicNumerals(Strng, "Kannada") def RetainMalayalamDanda(Strng): return RetainDandasIndic(Strng, "Malayalam") def RetainMalayalamNumerals(Strng): return RetainIndicNumerals(Strng, "Malayalam") def RetainGujaratiDanda(Strng): return RetainDandasIndic(Strng, "Gujarati") def RetainGurmukhiNumerals(Strng): return RetainIndicNumerals(Strng, "Gurmukhi") def SundaneseRemoveHistoric(Strng): Strng = Strng.replace("᮪ᮻ", "ᮢᮩ") Strng = Strng.replace("᮪ᮼ", "ᮣᮩ") Strng = Strng.replace("ᮻ", "ᮛᮩ") Strng = Strng.replace("ᮼ", "ᮜᮩ") Strng = Strng.replace("\u1BBD", "\u1B98") return Strng def OriyaVa(Strng): va = Oriya.ConsonantMap[28] OriyaVa = "\u0B2C" Strng = 
re.sub("(?<!୍)" + va, OriyaVa, Strng) return Strng def RemoveDiacritics(Strng): for x in GM.DiacriticsRemovable: Strng = Strng.replace(x, "") return Strng def RemoveDiacriticsTamil(Strng): for x in GM.DiacriticsRemovableTamil: Strng = Strng.replace(x, "") return Strng def TamilSubScript(Strng): SuperScript = ["\u00B9", "\u00B2", "\u00B3", "\u2074"] SubScript = ["\u2081", "\u2082", "\u2083", "\u2084"] for x, y in zip(SuperScript, SubScript): Strng = Strng.replace(x, y) return Strng def TamilAddFirstVarga(Strng): CM = GM.CrunchList("ConsonantMap", "Tamil") ConUnVoiced = "|".join([CM[x] for x in [0, 5, 10, 15, 20]]) SuperScript = "|".join(["\u00B2", "\u00B3", "\u2074"]) Strng = re.sub( "(" + ConUnVoiced + ")" + "(?!" + SuperScript + ")", r"\1" + "\u00B9", Strng ) return Strng def SaurashtraHaru(Strng): ListC = "|".join([Saurashtra.ConsonantMap[x] for x in [19, 24, 26, 27]]) vir = Saurashtra.ViramaMap[0] ha = Saurashtra.ConsonantMap[32] Strng = re.sub("(" + ListC + ")" + vir + ha, r"\1" + "\uA8B4", Strng) return Strng def SinhalaDefaultConjuncts(Strng): vir = Sinhala.ViramaMap[0] YR = "|".join(Sinhala.ConsonantMap[25:27]) Strng = re.sub("(" + vir + ")" + "(" + YR + ")", r"\1" + "\u200D" + r"\2", Strng) Strng = re.sub( "(" + YR[2] + ")" + "(" + vir + ")" + "(" + "\u200D" + ")" + "(" + YR[0] + ")", r"\1\3\2\3\4", Strng, ) Strng = Strng.replace( Sinhala.ConsonantMap[7] + Sinhala.ViramaMap[0] + Sinhala.ConsonantMap[9], "\u0DA5", ) Strng = Strng.replace( Sinhala.ConsonantMap[0] + vir + Sinhala.ConsonantMap[30], Sinhala.ConsonantMap[0] + vir + "\u200D" + Sinhala.ConsonantMap[30], ) Strng = Strng.replace("ර‍්‍ය", "ර්ය") Strng = Strng.replace("ර්‍ර", "ර්ර") return Strng def IASTPali(Strng): Strng = Strng.replace("l̤", "ḷ") return Strng def CyrillicPali(Strng): Strng = Strng.replace("л̤", "л̣") return Strng def SinhalaConjuncts(Strng): ListC = Sinhala.ConsonantMap + [Sinhala.SouthConsonantMap[0]] vir = Sinhala.ViramaMap[0] ZWJ = "\u200D" conjoining = [ (0, 28), (2, 18), (9, 5), (10, 11), (15, 16), (15, 28), (17, 18), (17, 28), (19, 16), (19, 17), (19, 18), (19, 28), ] for x, y in conjoining: Strng = Strng.replace( ListC[x] + vir + ListC[y], ListC[x] + vir + ZWJ + ListC[y] ) for x in ListC: Strng = Strng.replace(ListC[26] + vir + x, ListC[26] + vir + ZWJ + x) for x in ListC: for y in ListC: Strng = Strng.replace(x + vir + y, x + ZWJ + vir + y) Strng = Strng.replace("ර‍්‍ය", "ර්‍ය") return Strng def SinhalaPali(Strng, reverse=False): EOLong = ( Sinhala.VowelMap[10:11] + Sinhala.VowelMap[12:13] + Sinhala.VowelSignMap[9:10] + Sinhala.VowelSignMap[11:12] ) EOShort = Sinhala.SouthVowelMap + Sinhala.SouthVowelSignMap for x, y in zip(EOLong, EOShort): if not reverse: Strng = Strng.replace(x, y) else: Strng = Strng.replace(y, x) return Strng def UrduAlternateUU(Strng): Strng = Strng.replace("\\u064F\\u0648", "\u0648\u0657") return Strng def TibetanNada(Strng): Strng = Strng.replace("\u0F83", "\u0F82") return Strng def TibetanTsheg(Strng): Strng = Strng.replace("\u0F0B", " ") return Strng def TibetanRemoveVirama(Strng): Strng = Strng.replace(Tibetan.ViramaMap[0], "") return Strng def TibetanRemoveBa(Strng): Strng = VaToBa(Strng, "Tibetan") Strng = Strng.replace("ཪྺ", "རྦ") Strng = Strng.replace("བྺ", "བྦ") Strng = Strng.replace("ྦྺ", "ྦྦ") return Strng def ThaiLaoTranscription( Strng, Script, shortA, shortAconj, reverse=False, anusvaraChange=True ): Strng = Strng.replace("\u02BD", "") cons = "|".join( GM.CrunchSymbols(GM.Consonants, Script) + GM.CrunchList("VowelMap", Script)[0:1] ) if Script == "Thai": cons = 
"|".join( GM.CrunchSymbols(GM.Consonants, Script) + GM.CrunchList("VowelMap", Script)[0:1] + ["ฮ", "บ", "ฝ", "ด"] ) if Script == "Lao": cons = "|".join( GM.CrunchSymbols(GM.Consonants, Script) + GM.CrunchList("VowelMap", Script)[0:1] + ["ດ", "ບ", "ຟ"] ) consnA = cons[:-2] listVS = "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Script)) vir = GM.CrunchList("ViramaMap", Script)[0] AIUVir = "".join(GM.CrunchList("VowelSignMap", Script)[0:5] + [vir]) EAIO = "".join( GM.CrunchList("VowelSignMap", Script)[9:12] + GM.CrunchList("SinhalaVowelSignMap", Script)[:] ) Anu = GM.CrunchList("AyogavahaMap", Script)[1] ng = GM.CrunchList("ConsonantMap", Script)[4] vowA = GM.CrunchList("VowelMap", Script)[0] if anusvaraChange: Strng = AnusvaraToNasal(Strng, Script) if not reverse: if Script == "Thai": Strng = re.sub( "([" + EAIO + "])" + "(" + cons + ")" + "(" + vir + ")", r"\2\3\1", Strng, ) Strng = Strng.replace("\u0E33", "\u0E32\u0E4D").replace( "\u0E36", "\u0E34\u0E4D" ) if Script == "LaoPali": Strng = Strng.replace("ຳ", "າໍ") if anusvaraChange: Strng = Strng.replace(Anu, ng + vir) Strng = re.sub( "(?<![" + EAIO + "])" + "(" + cons + ")" + "(?![" + AIUVir + "])", r"\1" + shortA, Strng, ) Strng = re.sub( "(" + shortA + ")" + "(?=(" + cons + ")" + "(" + vir + "))", shortAconj, Strng, ) Strng = Strng.replace(shortAconj + "ห" + vir, "ห" + vir) Strng = re.sub( "(" + shortAconj + ")" + "(.)(" + vir + ")([รล])", shortA + r"\2\3\4", Strng ) consswap = "|".join(GM.CrunchSymbols(GM.Consonants, "Thai")) Strng = re.sub( "(" + consswap + ")" + "(" + vir + ")" + "([" + EAIO + "])" + "([รล])", r"\3\1\2\4", Strng, ) Strng = re.sub(shortAconj + "([" + EAIO + "])", shortA + r"\1", Strng) Strng = Strng.replace(vir, "") Strng = Strng.replace(shortAconj + "ร", "รร") else: consOnly = "|".join(GM.CrunchSymbols(GM.Consonants, Script)) aVow = GM.CrunchList("VowelMap", Script)[0] Strng = re.sub( "(" + consnA + ")" + "(?!" 
+ listVS + "|" + shortA + "|" + shortAconj + ")", r"\1" + vir, Strng, ) if Script == "Lao": Strng = re.sub( "(?<!ໂ)" + "(?<!ແ)" + "(?<!ເ)" + "(" + aVow + ")" + "(?<!ເ)" + shortA + "|" + shortAconj, r"\1", Strng, ) Strng = re.sub( "(" + consOnly + ")" + "(?<!າ|ໂ|ແ|ເ)" + shortA + "|" + shortAconj, r"\1", Strng, ) Strng = Strng.replace("຺ຳ", "ຳ") else: Strng = re.sub( "(?<!โ)" + "(?<!แ)" + "(?<!เ)" + "(" + aVow + ")" + "(?<!เ)" + shortA + "|" + shortAconj, r"\1", Strng, ) Strng = re.sub( "(" + consOnly + ")" + "(?<!า|โ|แ|เ)" + shortA + "|" + shortAconj, r"\1", Strng, ) Strng = re.sub(vir + "รฺรฺ", "รฺ", Strng) Strng = re.sub(vir + "หฺ", "หฺ", Strng) return Strng def LaoTranscription(Strng): Strng = CF.LaoPaliTranscribe(Strng) Strng = Strng.replace("ະ໌", "໌") return Strng def ThaiVisargaSaraA(Strng): Strng = Strng.replace("ห์", "ะ") return Strng def ThamTallADisable(Strng): Strng = Strng.replace("\u1A64", "\u1A63") return Strng def ThamTallAOthers(Strng): TallACons = "|".join(["ᨧ", "ᨻ", "ᩁ", "ᨽ"]) Strng = FixTallA(Strng, TallACons) return Strng def LaoPhonetic(Strng): Strng = re.sub("(\u0EBA)([ໂເໄ]?)([ຍຣລວຫ])", "\u035C" + r"\2\3", Strng) Strng = re.sub( "([ຍຣລວຫ])" + "\u035C" + "([ໂເໄ]?)" + r"\1", r"\1" + "\u0EBA" + r"\2\1", Strng ) Strng = Strng.replace("ຫ\u0EBA", "ຫ\u035C") Strng = re.sub("([ຍຣລວຫ])" + "\u035C" + r"\1", r"\1" + "\u0EBA" + r"\1", Strng) Strng = LaoTranscription(Strng) Strng = Strng.replace("\u0EB0\u035C", "\u035C") Strng = Strng.replace("ງ", "ງໍ") Strng = Strng.replace("ທ", "ດ") Strng = Strng.replace("ພ", "ບ") return Strng def RephaDoubleMalayalam(Strng): repha = "[ർൎ]" Target = "Malayalam" vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0] ConUnAsp = [ GM.CrunchList("ConsonantMap", Target)[x] for x in [0, 2, 5, 7, 10, 12, 15, 17, 20, 22, 4, 9, 14, 19, 24, 25, 28, 29, 31] ] ConUnAsp = ConUnAsp + ["ള"] ConAsp = [ GM.CrunchList("ConsonantMap", Target)[x] for x in [1, 3, 6, 8, 11, 13, 16, 18, 21] ] Strng = re.sub( "(" + repha + ")" + "(" + "|".join(ConUnAsp) + ")", r"\1\2" + vir + r"\2", Strng ) for i in range(len(ConAsp)): Strng = re.sub( "(" + repha + ")" + "(" + ConAsp[i] + ")", r"\1" + ConUnAsp[i] + vir + r"\2", Strng, ) return Strng def DograShaKha(Strng): Strng = Strng.replace("𑠨", "𑠋") return Strng def ThamShiftMaiKangLai(Strng): Strng = re.sub("(\u1A58)(.)", r"\2\1", Strng) ListV = "(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "TaiTham") + ["ᩤ"]) + ")" Strng = re.sub("(\u1A58)([\u1A55\u1A56])", r"\2\1", Strng) Strng = re.sub("(\u1A58)(\u1A60.)", r"\2\1", Strng) Strng = re.sub("(\u1A58)" + ListV, r"\2\1", Strng) Strng = re.sub("(\u1A58)" + ListV, r"\2\1", Strng) return Strng def FixTallA(Strng, TallACons): ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "TaiTham")) Sub = ["\u1A55", "\u1A56"] E = "ᩮ" AA = "ᩣ" Strng = re.sub( "(?<!᩠)(" + TallACons + ")" + "(" + E + "?)" + AA, r"\1\2" + "ᩤ", Strng ) Strng = re.sub( "(" + TallACons + ")(᩠)(" + ListC + ")" + "(" + E + "?)" + AA, r"\1\2\3\4" + "ᩤ", Strng, ) Strng = re.sub( "(" + TallACons + ")(᩠)(" + ListC + ")" + "(᩠)(" + ListC + ")" + "(" + E + "?)" + AA, r"\1\2\3\4\5\6" + "ᩤ", Strng, ) Strng = re.sub( "(" + TallACons + ")" + "(" + "|".join(Sub) + ")" + "(" + E + "?)" + AA, r"\1\2\3" + "ᩤ", Strng, ) reverseSub = "([" + "".join(["ᨥ", "ᨫ", "ᨬ", "ᨰ", "ᨸ", "ᩈ", "ᨿ", "ᩇ", "ᨹ"]) + "])" Strng = re.sub( "(\u1A60)" + reverseSub + "(\u1A6E\u1A64)", r"\1\2" + "\u1A6E\u1A63", Strng ) Strng = re.sub("(\u1A60)" + reverseSub + "(\u1A64)", r"\1\2" + "\u1A63", Strng) return Strng def ThaiSajjhayaOrthography(Strng, Script="Thai"): Strng = 
CF.ThaiReverseVowelSigns(Strng, True) Strng = CF.ThaiDigraphConjuncts(Strng, True) Strng = CF.ThaiReverseVowelSigns(Strng) if Script == "Thai": Strng = Strng.replace("ฺ", "์") if Script == "LaoPali": Strng = Strng.replace("຺", "์") cons = "|".join( GM.CrunchSymbols(GM.Consonants, Script) + GM.CrunchList("VowelMap", Script)[0:1] ) EAIO = "".join( GM.CrunchList("VowelSignMap", Script)[9:12] + GM.CrunchList("SinhalaVowelSignMap", Script)[:] ) Strng = re.sub( "(?<![" + EAIO + "])" + "(" + cons + ")" + "(" + cons + ")" + "(์)", r"\1" + "ั" + r"\2\3", Strng, ) if Script == "Thai": cons_others = "([ยรลวศษสหฬ])" if Script == "LaoPali": cons_others = "([ຍຣລວຨຩສຫຬ])" Strng = re.sub( "(?<![" + EAIO + "])" + "(" + cons + ")" + "(" + cons + ")" + "(์)", r"\1" + "ั" + r"\2\3", Strng, ) Strng = re.sub( "(" + cons + ")" + "(์)" + "([" + EAIO + "]?)" + cons_others, r"\1" + "๎" + r"\3\4", Strng, ) Strng = re.sub( cons_others + "(์)" + "([" + EAIO + "]?)" + "(" + cons + ")", r"\1" + "๎" + r"\3\4", Strng, ) Strng = re.sub( cons_others + "(๎)" + "([" + EAIO + "]?)" + r"\1", r"\1" + "์" + r"\3\1", Strng ) Strng = re.sub( "(" + cons + ")" + "(๎)" + "([" + EAIO + "])" + "(" + cons + ")", r"\3\1\2\4", Strng, ) if Script == "Thai": Strng = Strng.replace("ง์", "ง") Strng = re.sub("(\u0E31)(.)(\u0E4E)", r"\2\3", Strng) if Script == "LaoPali": Strng = Strng.replace("ั", "ັ") Strng = Strng.replace("ງ์", "ງ") Strng = Strng.replace("์", "໌") Strng = re.sub("(\u0EB1)(.)(\u0E4E)", r"\2\3", Strng) Strng = Strng.replace("\u0E4E", "\u0ECE") return Strng def ThaiTranscription(Strng, anusvaraChange=True): Strng = CF.ThaiReverseVowelSigns(Strng, True) Strng = CF.ThaiDigraphConjuncts(Strng, True) Strng = CF.ThaiReverseVowelSigns(Strng) Strng = ThaiLaoTranscription( Strng, "Thai", "\u0E30", "\u0E31", anusvaraChange=anusvaraChange ) Strng = Strng.replace("ะ์", "์") Strng = Strng.replace("ะงัง", "\u0E31งํ") return Strng def AvestanConventions(Strng): extraCons = ["\U00010B33", "\U00010B32", "\U00010B1D", "\U00010B12", "𐬣", "𐬝"] ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Avestan") + extraCons) ListV = "|".join(GM.CrunchSymbols(GM.Vowels, "Avestan")) ListA = "|".join( GM.CrunchSymbols(GM.Vowels + GM.Consonants, "Avestan") + extraCons + ["𐬄", "𐬅"] ) ii = Avestan.VowelMap[2] * 2 uu = Avestan.VowelMap[4] * 2 i = Avestan.VowelMap[2] a = Avestan.VowelMap[0] kha = Avestan.ConsonantMap[1] nga = Avestan.ConsonantMap[4] ya = Avestan.ConsonantMap[25] va = Avestan.ConsonantMap[28] ta = Avestan.ConsonantMap[15] tha = Avestan.ConsonantMap[16] dha = Avestan.ConsonantMap[18] na = Avestan.ConsonantMap[19] ma = Avestan.ConsonantMap[24] kb = "|".join([Avestan.ConsonantMap[0], Avestan.ConsonantMap[22]]) nna = Avestan.ConsonantMap[14] sha = Avestan.ConsonantMap[29] VelarDental = "|".join(Avestan.ConsonantMap[0:4] + Avestan.ConsonantMap[15:19]) Strng = Strng.replace(nga + i, "𐬣" + i) Strng = re.sub(a + "([" + na + ma + "])" + "(?!" + ListA + ")", "𐬆" + r"\1", Strng) Strng = re.sub("(" + na + ")" + "(" + VelarDental + ")", nna + r"\2", Strng) Strng = re.sub("(" + kha + ")" + "(?=" + ii + ")", "\U00010B12", Strng) Strng = re.sub("(" + sha + ")" + "(?=" + ii + ")", "\U00010B33", Strng) Strng = re.sub("(" + tha + "|" + dha + ")" + "(" + uu + ")", r"\1" "𐬡", Strng) Strng = re.sub( "(" + ta + ")" + "(?!" 
+ "((" + ListV + ")" + "|" + "(" + ListC + "))" + ")", "\U00010B1D", Strng, ) Strng = re.sub("(" + ta + ")" + "(?=" + "(" + kb + ")" + ")", "\U00010B1D", Strng) return Strng def TaiThamO(Strng): Strng = Strng.replace("\u1A6E\u1A63", "\u1A70") return Strng def TaiThamHighNga(Strng): Strng = Strng.replace("\u1A58", "\u1A59") return Strng def TaiThamMoveNnga(Strng): Strng = re.sub("(.)(\u1A58|\u1A50)", r"\2\1", Strng) return Strng def UrduRemoveShortVowels(Strng): ShortVowels = ["\u0652", "\u064E", "\u0650", "\u064F"] for vow in ShortVowels: Strng = Strng.replace(vow, "") return Strng def PhagsPaRearrange(Strng, Target): vir = GM.CrunchList("ViramaMap", Target)[0] ListC = "|".join(GM.CrunchSymbols(GM.Consonants, Target)) ListV = "|".join(GM.CrunchSymbols(GM.Vowels, Target)) ListVS = "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Target)) Strng = re.sub( "(?<!( |" + vir + "))" + "(" + ListC + ")" + "(?= )", r"\2" + vir, Strng ) Strng = Strng.replace(" ", "").replace("᠂", " ").replace("᠃", " ") return Strng def DevanagariAVowels(Strng): oldVowels = Devanagari.VowelMap[2:12] + Devanagari.SouthVowelMap[:1] a = Devanagari.VowelMap[0] newAVowels = [ a + x for x in Devanagari.VowelSignMap[1:11] + Devanagari.SouthVowelSignMap[:1] ] for x, y in zip(oldVowels, newAVowels): Strng = Strng.replace(x, y) return Strng def AnusvaraToNasalIPA(Strng): Strng = Strng.replace("̃k", "ŋk") Strng = Strng.replace("̃g", "ŋg") Strng = Strng.replace("̃c", "ɲc") Strng = Strng.replace("̃j", "ɲj") Strng = Strng.replace("̃t̪", "n̪t̪") Strng = Strng.replace("̃d̪", "n̪d̪") Strng = Strng.replace("̃ɖ", "ɳɖ") Strng = Strng.replace("̃ʈ", "ɳʈ") Strng = Strng.replace("̃ːk", "ːŋk") Strng = Strng.replace("̃ːg", "ːŋg") Strng = Strng.replace("̃ːc", "ːɲc") Strng = Strng.replace("̃ːj", "ːɲj") Strng = Strng.replace("̃ːt̪", "ːn̪t̪") Strng = Strng.replace("̃ːd̪", "ːn̪d̪") Strng = Strng.replace("̃ːɖ", "ːɳɖ") Strng = Strng.replace("̃ːʈ", "ːɳʈ") return Strng def IPARemoveCross(Strng): Strng = Strng.replace("×", "") return Strng def ChakmaAVowels(Strng): return Strng def ZanabazarSquareContextual(Strng): yrlv = ZanabazarSquare.ConsonantMap[25:29] yrlv_sub = ["\U00011A3B", "\U00011A3C", "\U00011A3D", "\U00011A3E"] for x, y in zip(yrlv, yrlv_sub): Strng = Strng.replace("\U00011A47" + x, y) Strng = re.sub("(?<!\U00011A47)" + yrlv[1] + "\U00011A47", "\U00011A3A", Strng) return Strng def ZanabazarSquareAiAu(Strng): Strng = Strng.replace("\U00011A04\U00011A0A", "\U00011A07") Strng = Strng.replace("\U00011A06\U00011A0A", "\U00011A08") return Strng def ZanabazarSquareMongolianFinal(Strng): Strng = Strng.replace(ZanabazarSquare.ViramaMap[0], "\U00011A33") return Strng def TamilRemoveApostrophe(Strng): Strng = Strng.replace("ʼ", "") return Strng def TamilRemoveNumbers(Strng): numerals = ["²", "³", "⁴", "₂", "₃", "₄"] for num in numerals: Strng = Strng.replace(num, "") return Strng def NewaSpecialTa(Strng): Strng = Strng.replace("𑐟𑑂", "𑐟𑑂‍") return Strng def TamilDisableSHA(Strng): Strng = Strng.replace("ஶ", "ஷ²") Strng = CF.ShiftDiacritics(Strng, "Tamil") return Strng def swapEe(Strng): Strng = Strng.replace("e", "X@X@") Strng = Strng.replace("e", "E") Strng = Strng.replace("X@X@") return Strng def capitalizeSentence(Strng): Strng = re.sub( r"(\A\w)|" + "(?<!\.\w)([\.?!]\s*)\w|" + "\w(?:\.\w)|" + "(\n)\w|" + "(\n(\"|\“|'|\‘))\w|" + "(?<=\w\.)\w", lambda x: x.group().upper(), Strng, ) Strng = re.sub(r"(@)(.)", lambda x: x.groups()[1].upper(), Strng) return Strng def NewaDisableRepha(Strng): Strng = Strng.replace("𑐬𑑂", "𑐬𑑂\u200D") return Strng
PypiClean
/MoonNectar-0.6.0.tar.gz/MoonNectar-0.6.0/mnectar/library/view.py
from __future__ import annotations import logging _logger = logging.getLogger(__name__) import collections import functools import inspect import random import weakref from dataclasses import dataclass, field from typing import Iterable from mnectar.config import Setting from mnectar.registry import Registry, Plugin from mnectar.util.signal import Signal class View(Plugin, registry=Registry.Playlist): """ A Read-Only view of a list of mrl records. This class takes any list of mrl records and acts as a read-only container for indexing into and iterating over the list of records. The record can be of any Mapping data type (a dictionary-like object) so long as it provides an 'mrl' attribute so that records can be easily searched by mrl. This class is designed to work as a base-class for other read-only views which modify the apparent content of the list without modifying the actual underlying data object. Each view based on this class should have unique methods so that multiple views may be combined to provide compound functionality. Recommended subclass implementation: * Create the subclass * Define an 'action' method (e.g. 'sort') * Mark the action method using the decorator ``@View.action`` * In the action method: * Prepare a generator (or any Iterable type) * Each item must be a size 2 tuple: (orignal_content_index, original_content_value) * The original content *MUST* be accessed using ``self.all`` * Update the content mapping: * ``self._populate_map(view_generator)`` * Update the internal data mapping by calling `self._populate_map(view_generator)` Example: >>> class Sorted(View, registry=Registry.Playlist): ... @View.action ... def sort(self): ... self._populate_map(sorted( ... enumerate(self.all), ... key=lambda _: _[1] ... )) Views may also be chained together by providing one view as the initializer for another view. If the above example usage is followed, the View class will automatically simplify the lookup process for each chained view so that calls to ``__getitem__()`` directly access the original list, bypassing the intermediate chained views. This significantly improves performance. In addition, all available actions will be propogated to the chained view. Example: >>> class Filtered(View, registry=Registry.Playlist): ... @View.action ... def sort(self, column, value): ... self._populate_map(filter( ... lambda _: _[1][column] == value, ... enumerate(self.all), ... )) ... foo = Sorted(Filtered(my_library, app)) ... foo.sort() ... foo.filter('album', 'My Album') ... foo[0] # 'Sorted' looks up the value in 'my_library' bypassing 'Filtered' Terminology: Inner Chain View: A view used as content for another view Outer Chain View: A view which uses this view as its content Example: >>> source_data = [] ... view_1 = Editable(soruce_data) ... view_2 = Filtered(view_1) ... view_3 = Sorted(view_2) ... view_4 = Ordered(view_2) Considering the above code objects: view_1: - Inner View: None - Outer Views: view_2 view_2: - Inner View: view_1 - Outer Views: view_3, view_4 view 3: - Inner View: view_2 - Outer View: None view 4: - Inner View: view_2 - Outer View: None """ _first_only = False _default = None @staticmethod def action(method): """ Method decorator to be used with subclasses. Marks a method as a view action which is available when chaining together multiple views. 
""" method._is_view_action = True @functools.wraps(method) def wrapped(self, *arg, **kw): return_value = method(self, *arg, **kw) self.update_outer() return return_value return wrapped @staticmethod def index_convert(argname): """ Method decorator which marks a method as needing an index conversion when called from an outer chained view. Usage: class Foo(View, ...): @index_convert('idx') def my_func(self, idx): ... """ def decorator(method): argspec = inspect.getfullargspec(method) if argname in argspec.args: # Save the location of the index arg for when calling as positional argument # ... But subtract 1 because this is a class method and 'self' should be # ignored argindex = argspec.args.index(argname) - 1 elif argname in argspec.kwonlyargs: # Will be found as a keyword, no need to save the index argindex = None else: raise ValueError(f"Index '{argname}' from decorator `@index_convert(...)` not found (not outer-most decorator?)") method._index_convert=(argname, argindex) return method return decorator def __index_convert_wrap(self, method): """ Method decorator which will update an index in a chained view into an index in the view implementing the method. This decorator is applied when classes are chained together, never as an explicit '@' decorator to the method. """ argname, argindex = method._index_convert @functools.wraps(method) def wrapped(*args, **kwargs): # Obtain the new index by doing a lookup of the object # ... this compares id values which are maintained between chained views # Once found, update the calling args/kwargs and call the real method if argname in kwargs: old_index = kwargs[argname] if old_index >= len(self): kwargs[argname] = len(self.__chained) else: new_index = self.__chained.index(self[old_index]) kwargs[argname] = new_index else: old_index = args[argindex] if old_index >= len(self): new_args = list(args) new_args[argindex] = len(self.__chained) args = tuple(new_args) else: new_args = list(args) new_index = self.__chained.index(self[old_index]) new_args[argindex] = new_index args = tuple(new_args) return method(*args, **kwargs) return wrapped def __init_subclass__(cls, first_only: bool = False, **kw): """ Subclass Configuration Parameters: :param first_only: This view must be first in a chain (cannot be an outer view) """ super().__init_subclass__(**kw) cls._first_only = first_only def __init__(self, content, app=None, *, default=None): if self._first_only and isinstance(content, View): raise ValueError(f"'{self.__class__.__name__}' view can only be the first view in a chain!") # Views can be initialized with no app if it is defined by the content. # ... This permits chaining view creation with no intermediate variables. # ... This must be done before initializing the parent class if app is None and hasattr(content, 'app'): app = content.app # But if app is still None, raise an error if app is None: raise TypeError("__init__() missing 1 required positional argument: 'app'") # Initialize the plugin class now the `app` variable is taken care of super().__init__(app) # Find any actions in the contained view and propogatre them to this view if isinstance(content, View): self.__chained = content self.__content = content.__content for name, method in inspect.getmembers( content, lambda _: hasattr(_, "_is_view_action") ): if not hasattr(self, name): if hasattr(method, '_index_convert'): method = self.__index_convert_wrap(method) setattr(self, name, method) else: self.__chained = None self.__content = content # Create a set of outer chained views # ... 
An "outer chained view" is any view which uses THIS view as its content # ... This permits calling methods in any view and updating the entire chain in # both directions. # ... A weak reference is used so that outer chained views can be safely deleted # without invalidating the entire chain. self.__chained_outer = weakref.WeakSet() if self.__chained is not None: self.__chained.__chained_outer.add(self) # Save the default action (if any) # ... which is used optionally by the subclass # ... so no format or restriction is defined here! self._default = default # Perform the default action for the class self.update() def __add_outer_view(self, view): """ Add a reference to an outer view """ @property def all(self): """ All records this view was initialized with. This may be another View object in the case of chained views. """ if self.__chained is not None: return self.__chained else: return self.__content @property def inner(self): """ The inner view (if any) or None if this is the top level view """ if self.__chained is not None: return self.__chained else: return None @property def chain(self): """ The compmlete inner chain (if any) including this view """ chain = [self] view = self while view.inner: view = view.inner chain.append(view) return tuple(chain) @property def outer(self): """ A weakref set of outer views which reference this view """ return self.__chained_outer def _chained_index(self, index, who): """ Convert a view index into an index into the original content. """ if self.__chained is not None and self == who: return self.__chained._chained_index(index, who) elif who != self: return self._map_idx[index] else: return index def _populate_map(self, content: Iterable): """ Update the view mapping for this view bsed on the provided iterator. This method must be called any time this view may have changed. :param content: an iterable object returning (index,record) pairs """ self._map_id = { id(rec): self._chained_index(index, self) for index,rec in content } self._map_idx = tuple(self._map_id.values()) self._map_mrl = { self.__content[_].mrl: _ for _ in self._map_idx } def update(self): """ Update the view after the content has changed The default implementation simply refreshes the view from the content and should be overridden to use the action defined by the class, reapplying the most recent action applied. """ self._populate_map(enumerate(self.all)) def update_outer(self): """ Update all outer elements in the view chain. An outer element is any view which uses this view as its content. """ # Call a separate recursion method # ... 
This ensures this class is not accidentally updated twice for outer in self.outer: outer.__update_outer_recursion() def __update_outer_recursion(self): """ Private method used for recursive outer chain view updates """ # Update ourselves self.update() # Update outer views along the chain for outer in self.outer: outer.__update_outer_recursion() def __len__(self): return len(self._map_id) def __getitem__(self, index): try: if isinstance(index, int): return self.__content[self._map_idx[index]] elif isinstance(index, ViewPointer): return self[self.index(index)] elif isinstance(index, str): return self.__content[self._map_mrl[index]] elif isinstance(index, slice): return [self.__content[self._map_idx[_]] for _ in range(*index.indices(len(self)))] else: raise TypeError(f"Invalid index type: {type(index)}") except IndexError as ex: raise IndexError(f"Invalid View Index: {index}") except KeyError as ex: raise IndexError(f"Invalid View MRL: {index}") except ValueError as ex: raise IndexError(f"Invalid View Pointer: {index}") def __contains__(self, item): return ( type(item) == str and item in self._map_mrl or id(item) in self._map_id and hasattr(item, 'mrl') or isinstance(item, ViewPointer) and item.valid and self.__contains__(item.record) or isinstance(item, View) and self.contains_view(item) ) def contains_view(self, view: View) -> bool: """ Implements __contains__ for a view, indicating if a specified view is contained in the inner chain of views for this object. :param view: The view to test :returns: True if `view` is an inner view of this view, else False """ return view == self or (self.inner is not None and self.inner.contains_view(view)) def contains_id(self, id_value: int) -> bool: """ Test if the view contains the id() value of a record :param id_value: The id() value to test for :returns: True if `id_value` exists in this view """ return id_value in self._map_id def __iter__(self): for index in range(len(self)): yield self[index] def index(self, item, *, is_id=False): if type(item) == int and is_id == True: return tuple(self._map_id.keys()).index(item) elif item in self and not type(item) == View: if type(item) == str: return tuple(self._map_mrl.keys()).index(item) elif isinstance(item, ViewPointer): return self.index(item.id, is_id=True) else: return tuple(self._map_id.keys()).index(id(item)) else: raise ValueError(f"'{item}' not in playlist") def count(self, item): if type(item) == str: return len([_ for _ in self if _.mrl == item]) elif isinstance(item, ViewPointer): return len([_ for _ in self if _ == item.record]) else: return len([_ for _ in self if _ == item]) def pointer(self, index: Union[int,str,ViewPointer]) -> ViewPointer: """ Obtain a pointer to this view which can be used to iterate over records. :param index: An index into the view :returns: A view pointer (not valid if the index does not exist) """ try: return ViewPointer(self.app, self, id(self[index]), self[index].mrl) except IndexError: return ViewPointer(self.app, self, ) class WholeLibrary(View, registry=Registry.Playlist): """ This view is designed to work directly with the library to content to automatically update when files are added or removed from monitored directories. """ def on_inserted_or_updated(self, records): """ Library records have been inserted or updated externally. """ for rec in records: # Look up using the MRL # ... lookup of the record directly will fail # ... 
because object ids are comapred if not rec['mrl'] in self.all: self.all.append(rec) self.update() self.update_outer() def on_deleted(self, records): """ Library records have been deleted externally """ for rec in records: # Look up using the MRL # ... lookup of the record directly will fail # ... because object ids are comapred if rec['mrl'] in self.all: self.all.remove(rec['mrl']) self.update() self.update_outer() class Filtered(View, registry=Registry.Playlist): _default = "" def update(self): self.filter() @View.action def filter(self, filterstr=None): # Detect if the previous filter should be used ... if filterstr is None: filterstr = self._default or "" # Save the filter string for later use self._default = filterstr filtered = self.app.search.filtered(enumerate(self.all), filterstr, lambda _: _[1]) self._populate_map(filtered) class Sorted(View, registry=Registry.Playlist): _default = None def update(self): self.sort() @View.action def sort(self, column=None, reverse=False, *, smart=True): # Detect if the previous sort should be used ... if column is None: if type(self._default) in (list,tuple): column,reverse,smart = self._default elif type(self._default) == str: column = self._default else: column = self.app.columns[0].name # Convert any string column to an index elif type(column) == int: column = self.app.columns[column].name # Save the sort details for later use .... self._default = (column, reverse, smart) # Convert any string column to an index colnum = self.app.columns.indexOfName(column) if smart: # Collect sort details col_dict = self.app.columns sort_cols = self.app.columns[colnum].sortCols sort_funcs = [col_dict[_].sortFunc for _ in sort_cols] sort_defs = [col_dict[_].sortDefault for _ in sort_cols] sort_key = lambda _: "#".join([f"{default if col not in _[1] else func(_[1], col)}" for col,func,default in zip(sort_cols,sort_funcs,sort_defs)]) # Create sorted playlist generator sorted_playlist = sorted(enumerate(self.all), key=sort_key, reverse=reverse) else: sort_key = lambda _: self.app.columns[colnum].sortFunc(_[1],column) sorted_playlist = sorted(enumerate(self.all), key=sort_key, reverse=reverse) self._populate_map(sorted_playlist) class Editable(View, registry=Registry.Playlist, first_only=True): """ This is a mixin view (no View.action) methods defined which makes any view editable. """ @View.action def append(self, item): self.all.append(item) self.update() @View.action @View.index_convert('index') def insert(self, index, item): if len(self) == 0: real_index = 0 else: real_index = self._chained_index(index, self) self.all.insert(real_index, item) self.update() @View.action def extend(self, other): self.all.extend(other) self.update() @View.action @View.index_convert('index') def pop(self, index): real_index = self._chained_index(index, self) popped = self.all.pop(real_index) self.update() return popped @View.action def remove(self, value): if type(value) == str: self.all.remove(self[value]) else: self.all.remove(value) self.update() class Changed(View, registry=Registry.Playlist): """ This is a mixin class (no View.aciton) which provides a signal indicating the content has changed in one of the chained views. Note that if the signal is not propogated to outer views! """ changed = Signal() # All content changed (library reload) def update(self): super().update() self.changed.emit() class Randomized(View, registry=Registry.Playlist): """ Randomize the order of the view contents. 
""" def update(self): self.randomize() @View.action def randomize(self): self._populate_map(random.sample(list(enumerate(self.all)), len(self.all))) class Grouped(View, registry=Registry.Playlist): """ Group the view content by a specified column without otherwise changing the order. In order to sort by the group wrap this around a Sorted view and sort by the same column. """ _default = "" def update(self): self.group() @View.action def group(self, group=None): if group is None: group = self._default or "" self._default = group if group == "": super().update() else: # Using dict as an ordered set (python 3.7+) group_values = {_[group]: None for _ in self.all} groups = tuple(group_values.keys()) self._populate_map( sorted( enumerate(self.all), key = lambda _: groups.index(_[1][group]) ) ) class RandomGroup(View, registry=Registry.Playlist): """ Group the view by the specified column, randomizing the order of the groups without changing the order within each group. """ _default = "" def update(self): self.randomize_group() @View.action def randomize_group(self, group=None): if group is None: group = self._default or "" self._default = group if group == "": super().update() else: # Using dict as an ordered set (python 3.7+) group_values = {_.get(group, ""): None for _ in self.all} groups = tuple(group_values.keys()) group_randomized = random.sample(groups, len(groups)) self._populate_map( sorted( enumerate(self.all), key = lambda _: group_randomized.index(_[1].get(group, "")) ) ) class Shifted(View, registry=Registry.Playlist): """ Resequence the playlist so that the current playing track is at the start of the playlist. This is used by ordered view pointers to ensure that a new order plays all tracks in the playlist when not looping. """ def update(self): self.shift() @View.action def shift(self, index=None): if index is None: index = self._default self._default = index if type(index) == int: start = index elif self.inner: try: start = self.inner.index(index) except ValueError: start = -1 else: start = -1 if 0 <= start < len(self.all): self._populate_map( sorted( enumerate(self.all), key = lambda _: _[0] + (len(self.all) if _[0] < start else 0) ) ) else: super().update() class Selected(View, registry=Registry.Playlist): """ Internal application view representing user selected records within a different view. """ _default = [] def update(self): self.select() @View.action def select(self, indicies=None): # Detect if the previous filter should be used ... if indicies is None: indicies = self._default or [] if all([type(_) == int for _ in indicies]): # Created the filtered list of records based on index filtered = [(_, self.all[_]) for _ in indicies if _ < len(self.all)] elif all([type(_) == str for _ in indicies]): # Created the filtered list of records based on mrl filtered = [(self.all.index(_), self.all[_]) for _ in indicies if _ in self.all] elif all([type(_) == type(self.all[0]) for _ in indicies]): # Created the filtered list based on matching records filtered = [(self.all.index(_),_) for _ in indicies if _ in self.all] else: # Should never happen, but just in case .... filtered = [] _logger.error(f"Unable to interpret selection list:\n{indicies}") self._populate_map(filtered) @dataclass(frozen=True) class ViewPointer: """ A pointer used to iterate forwards and backwards through records in a view. Iteration order can optionally be modified by specifying an `order` view. 
""" app: object = None # AppInit Instance (cannot import because of circular references) view: View = None id: int = 0 mrl: str = "" order: View = field(default=None, compare=False, hash=True) loop = Setting('playback.loop', default=False) def reorder(self, order_view): """ Create a new ordered view pointer. This permits using an outer-chained view as a mechanism to access the view in this pointer without changing the actual view named by the pointer. This maintains knowledge of the real data source for the pointer vs a different order in which it is accessed. """ if not self.view in order_view: _logger.error("Attempt to create an ordered view pointer from views that are not chained!") return ViewPointer(self.app, self.view, order=order_view) else: if self.valid: rec = self.order[self.mrl] if rec in order_view: idx = order_view.index(rec) return ViewPointer(self.app, self.view, id(rec), rec.mrl, order_view) elif self.loop and len(order_view) > 0: rec = order_view[0] return ViewPointer(self.app, self.view, id(rec), rec.mrl, order_view) else: return ViewPointer(self.app, self.view, order=order_view) else: return ViewPointer(self.app, self.view, order=order_view) def __post_init__(self): if self.order is None: object.__setattr__(self, 'order', self.view) def __iter__(self): pointer = start = self not_start = True while pointer.valid and not_start: yield pointer pointer = pointer.next not_start = pointer != start @property def view_index(self) -> int: if self.valid: return self.view.index(self.id, is_id=True) else: return None @property def order_index(self) -> int: if self.valid: return self.order.index(self.id, is_id=True) else: return None @property def valid(self) -> bool: """ :returns: True if the pointer is valid (points to the correct record) """ return self.order is not None and self.id != 0 and self.order.contains_id(self.id) @property def record(self): """ Return the original record associated with this pointer. :returns: A record object or None if the record is not found. """ if self.valid: return self.order[self.order_index] else: return None @property def next(self) -> ViewPointer: """ Return the next pointer in the sequence, or raise a StopIteration exception if therre is no next record. :returns: A record object """ if not self.valid: return ViewPointer(self.app, self.view, order=self.order) else: new = (self.order_index + 1) if self.loop: new %= len(self.order) if new >= len(self.order): return ViewPointer(self.app, self.view, order=self.order) rec = self.order[new] return ViewPointer(self.app, self.view, id(rec), rec.mrl, order=self.order) @property def prev(self) -> ViewPointer: """ Return the previous pointer in the sequence, or raise a StopIteration exception if therre is no previous record. :returns: A record object """ if not self.valid: return ViewPointer(self.app, self.view, order=self.order) else: new = (self.order_index - 1) if self.loop: new %= len(self.order) if new < 0: return ViewPointer(self.app, self.view, order=self.order) rec = self.order[new] return ViewPointer(self.app, self.view, id(rec), rec.mrl, order=self.order) @property def first(self) -> ViewPointer: """ Return a pointer to the first record in the view. """ pointer = self with ViewPointer.loop.tempval(self, False): while pointer.prev.valid: pointer = pointer.prev return pointer @property def last(self) -> ViewPointer: """ Return a pointer to the last record in the view. """ pointer = self with ViewPointer.loop.tempval(self, False): while pointer.next.valid: pointer = pointer.next return pointer
PypiClean
/concrete-datastore-1.57.2.tar.gz/concrete-datastore-1.57.2/concrete_datastore/api/v1_1/urls.py
from django.urls import re_path from django.conf import settings from rest_framework.routers import SimpleRouter from concrete_datastore.concrete.meta import list_of_meta from concrete_datastore.api.v1.views import ( LoginApiView, RegisterApiView, ResetPasswordApiView, ChangePasswordView, RetrieveSecureTokenApiView, SecureLoginApiView, GenerateSecureTokenApiView, RetrieveSecureConnectCode, SecureLoginCodeApiView, ) from concrete_datastore.api.v1_1.views import ( # pylint:disable=E0611 ConcreteRoleApiView, ConcretePermissionApiView, EmailDeviceAuthView, LDAPLoginApiView, TwoFactorLoginView, UnBlockUsersApiViewset, BlockedUsersApiViewset, AccountMeApiView, ProcessRegisterApiView, ) from concrete_datastore.api.v1_1 import views, API_NAMESPACE app_name = 'concrete_datastore.concrete' # API Front end router = SimpleRouter() for meta_model in list_of_meta: if meta_model.get_model_name() in ["EntityDividerModel", "UndividedModel"]: continue viewset = getattr( views, '{}ModelViewSet'.format(meta_model.get_model_name()) ) router.register( prefix=meta_model.get_dashed_case_class_name(), viewset=viewset, basename=meta_model.get_dashed_case_class_name(), ) router.register( prefix='acl/role', viewset=ConcreteRoleApiView, basename='acl-role' ) router.register( prefix='acl/permission', viewset=ConcretePermissionApiView, basename='acl-permission', ) router.register( prefix='blocked-users', viewset=BlockedUsersApiViewset, basename='blocked-users', ) router.register( prefix='email-device', viewset=EmailDeviceAuthView, basename='email-device' ) specific_urlpatterns = [ re_path( r'^auth/ldap/login', LDAPLoginApiView.as_view(), name='login-ldap-view' ), re_path( r'auth/two-factor/login', TwoFactorLoginView.as_view(), name='two-factor-login', ), re_path( r'^auth/login/', LoginApiView.as_view(api_namespace=API_NAMESPACE), name='login-view', ), re_path( r'^auth/register/', RegisterApiView.as_view(api_namespace=API_NAMESPACE), name='register-view', ), re_path( r'^account/me/', AccountMeApiView.as_view(api_namespace=API_NAMESPACE), name='account-me', ), re_path( r'^auth/change-password/', ChangePasswordView.as_view(api_namespace=API_NAMESPACE), name='change-password', ), re_path( r'^auth/reset-password/', ResetPasswordApiView.as_view(), name='reset-password', ), re_path( r'secure-connect/retrieve-token', RetrieveSecureTokenApiView.as_view(), name='retrieve-secure-token', ), re_path( r'secure-connect/retrieve-code/', RetrieveSecureConnectCode.as_view(), name='retrieve-secure-code', ), re_path( r'secure-connect/login/', SecureLoginApiView.as_view(api_namespace=API_NAMESPACE), name='secure-connect-login', ), re_path( r'secure-connect/login-code/', SecureLoginCodeApiView.as_view(api_namespace=API_NAMESPACE), name='secure-connect-login-code', ), re_path( r'secure-connect/generate-token', GenerateSecureTokenApiView.as_view(), name='generate-secure-token', ), re_path( r'process/register/', ProcessRegisterApiView.as_view(), name='register-as-process', ), re_path( r'unblock-users', UnBlockUsersApiViewset.as_view(), name='unblock-users', ), ] if settings.USE_AUTH_LDAP: specific_urlpatterns += [ re_path( r'^auth/ldap/login', LDAPLoginApiView.as_view(), name='login-ldap-view', ) ] urlpatterns = router.urls + specific_urlpatterns
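# --- Editor's note: hedged usage sketch, not part of the original package. ---
# Assumption: this URLconf is mounted under a versioned API prefix in the
# project's root urls.py; the exact prefix below is illustrative only.
#
#     from django.urls import include, re_path
#
#     urlpatterns = [
#         re_path(r'^api/v1\.1/', include('concrete_datastore.api.v1_1.urls')),
#     ]
#
# With such a mount, POST /api/v1.1/auth/login/ resolves to LoginApiView, and the
# SimpleRouter above exposes list/detail routes for every registered model viewset
# (e.g. /api/v1.1/acl/role/, /api/v1.1/blocked-users/).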
PypiClean
/janis_pipelines.runner-0.13.0-py3-none-any.whl/janis_assistant/management/envvariables.py
from enum import Enum
from os import getenv, path, getcwd


class HashableEnum(str, Enum):
    def __str__(self):
        return self.value

    def to_yaml(self):
        return self.value

    pass

    # def __hash__(self):
    #     return self.value.__hash__()


def try_get_home_dir():
    try:
        return getenv("HOME")
    except:
        try:
            return path.expanduser("~")
        except:
            return getcwd()


class EnvVariables(HashableEnum):
    #: Default template to use, NB this template should have NO required arguments.
    default_template = "JANIS_DEFAULTTEMPLATE"
    #: (Default: ~/.janis) Directory of default Janis settings
    config_dir = "JANIS_CONFIGDIR"
    #: (Default: ``$JANIS_CONFIGDIR/janis.conf``) Default configuration file for Janis
    config_path = "JANIS_CONFIGPATH"
    #: Use this directory as a BASE to generate a new output directory for each Janis run
    output_dir = "JANIS_OUTPUTDIR"
    #: Use this directory for intermediate files
    exec_dir = "JANIS_EXCECUTIONDIR"
    #: Additional search paths (comma separated) to lookup Janis workflows in
    search_path = "JANIS_SEARCHPATH"
    #: List of YAML recipe files (comma separated) for Janis to consume, See the RECIPES section for more information.
    recipe_paths = "JANIS_RECIPEPATHS"
    #: Directories for which each file (ending in .yaml | .yml) is a key of input values. See the RECIPES section for more information.
    recipe_directory = "JANIS_RECIPEDIRECTORY"  # secretly comma separated
    #: Override the Cromwell JAR that Janis uses
    cromwelljar = "JANIS_CROMWELLJAR"

    db_script_generator = "JANIS_DBCREDENTIALSGENERATOR"
    db_script_generator_cleanup = "JANIS_DBCREDENTIALSGENERATORCLEANUP"

    def __str__(self):
        return self.value

    def default(self):
        import os.path

        HOME = try_get_home_dir()

        if self == EnvVariables.config_dir:
            return os.path.join(HOME, ".janis/")
        if self == EnvVariables.output_dir:
            return None
        elif self == EnvVariables.exec_dir:
            return os.path.join(HOME, "janis", "execution/")
        elif self == EnvVariables.config_path:
            return os.path.join(EnvVariables.config_dir.resolve(True), "janis.conf")
        elif self == EnvVariables.recipe_paths:
            return []
        elif self == EnvVariables.recipe_directory:
            return []

        raise Exception(f"Couldn't determine default() for '{self.value}'")

    def resolve(self, include_default=False):
        value = getenv(self.value)
        if value is None and include_default:
            value = self.default()
        if self == EnvVariables.recipe_paths:
            return value.split(",") if value else None
        if self == EnvVariables.recipe_directory:
            return value.split(",") if value else None
        return value
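A short usage sketch derived from the class above; the directory value is a placeholder. It shows how resolve() falls back to the documented defaults when the variable is unset.

import os

# With the environment variable set, resolve() returns it verbatim.
os.environ["JANIS_CONFIGDIR"] = "/tmp/janis-config"
assert EnvVariables.config_dir.resolve() == "/tmp/janis-config"

# Without it, include_default=True falls back to EnvVariables.default().
del os.environ["JANIS_CONFIGDIR"]
print(EnvVariables.config_dir.resolve(include_default=True))    # e.g. /home/<user>/.janis/
print(EnvVariables.output_dir.resolve(include_default=True))    # None - no default output dir
print(EnvVariables.recipe_paths.resolve(include_default=True))  # None - the empty default list collapses to None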
PypiClean
/stackdio-server-0.8.0a4.tar.gz/stackdio-server-0.8.0a4/stackdio/ui/static/stackdio/lib/bower_components/moment/src/locale/hr.js
import moment from '../moment'; function translate(number, withoutSuffix, key) { var result = number + ' '; switch (key) { case 'm': return withoutSuffix ? 'jedna minuta' : 'jedne minute'; case 'mm': if (number === 1) { result += 'minuta'; } else if (number === 2 || number === 3 || number === 4) { result += 'minute'; } else { result += 'minuta'; } return result; case 'h': return withoutSuffix ? 'jedan sat' : 'jednog sata'; case 'hh': if (number === 1) { result += 'sat'; } else if (number === 2 || number === 3 || number === 4) { result += 'sata'; } else { result += 'sati'; } return result; case 'dd': if (number === 1) { result += 'dan'; } else { result += 'dana'; } return result; case 'MM': if (number === 1) { result += 'mjesec'; } else if (number === 2 || number === 3 || number === 4) { result += 'mjeseca'; } else { result += 'mjeseci'; } return result; case 'yy': if (number === 1) { result += 'godina'; } else if (number === 2 || number === 3 || number === 4) { result += 'godine'; } else { result += 'godina'; } return result; } } export default moment.defineLocale('hr', { months : { format: 'siječnja_veljače_ožujka_travnja_svibnja_lipnja_srpnja_kolovoza_rujna_listopada_studenoga_prosinca'.split('_'), standalone: 'siječanj_veljača_ožujak_travanj_svibanj_lipanj_srpanj_kolovoz_rujan_listopad_studeni_prosinac'.split('_') }, monthsShort : 'sij._velj._ožu._tra._svi._lip._srp._kol._ruj._lis._stu._pro.'.split('_'), monthsParseExact: true, weekdays : 'nedjelja_ponedjeljak_utorak_srijeda_četvrtak_petak_subota'.split('_'), weekdaysShort : 'ned._pon._uto._sri._čet._pet._sub.'.split('_'), weekdaysMin : 'ne_po_ut_sr_če_pe_su'.split('_'), weekdaysParseExact : true, longDateFormat : { LT : 'H:mm', LTS : 'H:mm:ss', L : 'DD. MM. YYYY', LL : 'D. MMMM YYYY', LLL : 'D. MMMM YYYY H:mm', LLLL : 'dddd, D. MMMM YYYY H:mm' }, calendar : { sameDay : '[danas u] LT', nextDay : '[sutra u] LT', nextWeek : function () { switch (this.day()) { case 0: return '[u] [nedjelju] [u] LT'; case 3: return '[u] [srijedu] [u] LT'; case 6: return '[u] [subotu] [u] LT'; case 1: case 2: case 4: case 5: return '[u] dddd [u] LT'; } }, lastDay : '[jučer u] LT', lastWeek : function () { switch (this.day()) { case 0: case 3: return '[prošlu] dddd [u] LT'; case 6: return '[prošle] [subote] [u] LT'; case 1: case 2: case 4: case 5: return '[prošli] dddd [u] LT'; } }, sameElse : 'L' }, relativeTime : { future : 'za %s', past : 'prije %s', s : 'par sekundi', m : translate, mm : translate, h : translate, hh : translate, d : 'dan', dd : translate, M : 'mjesec', MM : translate, y : 'godinu', yy : translate }, ordinalParse: /\d{1,2}\./, ordinal : '%d.', week : { dow : 1, // Monday is the first day of the week. doy : 7 // The week that contains Jan 1st is the first week of the year. } });
PypiClean
/w20e.pycms-1.1.2b.tar.gz/w20e.pycms-1.1.2b/w20e/pycms/static/tinymce/jscripts/tiny_mce/plugins/media/editor_plugin_src.js
(function() { var rootAttributes = tinymce.explode('id,name,width,height,style,align,class,hspace,vspace,bgcolor,type'), excludedAttrs = tinymce.makeMap(rootAttributes.join(',')), Node = tinymce.html.Node, mediaTypes, scriptRegExp, JSON = tinymce.util.JSON, mimeTypes; // Media types supported by this plugin mediaTypes = [ // Type, clsid:s, mime types, codebase ["Flash", "d27cdb6e-ae6d-11cf-96b8-444553540000", "application/x-shockwave-flash", "http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=6,0,40,0"], ["ShockWave", "166b1bca-3f9c-11cf-8075-444553540000", "application/x-director", "http://download.macromedia.com/pub/shockwave/cabs/director/sw.cab#version=8,5,1,0"], ["WindowsMedia", "6bf52a52-394a-11d3-b153-00c04f79faa6,22d6f312-b0f6-11d0-94ab-0080c74c7e95,05589fa1-c356-11ce-bf01-00aa0055595a", "application/x-mplayer2", "http://activex.microsoft.com/activex/controls/mplayer/en/nsmp2inf.cab#Version=5,1,52,701"], ["QuickTime", "02bf25d5-8c17-4b23-bc80-d3488abddc6b", "video/quicktime", "http://www.apple.com/qtactivex/qtplugin.cab#version=6,0,2,0"], ["RealMedia", "cfcdaa03-8be4-11cf-b84b-0020afbbccfa", "audio/x-pn-realaudio-plugin", "http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=6,0,40,0"], ["Java", "8ad9c840-044e-11d1-b3e9-00805f499d93", "application/x-java-applet", "http://java.sun.com/products/plugin/autodl/jinstall-1_5_0-windows-i586.cab#Version=1,5,0,0"], ["Silverlight", "dfeaf541-f3e1-4c24-acac-99c30715084a", "application/x-silverlight-2"], ["Iframe"], ["Video"], ["EmbeddedAudio"], ["Audio"] ]; function toArray(obj) { var undef, out, i; if (obj && !obj.splice) { out = []; for (i = 0; true; i++) { if (obj[i]) out[i] = obj[i]; else break; } return out; } return obj; }; tinymce.create('tinymce.plugins.MediaPlugin', { init : function(ed, url) { var self = this, lookup = {}, i, y, item, name; function isMediaImg(node) { return node && node.nodeName === 'IMG' && ed.dom.hasClass(node, 'mceItemMedia'); }; self.editor = ed; self.url = url; // Parse media types into a lookup table scriptRegExp = ''; for (i = 0; i < mediaTypes.length; i++) { name = mediaTypes[i][0]; item = { name : name, clsids : tinymce.explode(mediaTypes[i][1] || ''), mimes : tinymce.explode(mediaTypes[i][2] || ''), codebase : mediaTypes[i][3] }; for (y = 0; y < item.clsids.length; y++) lookup['clsid:' + item.clsids[y]] = item; for (y = 0; y < item.mimes.length; y++) lookup[item.mimes[y]] = item; lookup['mceItem' + name] = item; lookup[name.toLowerCase()] = item; scriptRegExp += (scriptRegExp ? 
'|' : '') + name; } // Handle the media_types setting tinymce.each(ed.getParam("media_types", "video=mp4,m4v,ogv,webm;" + "silverlight=xap;" + "flash=swf,flv;" + "shockwave=dcr;" + "quicktime=mov,qt,mpg,mpeg;" + "shockwave=dcr;" + "windowsmedia=avi,wmv,wm,asf,asx,wmx,wvx;" + "realmedia=rm,ra,ram;" + "java=jar;" + "audio=mp3,ogg" ).split(';'), function(item) { var i, extensions, type; item = item.split(/=/); extensions = tinymce.explode(item[1].toLowerCase()); for (i = 0; i < extensions.length; i++) { type = lookup[item[0].toLowerCase()]; if (type) lookup[extensions[i]] = type; } }); scriptRegExp = new RegExp('write(' + scriptRegExp + ')\\(([^)]+)\\)'); self.lookup = lookup; ed.onPreInit.add(function() { // Allow video elements ed.schema.addValidElements('object[id|style|width|height|classid|codebase|*],param[name|value],embed[id|style|width|height|type|src|*],video[*],audio[*],source[*]'); // Convert video elements to image placeholder ed.parser.addNodeFilter('object,embed,video,audio,script,iframe', function(nodes) { var i = nodes.length; while (i--) self.objectToImg(nodes[i]); }); // Convert image placeholders to video elements ed.serializer.addNodeFilter('img', function(nodes, name, args) { var i = nodes.length, node; while (i--) { node = nodes[i]; if ((node.attr('class') || '').indexOf('mceItemMedia') !== -1) self.imgToObject(node, args); } }); }); ed.onInit.add(function() { // Display "media" instead of "img" in element path if (ed.theme && ed.theme.onResolveName) { ed.theme.onResolveName.add(function(theme, path_object) { if (path_object.name === 'img' && ed.dom.hasClass(path_object.node, 'mceItemMedia')) path_object.name = 'media'; }); } // Add contect menu if it's loaded if (ed && ed.plugins.contextmenu) { ed.plugins.contextmenu.onContextMenu.add(function(plugin, menu, element) { if (element.nodeName === 'IMG' && element.className.indexOf('mceItemMedia') !== -1) menu.add({title : 'media.edit', icon : 'media', cmd : 'mceMedia'}); }); } }); // Register commands ed.addCommand('mceMedia', function() { var data, img; img = ed.selection.getNode(); if (isMediaImg(img)) { data = ed.dom.getAttrib(img, 'data-mce-json'); if (data) { data = JSON.parse(data); // Add some extra properties to the data object tinymce.each(rootAttributes, function(name) { var value = ed.dom.getAttrib(img, name); if (value) data[name] = value; }); data.type = self.getType(img.className).name.toLowerCase(); } } if (!data) { data = { type : 'flash', video: {sources:[]}, params: {} }; } ed.windowManager.open({ file : url + '/media.htm', width : 430 + parseInt(ed.getLang('media.delta_width', 0)), height : 500 + parseInt(ed.getLang('media.delta_height', 0)), inline : 1 }, { plugin_url : url, data : data }); }); // Register buttons ed.addButton('media', {title : 'media.desc', cmd : 'mceMedia'}); // Update media selection status ed.onNodeChange.add(function(ed, cm, node) { cm.setActive('media', isMediaImg(node)); }); }, convertUrl : function(url, force_absolute) { var self = this, editor = self.editor, settings = editor.settings, urlConverter = settings.url_converter, urlConverterScope = settings.url_converter_scope || self; if (!url) return url; if (force_absolute) return editor.documentBaseURI.toAbsolute(url); return urlConverter.call(urlConverterScope, url, 'src', 'object'); }, getInfo : function() { return { longname : 'Media', author : 'Moxiecode Systems AB', authorurl : 'http://tinymce.moxiecode.com', infourl : 'http://wiki.moxiecode.com/index.php/TinyMCE:Plugins/media', version : tinymce.majorVersion + "." 
+ tinymce.minorVersion }; }, /** * Converts the JSON data object to an img node. */ dataToImg : function(data, force_absolute) { var self = this, editor = self.editor, baseUri = editor.documentBaseURI, sources, attrs, img, i; data.params.src = self.convertUrl(data.params.src, force_absolute); attrs = data.video.attrs; if (attrs) attrs.src = self.convertUrl(attrs.src, force_absolute); if (attrs) attrs.poster = self.convertUrl(attrs.poster, force_absolute); sources = toArray(data.video.sources); if (sources) { for (i = 0; i < sources.length; i++) sources[i].src = self.convertUrl(sources[i].src, force_absolute); } img = self.editor.dom.create('img', { id : data.id, style : data.style, align : data.align, hspace : data.hspace, vspace : data.vspace, src : self.editor.theme.url + '/img/trans.gif', 'class' : 'mceItemMedia mceItem' + self.getType(data.type).name, 'data-mce-json' : JSON.serialize(data, "'") }); img.width = data.width || (data.type == 'audio' ? "300" : "320"); img.height = data.height || (data.type == 'audio' ? "32" : "240"); return img; }, /** * Converts the JSON data object to a HTML string. */ dataToHtml : function(data, force_absolute) { return this.editor.serializer.serialize(this.dataToImg(data, force_absolute), {forced_root_block : '', force_absolute : force_absolute}); }, /** * Converts the JSON data object to a HTML string. */ htmlToData : function(html) { var fragment, img, data; data = { type : 'flash', video: {sources:[]}, params: {} }; fragment = this.editor.parser.parse(html); img = fragment.getAll('img')[0]; if (img) { data = JSON.parse(img.attr('data-mce-json')); data.type = this.getType(img.attr('class')).name.toLowerCase(); // Add some extra properties to the data object tinymce.each(rootAttributes, function(name) { var value = img.attr(name); if (value) data[name] = value; }); } return data; }, /** * Get type item by extension, class, clsid or mime type. * * @method getType * @param {String} value Value to get type item by. * @return {Object} Type item object or undefined. */ getType : function(value) { var i, values, typeItem; // Find type by checking the classes values = tinymce.explode(value, ' '); for (i = 0; i < values.length; i++) { typeItem = this.lookup[values[i]]; if (typeItem) return typeItem; } }, /** * Converts a tinymce.html.Node image element to video/object/embed. */ imgToObject : function(node, args) { var self = this, editor = self.editor, video, object, embed, iframe, name, value, data, source, sources, params, param, typeItem, i, item, mp4Source, replacement, posterSrc, style, audio; // Adds the flash player function addPlayer(video_src, poster_src) { var baseUri, flashVars, flashVarsOutput, params, flashPlayer; flashPlayer = editor.getParam('flash_video_player_url', self.convertUrl(self.url + '/moxieplayer.swf')); if (flashPlayer) { baseUri = editor.documentBaseURI; data.params.src = flashPlayer; // Convert the movie url to absolute urls if (editor.getParam('flash_video_player_absvideourl', true)) { video_src = baseUri.toAbsolute(video_src || '', true); poster_src = baseUri.toAbsolute(poster_src || '', true); } // Generate flash vars flashVarsOutput = ''; flashVars = editor.getParam('flash_video_player_flashvars', {url : '$url', poster : '$poster'}); tinymce.each(flashVars, function(value, name) { // Replace $url and $poster variables in flashvars value value = value.replace(/\$url/, video_src || ''); value = value.replace(/\$poster/, poster_src || ''); if (value.length > 0) flashVarsOutput += (flashVarsOutput ? 
'&' : '') + name + '=' + escape(value); }); if (flashVarsOutput.length) data.params.flashvars = flashVarsOutput; params = editor.getParam('flash_video_player_params', { allowfullscreen: true, allowscriptaccess: true }); tinymce.each(params, function(value, name) { data.params[name] = "" + value; }); } }; data = node.attr('data-mce-json'); if (!data) return; data = JSON.parse(data); typeItem = this.getType(node.attr('class')); style = node.attr('data-mce-style') if (!style) { style = node.attr('style'); if (style) style = editor.dom.serializeStyle(editor.dom.parseStyle(style, 'img')); } // Handle iframe if (typeItem.name === 'Iframe') { replacement = new Node('iframe', 1); tinymce.each(rootAttributes, function(name) { var value = node.attr(name); if (name == 'class' && value) value = value.replace(/mceItem.+ ?/g, ''); if (value && value.length > 0) replacement.attr(name, value); }); for (name in data.params) replacement.attr(name, data.params[name]); replacement.attr({ style: style, src: data.params.src }); node.replace(replacement); return; } // Handle scripts if (this.editor.settings.media_use_script) { replacement = new Node('script', 1).attr('type', 'text/javascript'); value = new Node('#text', 3); value.value = 'write' + typeItem.name + '(' + JSON.serialize(tinymce.extend(data.params, { width: node.attr('width'), height: node.attr('height') })) + ');'; replacement.append(value); node.replace(replacement); return; } // Add HTML5 video element if (typeItem.name === 'Video' && data.video.sources[0]) { // Create new object element video = new Node('video', 1).attr(tinymce.extend({ id : node.attr('id'), width: node.attr('width'), height: node.attr('height'), style : style }, data.video.attrs)); // Get poster source and use that for flash fallback if (data.video.attrs) posterSrc = data.video.attrs.poster; sources = data.video.sources = toArray(data.video.sources); for (i = 0; i < sources.length; i++) { if (/\.mp4$/.test(sources[i].src)) mp4Source = sources[i].src; } if (!sources[0].type) { video.attr('src', sources[0].src); sources.splice(0, 1); } for (i = 0; i < sources.length; i++) { source = new Node('source', 1).attr(sources[i]); source.shortEnded = true; video.append(source); } // Create flash fallback for video if we have a mp4 source if (mp4Source) { addPlayer(mp4Source, posterSrc); typeItem = self.getType('flash'); } else data.params.src = ''; } // Add HTML5 audio element if (typeItem.name === 'Audio' && data.video.sources[0]) { // Create new object element audio = new Node('audio', 1).attr(tinymce.extend({ id : node.attr('id'), width: node.attr('width'), height: node.attr('height'), style : style }, data.video.attrs)); // Get poster source and use that for flash fallback if (data.video.attrs) posterSrc = data.video.attrs.poster; sources = data.video.sources = toArray(data.video.sources); if (!sources[0].type) { audio.attr('src', sources[0].src); sources.splice(0, 1); } for (i = 0; i < sources.length; i++) { source = new Node('source', 1).attr(sources[i]); source.shortEnded = true; audio.append(source); } data.params.src = ''; } if (typeItem.name === 'EmbeddedAudio') { embed = new Node('embed', 1); embed.shortEnded = true; embed.attr({ id: node.attr('id'), width: node.attr('width'), height: node.attr('height'), style : style, type: node.attr('type') }); for (name in data.params) embed.attr(name, data.params[name]); tinymce.each(rootAttributes, function(name) { if (data[name] && name != 'type') embed.attr(name, data[name]); }); data.params.src = ''; } // Do we have a params src then 
we can generate object if (data.params.src) { // Is flv movie add player for it if (/\.flv$/i.test(data.params.src)) addPlayer(data.params.src, ''); if (args && args.force_absolute) data.params.src = editor.documentBaseURI.toAbsolute(data.params.src); // Create new object element object = new Node('object', 1).attr({ id : node.attr('id'), width: node.attr('width'), height: node.attr('height'), style : style }); tinymce.each(rootAttributes, function(name) { var value = data[name]; if (name == 'class' && value) value = value.replace(/mceItem.+ ?/g, ''); if (value && name != 'type') object.attr(name, value); }); // Add params for (name in data.params) { param = new Node('param', 1); param.shortEnded = true; value = data.params[name]; // Windows media needs to use url instead of src for the media URL if (name === 'src' && typeItem.name === 'WindowsMedia') name = 'url'; param.attr({name: name, value: value}); object.append(param); } // Setup add type and classid if strict is disabled if (this.editor.getParam('media_strict', true)) { object.attr({ data: data.params.src, type: typeItem.mimes[0] }); } else { object.attr({ classid: "clsid:" + typeItem.clsids[0], codebase: typeItem.codebase }); embed = new Node('embed', 1); embed.shortEnded = true; embed.attr({ id: node.attr('id'), width: node.attr('width'), height: node.attr('height'), style : style, type: typeItem.mimes[0] }); for (name in data.params) embed.attr(name, data.params[name]); tinymce.each(rootAttributes, function(name) { if (data[name] && name != 'type') embed.attr(name, data[name]); }); object.append(embed); } // Insert raw HTML if (data.object_html) { value = new Node('#text', 3); value.raw = true; value.value = data.object_html; object.append(value); } // Append object to video element if it exists if (video) video.append(object); } if (video) { // Insert raw HTML if (data.video_html) { value = new Node('#text', 3); value.raw = true; value.value = data.video_html; video.append(value); } } if (audio) { // Insert raw HTML if (data.video_html) { value = new Node('#text', 3); value.raw = true; value.value = data.video_html; audio.append(value); } } var n = video || audio || object || embed; if (n) node.replace(n); else node.remove(); }, /** * Converts a tinymce.html.Node video/object/embed to an img element. 
* * The video/object/embed will be converted into an image placeholder with a JSON data attribute like this: * <img class="mceItemMedia mceItemFlash" width="100" height="100" data-mce-json="{..}" /> * * The JSON structure will be like this: * {'params':{'flashvars':'something','quality':'high','src':'someurl'}, 'video':{'sources':[{src: 'someurl', type: 'video/mp4'}]}} */ objectToImg : function(node) { var object, embed, video, iframe, img, name, id, width, height, style, i, html, param, params, source, sources, data, type, lookup = this.lookup, matches, attrs, urlConverter = this.editor.settings.url_converter, urlConverterScope = this.editor.settings.url_converter_scope, hspace, vspace, align, bgcolor; function getInnerHTML(node) { return new tinymce.html.Serializer({ inner: true, validate: false }).serialize(node); }; function lookupAttribute(o, attr) { return lookup[(o.attr(attr) || '').toLowerCase()]; } function lookupExtension(src) { var ext = src.replace(/^.*\.([^.]+)$/, '$1'); return lookup[ext.toLowerCase() || '']; } // If node isn't in document if (!node.parent) return; // Handle media scripts if (node.name === 'script') { if (node.firstChild) matches = scriptRegExp.exec(node.firstChild.value); if (!matches) return; type = matches[1]; data = {video : {}, params : JSON.parse(matches[2])}; width = data.params.width; height = data.params.height; } // Setup data objects data = data || { video : {}, params : {} }; // Setup new image object img = new Node('img', 1); img.attr({ src : this.editor.theme.url + '/img/trans.gif' }); // Video element name = node.name; if (name === 'video' || name == 'audio') { video = node; object = node.getAll('object')[0]; embed = node.getAll('embed')[0]; width = video.attr('width'); height = video.attr('height'); id = video.attr('id'); data.video = {attrs : {}, sources : []}; // Get all video attributes attrs = data.video.attrs; for (name in video.attributes.map) attrs[name] = video.attributes.map[name]; source = node.attr('src'); if (source) data.video.sources.push({src : urlConverter.call(urlConverterScope, source, 'src', node.name)}); // Get all sources sources = video.getAll("source"); for (i = 0; i < sources.length; i++) { source = sources[i].remove(); data.video.sources.push({ src: urlConverter.call(urlConverterScope, source.attr('src'), 'src', 'source'), type: source.attr('type'), media: source.attr('media') }); } // Convert the poster URL if (attrs.poster) attrs.poster = urlConverter.call(urlConverterScope, attrs.poster, 'poster', node.name); } // Object element if (node.name === 'object') { object = node; embed = node.getAll('embed')[0]; } // Embed element if (node.name === 'embed') embed = node; // Iframe element if (node.name === 'iframe') { iframe = node; type = 'Iframe'; } if (object) { // Get width/height width = width || object.attr('width'); height = height || object.attr('height'); style = style || object.attr('style'); id = id || object.attr('id'); hspace = hspace || object.attr('hspace'); vspace = vspace || object.attr('vspace'); align = align || object.attr('align'); bgcolor = bgcolor || object.attr('bgcolor'); data.name = object.attr('name'); // Get all object params params = object.getAll("param"); for (i = 0; i < params.length; i++) { param = params[i]; name = param.remove().attr('name'); if (!excludedAttrs[name]) data.params[name] = param.attr('value'); } data.params.src = data.params.src || object.attr('data'); } if (embed) { // Get width/height width = width || embed.attr('width'); height = height || embed.attr('height'); style = 
style || embed.attr('style'); id = id || embed.attr('id'); hspace = hspace || embed.attr('hspace'); vspace = vspace || embed.attr('vspace'); align = align || embed.attr('align'); bgcolor = bgcolor || embed.attr('bgcolor'); // Get all embed attributes for (name in embed.attributes.map) { if (!excludedAttrs[name] && !data.params[name]) data.params[name] = embed.attributes.map[name]; } } if (iframe) { // Get width/height width = iframe.attr('width'); height = iframe.attr('height'); style = style || iframe.attr('style'); id = iframe.attr('id'); hspace = iframe.attr('hspace'); vspace = iframe.attr('vspace'); align = iframe.attr('align'); bgcolor = iframe.attr('bgcolor'); tinymce.each(rootAttributes, function(name) { img.attr(name, iframe.attr(name)); }); // Get all iframe attributes for (name in iframe.attributes.map) { if (!excludedAttrs[name] && !data.params[name]) data.params[name] = iframe.attributes.map[name]; } } // Use src not movie if (data.params.movie) { data.params.src = data.params.src || data.params.movie; delete data.params.movie; } // Convert the URL to relative/absolute depending on configuration if (data.params.src) data.params.src = urlConverter.call(urlConverterScope, data.params.src, 'src', 'object'); if (video) { if (node.name === 'video') type = lookup.video.name; else if (node.name === 'audio') type = lookup.audio.name; } if (object && !type) type = (lookupAttribute(object, 'clsid') || lookupAttribute(object, 'classid') || lookupAttribute(object, 'type') || {}).name; if (embed && !type) type = (lookupAttribute(embed, 'type') || lookupExtension(data.params.src) || {}).name; // for embedded audio we preserve the original specified type if (embed && type == 'EmbeddedAudio') { data.params.type = embed.attr('type'); } // Replace the video/object/embed element with a placeholder image containing the data node.replace(img); // Remove embed if (embed) embed.remove(); // Serialize the inner HTML of the object element if (object) { html = getInnerHTML(object.remove()); if (html) data.object_html = html; } // Serialize the inner HTML of the video element if (video) { html = getInnerHTML(video.remove()); if (html) data.video_html = html; } data.hspace = hspace; data.vspace = vspace; data.align = align; data.bgcolor = bgcolor; // Set width/height of placeholder img.attr({ id : id, 'class' : 'mceItemMedia mceItem' + (type || 'Flash'), style : style, width : width || (node.name == 'audio' ? "300" : "320"), height : height || (node.name == 'audio' ? "32" : "240"), hspace : hspace, vspace : vspace, align : align, bgcolor : bgcolor, "data-mce-json" : JSON.serialize(data, "'") }); } }); // Register plugin tinymce.PluginManager.add('media', tinymce.plugins.MediaPlugin); })();
PypiClean
/scimes-0.3.2.tar.gz/scimes-0.3.2/astropy_helpers/astropy_helpers/version.py
from __future__ import unicode_literals import datetime import locale import os import subprocess import warnings def _decode_stdio(stream): try: stdio_encoding = locale.getdefaultlocale()[1] or 'utf-8' except ValueError: stdio_encoding = 'utf-8' try: text = stream.decode(stdio_encoding) except UnicodeDecodeError: # Final fallback text = stream.decode('latin1') return text def update_git_devstr(version, path=None): """ Updates the git revision string if and only if the path is being imported directly from a git working copy. This ensures that the revision number in the version string is accurate. """ try: # Quick way to determine if we're in git or not - returns '' if not devstr = get_git_devstr(sha=True, show_warning=False, path=path) except OSError: return version if not devstr: # Probably not in git so just pass silently return version if 'dev' in version: # update to the current git revision version_base = version.split('.dev', 1)[0] devstr = get_git_devstr(sha=False, show_warning=False, path=path) return version_base + '.dev' + devstr else: #otherwise it's already the true/release version return version def get_git_devstr(sha=False, show_warning=True, path=None): """ Determines the number of revisions in this repository. Parameters ---------- sha : bool If True, the full SHA1 hash will be returned. Otherwise, the total count of commits in the repository will be used as a "revision number". show_warning : bool If True, issue a warning if git returns an error code, otherwise errors pass silently. path : str or None If a string, specifies the directory to look in to find the git repository. If `None`, the current working directory is used, and must be the root of the git repository. If given a filename it uses the directory containing that file. Returns ------- devversion : str Either a string with the revision number (if `sha` is False), the SHA1 hash of the current commit (if `sha` is True), or an empty string if git version info could not be identified. """ if path is None: path = os.getcwd() if not _get_repo_path(path, levels=0): return '' if not os.path.isdir(path): path = os.path.abspath(os.path.dirname(path)) if sha: # Faster for getting just the hash of HEAD cmd = ['rev-parse', 'HEAD'] else: cmd = ['rev-list', '--count', 'HEAD'] def run_git(cmd): try: p = subprocess.Popen(['git'] + cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, stderr = p.communicate() except OSError as e: if show_warning: warnings.warn('Error running git: ' + str(e)) return (None, b'', b'') if p.returncode == 128: if show_warning: warnings.warn('No git repository present at {0!r}! 
Using ' 'default dev version.'.format(path)) return (p.returncode, b'', b'') if p.returncode == 129: if show_warning: warnings.warn('Your git looks old (does it support {0}?); ' 'consider upgrading to v1.7.2 or ' 'later.'.format(cmd[0])) return (p.returncode, stdout, stderr) elif p.returncode != 0: if show_warning: warnings.warn('Git failed while determining revision ' 'count: {0}'.format(_decode_stdio(stderr))) return (p.returncode, stdout, stderr) return p.returncode, stdout, stderr returncode, stdout, stderr = run_git(cmd) if not sha and returncode == 129: # git returns 129 if a command option failed to parse; in # particular this could happen in git versions older than 1.7.2 # where the --count option is not supported # Also use --abbrev-commit and --abbrev=0 to display the minimum # number of characters needed per-commit (rather than the full hash) cmd = ['rev-list', '--abbrev-commit', '--abbrev=0', 'HEAD'] returncode, stdout, stderr = run_git(cmd) # Fall back on the old method of getting all revisions and counting # the lines if returncode == 0: return str(stdout.count(b'\n')) else: return '' elif sha: return _decode_stdio(stdout)[:40] else: return _decode_stdio(stdout).strip() def _get_repo_path(pathname, levels=None): """ Given a file or directory name, determine the root of the git repository this path is under. If given, this won't look any higher than ``levels`` (that is, if ``levels=0`` then the given path must be the root of the git repository and is returned if so. Returns `None` if the given path could not be determined to belong to a git repo. """ if os.path.isfile(pathname): current_dir = os.path.abspath(os.path.dirname(pathname)) elif os.path.isdir(pathname): current_dir = os.path.abspath(pathname) else: return None current_level = 0 while levels is None or current_level <= levels: if os.path.exists(os.path.join(current_dir, '.git')): return current_dir current_level += 1 if current_dir == os.path.dirname(current_dir): break current_dir = os.path.dirname(current_dir) return None _packagename = "astropy_helpers" _last_generated_version = "1.1.dev" _last_githash = "9a77f664b3a1344228bc51ee512218170b14bb9b" # Determine where the source code for this module # lives. If __file__ is not a filesystem path then # it is assumed not to live in a git repo at all. if _get_repo_path(__file__, levels=len(_packagename.split('.'))): version = update_git_devstr(_last_generated_version, path=__file__) githash = get_git_devstr(sha=True, show_warning=False, path=__file__) or _last_githash else: # The file does not appear to live in a git repo so don't bother # invoking git version = _last_generated_version githash = _last_githash major = 1 minor = 1 bugfix = 0 release = False timestamp = datetime.datetime(2018, 12, 12, 22, 14, 47, 756626) debug = False try: from ._compiler import compiler except ImportError: compiler = "unknown" try: from .cython_version import cython_version except ImportError: cython_version = "unknown"
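A hedged usage sketch of the helpers above; the path is a placeholder, and outside a git working copy the git queries simply return empty strings.

if __name__ == "__main__":
    # Commit count and full SHA1 of HEAD for the current checkout, '' when not in git.
    print(get_git_devstr(sha=False, show_warning=False, path="."))
    print(get_git_devstr(sha=True, show_warning=False, path="."))
    # A '.dev' version gets the live revision count appended, e.g. '1.1.dev1234'.
    print(update_git_devstr("1.1.dev", path="."))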
PypiClean
/pymzml-2.5.1.tar.gz/pymzml-2.5.1/README.rst
############
Introduction
############

.. image:: https://travis-ci.org/pymzml/pymzML.svg?branch=master
    :target: https://travis-ci.org/pymzml/pymzML

.. image:: https://ci.appveyor.com/api/projects/status/e5reb5xw74jfqk2v/branch/dev?svg=true
    :target: https://ci.appveyor.com/api/projects/status/e5reb5xw74jfqk2v/branch/dev?svg=true
    :alt: AppVeyor CI status

.. image:: https://readthedocs.org/projects/pymzml/badge/?version=latest
    :target: http://pymzml.readthedocs.io/en/latest/?badge=latest
    :alt: Documentation Status

.. image:: https://codecov.io/gh/pymzml/pymzml/branch/master/graph/badge.svg
    :target: https://codecov.io/gh/pymzml/pymzml
    :alt: Code Coverage

.. image:: https://img.shields.io/pypi/v/pymzML.svg
    :target: https://pypi.org/project/pymzML/

.. image:: https://pepy.tech/badge/pymzml
    :target: https://pepy.tech/project/pymzml

.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
    :target: https://github.com/psf/black
    :alt: As long it is black

.. image:: http://depsy.org/api/package/pypi/pymzML/badge.svg
    :target: http://depsy.org/package/python/pymzML
    :alt: Research software impact

*******************
General information
*******************

Module to parse mzML data in Python based on cElementTree.

Copyright 2010-2021 by:

    | M. Kösters,
    | J. Leufken,
    | T. Bald,
    | A. Niehues,
    | S. Schulze,
    | K. Sugimoto,
    | R.P. Zahedi,
    | M. Hippler,
    | S.A. Leidel,
    | C. Fufezan,

===================
Contact information
===================

Please refer to:

    | Dr. Christian Fufezan
    | Group Leader Experimental Bioinformatics
    | Cellzome GmbH
    | R&D Platform Technology & Science
    | GSK
    | Germany
    | eMail: [email protected]
    |
    | https://fufezan.net

*******
Summary
*******

pymzML is an extension to Python that offers

    * a) easy access to mass spectrometry (MS) data that allows the rapid development of tools
    * b) a very fast parser for mzML data, the standard mass spectrometry data format
    * c) a set of functions to compare and/or handle spectra
    * d) random access in compressed files
    * e) interactive data visualization

**************
Implementation
**************

pymzML requires Python 3.7+. The module is freely available on pymzml.github.com or PyPI,
is published under the MIT license, and only requires numpy and regex. Several optional
dependencies enable extended functionality such as interactive plotting and deconvolution.

********
Download
********

Get the latest version via GitHub:
    | https://github.com/pymzml/pymzML

The complete documentation can be found as a PDF:
    | http://pymzml.github.com/dist/pymzml.pdf

********
Citation
********

M Kösters, J Leufken, S Schulze, K Sugimoto, J Klein, R P Zahedi, M Hippler, S A Leidel, C Fufezan;
pymzML v2.0: introducing a highly compressed and seekable gzip format, Bioinformatics,
doi: https://doi.org/10.1093/bioinformatics/bty046

************
Installation
************

pymzML requires `Python`_ 3.7 or higher.

.. note::

    Consider using a Python virtual environment for easy installation and use.
    Further, usage of python3.7+ is recommended.

Download pymzML using `GitHub`_ **or** the zip file:

* GitHub version: Start by cloning the GitHub repository::

    user@localhost:~$ git clone https://github.com/pymzML/pymzml.git
    user@localhost:~$ cd pymzml
    user@localhost:~$ pip install -r requirements.txt
    user@localhost:~$ python setup.py install

.. _Python: https://www.python.org/downloads/
.. _GitHub: https://github.com/pymzML/pymzml

* pypi version::

    user@localhost:~$ pip install pymzml                    # install standard version
    user@localhost:~$ pip install "pymzml[plot]"            # with plotting support
    user@localhost:~$ pip install "pymzml[pynumpress]"      # with pynumpress support
    user@localhost:~$ pip install "pymzml[deconvolution]"   # with deconvolution support using ms_deisotope
    user@localhost:~$ pip install "pymzml[full]"            # full featured

If you have trouble installing the dependencies, install numpy first separately,
since pynumpress requires numpy to be installed.

If you use Windows 7 please use the 'SDK7.1 command prompt' for installation
of pymzML to assure correct compiling of the C extensions.

=======
Testing
=======

To test the package and correct installation::

    tox

*************
Contributing
*************

Please read the contribution guidelines before contributing `here </CONTRIBUTING.rst>`_

****************
Code of Conduct
****************

Since pymzML is an open source project maintained by the community, we established a code of
conduct in order to facilitate an inclusive environment for all users, contributors and project
members. Before contributing to pymzML, please read the code of conduct `here </CODE_OF_CONDUCT.md>`_
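*************
Quick example
*************

A minimal, hedged usage sketch (the file name is a placeholder; see the full
documentation for the complete API)::

    import pymzml

    run = pymzml.run.Reader("tests/data/example.mzML")
    for spectrum in run:
        if spectrum.ms_level == 1:
            print(spectrum.ID, len(spectrum.peaks("centroided")))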
PypiClean
/h2o_pysparkling_3.1-3.42.0.2.post1.tar.gz/h2o_pysparkling_3.1-3.42.0.2.post1/ai/h2o/sparkling/ml/models/H2OMOJOModel.py
from pyspark.ml.util import _jvm
from py4j.java_gateway import JavaObject

from ai.h2o.sparkling.Initializer import Initializer
from ai.h2o.sparkling.ml.models.H2OMOJOSettings import H2OMOJOSettings
from ai.h2o.sparkling.ml.models.H2OMOJOModelBase import H2OMOJOModelBase
from ai.h2o.sparkling.ml.params.H2OMOJOModelParams import H2OTreeBasedSupervisedMOJOModelParams
from ai.h2o.sparkling.ml.params.H2OMOJOModelParams import H2OTreeBasedUnsupervisedMOJOModelParams
from ai.h2o.sparkling.ml.params.H2OMOJOModelParams import H2OSupervisedMOJOModelParams
from ai.h2o.sparkling.ml.params.H2OMOJOModelParams import H2OUnsupervisedMOJOModelParams
from ai.h2o.sparkling.ml.params.H2OMOJOModelParams import H2OAlgorithmMOJOModelParams
from ai.h2o.sparkling.ml.params.H2OMOJOModelParams import H2OFeatureMOJOModelParams
from ai.h2o.sparkling.ml.params.H2OMOJOModelParams import H2OMOJOModelParams
from ai.h2o.sparkling.ml.models.H2OXGBoostMOJOModel import H2OXGBoostMOJOModel
from ai.h2o.sparkling.ml.models.H2OGBMMOJOModel import H2OGBMMOJOModel
from ai.h2o.sparkling.ml.models.H2ODRFMOJOModel import H2ODRFMOJOModel
from ai.h2o.sparkling.ml.models.H2OGLMMOJOModel import H2OGLMMOJOModel
from ai.h2o.sparkling.ml.models.H2OGAMMOJOModel import H2OGAMMOJOModel
from ai.h2o.sparkling.ml.models.H2ODeepLearningMOJOModel import H2ODeepLearningMOJOModel
from ai.h2o.sparkling.ml.models.H2ORuleFitMOJOModel import H2ORuleFitMOJOModel
from ai.h2o.sparkling.ml.models.H2OKMeansMOJOModel import H2OKMeansMOJOModel
from ai.h2o.sparkling.ml.models.H2OCoxPHMOJOModel import H2OCoxPHMOJOModel
from ai.h2o.sparkling.ml.models.H2OIsolationForestMOJOModel import H2OIsolationForestMOJOModel
from ai.h2o.sparkling.ml.models.H2OExtendedIsolationForestMOJOModel import H2OExtendedIsolationForestMOJOModel
from ai.h2o.sparkling.ml.models.H2OAutoEncoderMOJOModel import H2OAutoEncoderMOJOModel
from ai.h2o.sparkling.ml.models.H2OPCAMOJOModel import H2OPCAMOJOModel
from ai.h2o.sparkling.ml.models.H2OGLRMMOJOModel import H2OGLRMMOJOModel
from ai.h2o.sparkling.ml.models.H2OWord2VecMOJOModel import H2OWord2VecMOJOModel
from ai.h2o.sparkling.ml.models.H2OStackedEnsembleMOJOModel import H2OStackedEnsembleMOJOModel


class H2OMOJOModelFactory:

    @staticmethod
    def createFromMojo(pathToMojo, settings=H2OMOJOSettings.default()):
        # We need to make sure that Sparkling Water classes are available on the Spark driver and executor paths
        Initializer.load_sparkling_jar()
        javaModel = _jvm().ai.h2o.sparkling.ml.models.H2OMOJOModel.createFromMojo(pathToMojo, settings.toJavaObject())
        return H2OMOJOModelFactory.createSpecificMOJOModel(javaModel)

    @staticmethod
    def createSpecificMOJOModel(javaModel):
        className = javaModel.getClass().getSimpleName()
        if className == "H2OTreeBasedSupervisedMOJOModel":
            return H2OTreeBasedSupervisedMOJOModel(javaModel)
        elif className == "H2OTreeBasedUnsupervisedMOJOModel":
            return H2OTreeBasedUnsupervisedMOJOModel(javaModel)
        elif className == "H2OSupervisedMOJOModel":
            return H2OSupervisedMOJOModel(javaModel)
        elif className == "H2OUnsupervisedMOJOModel":
            return H2OUnsupervisedMOJOModel(javaModel)
        elif className == "H2OAlgorithmMOJOModel":
            return H2OAlgorithmMOJOModel(javaModel)
        elif className == "H2OFeatureMOJOModel":
            return H2OFeatureMOJOModel(javaModel)
        elif className == "H2OXGBoostMOJOModel":
            return H2OXGBoostMOJOModel(javaModel)
        elif className == "H2OGBMMOJOModel":
            return H2OGBMMOJOModel(javaModel)
        elif className == "H2ODRFMOJOModel":
            return H2ODRFMOJOModel(javaModel)
        elif className == "H2OGLMMOJOModel":
            return H2OGLMMOJOModel(javaModel)
        elif className == "H2OGAMMOJOModel":
            return H2OGAMMOJOModel(javaModel)
        elif className == "H2ODeepLearningMOJOModel":
            return H2ODeepLearningMOJOModel(javaModel)
        elif className == "H2ORuleFitMOJOModel":
            return H2ORuleFitMOJOModel(javaModel)
        elif className == "H2OKMeansMOJOModel":
            return H2OKMeansMOJOModel(javaModel)
        elif className == "H2OCoxPHMOJOModel":
            return H2OCoxPHMOJOModel(javaModel)
        elif className == "H2OIsolationForestMOJOModel":
            return H2OIsolationForestMOJOModel(javaModel)
        elif className == "H2OExtendedIsolationForestMOJOModel":
            return H2OExtendedIsolationForestMOJOModel(javaModel)
        elif className == "H2OAutoEncoderMOJOModel":
            return H2OAutoEncoderMOJOModel(javaModel)
        elif className == "H2OPCAMOJOModel":
            return H2OPCAMOJOModel(javaModel)
        elif className == "H2OGLRMMOJOModel":
            return H2OGLRMMOJOModel(javaModel)
        elif className == "H2OWord2VecMOJOModel":
            return H2OWord2VecMOJOModel(javaModel)
        elif className == "H2OStackedEnsembleMOJOModel":
            return H2OStackedEnsembleMOJOModel(javaModel)
        else:
            return H2OMOJOModel(javaModel)


class WithCVModels(H2OMOJOModelFactory):

    def getCrossValidationModels(self):
        cvModels = self._java_obj.getCrossValidationModelsAsArray()
        if cvModels is None:
            return None
        elif isinstance(cvModels, JavaObject):
            # Wrap each Java cross-validation model in its specific Python MOJO model class
            return [H2OMOJOModelFactory.createSpecificMOJOModel(v) for v in cvModels]
        else:
            raise TypeError("Invalid type.")


class H2OMOJOModel(H2OMOJOModelParams, H2OMOJOModelBase, WithCVModels):
    pass


class H2OAlgorithmMOJOModel(H2OAlgorithmMOJOModelParams, WithCVModels):
    pass


class H2OFeatureMOJOModel(H2OFeatureMOJOModelParams, WithCVModels):
    pass


class H2OUnsupervisedMOJOModel(H2OUnsupervisedMOJOModelParams, WithCVModels):
    pass


class H2OSupervisedMOJOModel(H2OSupervisedMOJOModelParams, WithCVModels):
    pass


class H2OTreeBasedUnsupervisedMOJOModel(H2OTreeBasedUnsupervisedMOJOModelParams, WithCVModels):
    pass


class H2OTreeBasedSupervisedMOJOModel(H2OTreeBasedSupervisedMOJOModelParams, WithCVModels):
    pass
PypiClean