id
stringlengths 1
8
| text
stringlengths 6
1.05M
| dataset_id
stringclasses 1
value |
---|---|---|
/gaohn-common-utils-0.0.112.tar.gz/gaohn-common-utils-0.0.112/common_utils/core/decorators/decorators.py | import functools
import os
import threading
import time
from datetime import datetime
from typing import Any, Callable, Dict, TypeVar
import numpy as np
import psutil
from fastapi import Request
from prettytable import PrettyTable
from rich.pretty import pprint
# F: a callable that takes any number of arguments and returns any value.
F = TypeVar("F", bound=Callable[..., Any])


def construct_response(func: F) -> F:
    """Decorator that wraps an endpoint's results in a standard JSON response.

    The wrapped endpoint must return a dict containing at least the keys
    ``"message"`` and ``"status-code"``; an optional ``"data"`` key is passed
    through verbatim.

    Supported Frameworks:
        - FastAPI

    To support Flask and Django.

    Reference:
        https://madewithml.com/courses/mlops/api/#decorators
    """

    @functools.wraps(func)
    def wrap(request: Request, *args: Any, **kwargs: Any) -> Dict[str, Any]:
        results = func(request, *args, **kwargs)
        response = {
            "message": results["message"],
            "method": request.method,
            "status-code": results["status-code"],
            "timestamp": datetime.now().isoformat(),
            # BUG FIX: request.url is a URL object (not JSON-serializable);
            # str() yields the full URL string (same as the hinted `._url`).
            "url": str(request.url),
        }
        if "data" in results:
            response["data"] = results["data"]
        return response

    return wrap
# TODO: For memory usage, consider checking out memory_profiler.
# Hand-rolling this misses many minute details and does not account for
# multithreading or multiprocessing.
def record_memory_usage(func: F) -> F:
    """Decorator that reports the RSS delta incurred by one call of ``func``.

    Prints a PrettyTable row with the function name and the memory used in
    bytes, megabytes and gigabytes, then returns ``func``'s result unchanged.
    """

    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Any:
        # Snapshot RSS before the call.
        proc = psutil.Process(os.getpid())
        rss_before = proc.memory_info().rss

        result = func(*args, **kwargs)

        # Snapshot RSS after the call and report the difference.
        rss_after = proc.memory_info().rss
        delta = rss_after - rss_before

        report = PrettyTable()
        report.field_names = ["Function Name", "Bytes", "Megabytes", "Gigabytes"]
        report.add_row(
            [
                func.__name__,
                f"{delta}",
                f"{delta / 1024 / 1024}",
                f"{delta / 1024 / 1024 / 1024}",
            ]
        )
        pprint(report)
        return result

    return wrapper
class MemoryMonitor:
    """Samples and prints this process's RSS on a background thread.

    Usage: call ``start()`` to begin sampling every ``interval`` seconds and
    ``stop()`` to end sampling and join the thread.
    """

    def __init__(self, interval=1):
        self.interval = interval  # Time interval in seconds between each check
        self.keep_monitoring = True
        # BUG FIX: previously unset, so stop() before start() raised
        # AttributeError. None means "not started yet".
        self.thread = None

    def monitor_memory(self):
        """Loop: print the current RSS every ``interval`` seconds until stopped."""
        process = psutil.Process(os.getpid())
        while self.keep_monitoring:
            mem_info = process.memory_info()
            print(f"Current memory usage: {mem_info.rss / 1024 / 1024} MB")
            time.sleep(self.interval)

    def start(self):
        """Spawn the monitoring thread (daemon, so it cannot block interpreter exit)."""
        self.thread = threading.Thread(target=self.monitor_memory, daemon=True)
        self.keep_monitoring = True
        self.thread.start()

    def stop(self):
        """Signal the monitor loop to finish and wait for the thread to exit."""
        self.keep_monitoring = False
        if self.thread is not None:
            self.thread.join()
def monitor_memory_usage(func):
    """Decorator that runs a :class:`MemoryMonitor` for the duration of the call.

    Starts the background sampler before invoking ``func`` and always stops it
    afterwards — even when ``func`` raises.
    """

    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Any:
        monitor = MemoryMonitor()
        monitor.start()
        try:
            return func(*args, **kwargs)
        finally:
            # BUG FIX: previously a raising func skipped stop(), leaking the
            # monitor thread (it printed forever).
            monitor.stop()

    return wrapper
# Demo: grow this process's memory to exercise both decorators —
# record_memory_usage reports the total RSS delta, monitor_memory_usage
# prints live samples while the loop runs. NOTE: 100000 iterations of a
# 1 MB string amounts to roughly 100 GB of allocations if run to completion.
@record_memory_usage
@monitor_memory_usage
def increase_memory_usage():
    """Steadily grow memory by appending 1 MB strings to a list."""
    data = []
    for _ in range(100000):
        data.append("x" * 1000000)  # Increase memory usage by 1 MB each iteration
        # time.sleep(0.1)  # Sleep for a bit to slow down the loop


if __name__ == "__main__":
    increase_memory_usage()
/ArkPrice-0.1.1.tar.gz/ArkPrice-0.1.1/README.md | [![Build Status](https://travis-ci.org/Highjhacker/arkprice.svg?branch=master)](https://travis-ci.org/Highjhacker/arkprice)
[![HitCount](http://hits.dwyl.io/Highjhacker/priceark.svg)](http://hits.dwyl.io/Highjhacker/priceark) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
# ArkPrice
Fetch the price of Ark in any (crypto)currency.
## Built with
- [Python](https://www.python.org/)
- [Requests](http://docs.python-requests.org/en/master/)
## Installation
```shell
$ pip install arkprice
```
## Usage
### Fetching the price
You can fetch the price without any parameter — by default the return value will be in USD —
or specify one or more (crypto)currencies you want to use.
```python
from ArkPrice import get_price
print(get_price("eur"))
print(get_price("eur", "btc", "usd"))
>>> {'EUR': 4.41}
>>> {'EUR': 4.41, 'BTC': 0.0004976, 'USD': 5.39}
```
### Output the price
Output the price directly in the console with the correct symbol of the currency.
```python
from ArkPrice import output_price
output_price("eur", "btc", "usd")
>>> 4.46 €
>>> 0.0004994 Ƀ
>>> 5.43 $
```
## TODOS
- [x] Core code.
- [ ] Write documentation.
- [x] Unit testing.
- [x] Package it.
- [x] Travis.
- [ ] Missing support for python 3.2.
- [ ] OSX Support ?
- [ ] Windows support ?
- [ ] More markets ?
- [x] CryptoCompare
- [ ] CoinMarketCap
- ...
## Authors
- Jolan Beer - Highjhacker
## License
ArkPrice is under MIT license. See the [LICENSE file](https://github.com/Highjhacker/arkprice/blob/master/LICENSE) for more information. | PypiClean |
/nebula2-python-2.6.1.tar.gz/nebula2-python-2.6.1/README.md | # nebula-python
This directory holds the Python API for Nebula Graph. It is used to connect with Nebula Graph 2.0.
## Before you start
Before you start, please read this section to choose the right branch for you. In branch v1.0, the API works only for Nebula Graph 1.0. In the master branch, the API works only for Nebula Graph 2.0.
## The directory structure
```text
|--nebula-python
|
|-- nebula2 // client code
| |-- fbthrift // the fbthrift lib code
| |-- common
| |-- data
| |-- graph
| |-- meta
| |-- net // the net code for graph client
| |-- storage
| |-- Config.py // the pool config
| |__ Exception.py // the define exception
|
|-- examples
| |-- GraphClientMultiThreadExample.py // the multi thread example
| |-- GraphClientSimpleExample.py // the simple example
| |__ ScanVertexEdgeExample.py
|
|-- tests // the test code
|
|-- setup.py // used to install or package
|
|__ README.md // the introduction of nebula2-python
```
## How to get nebula2-python
### Option one: clone from GitHub
- Clone from GitHub
```bash
git clone https://github.com/vesoft-inc/nebula-python.git
cd nebula-python
```
- Install
```python
sudo python3 setup.py install
```
When your environment cannot access `pypi`, you need to install the following packages manually.
```
pip install -r requirements.txt
```
dev version
```
pip install -r requirements-dev.txt
```
### Option two: using pip
```python
pip install nebula2-python==$version
```
## Quick example to use graph-client to connect graphd
```python
from nebula2.gclient.net import ConnectionPool
from nebula2.Config import Config
# define a config
config = Config()
config.max_connection_pool_size = 10
# init connection pool
connection_pool = ConnectionPool()
# if the given servers are ok, return true, else return false
ok = connection_pool.init([('127.0.0.1', 9669)], config)
# option 1 control the connection release yourself
# get session from the pool
session = connection_pool.get_session('root', 'nebula')
# select space
session.execute('USE nba')
# show tags
result = session.execute('SHOW TAGS')
print(result)
# release session
session.release()
# option 2 with session_context, session will be released automatically
with connection_pool.session_context('root', 'nebula') as session:
session.execute('USE nba')
result = session.execute('SHOW TAGS')
print(result)
# close the pool
connection_pool.close()
```
## Quick example to use storage-client to scan vertex and edge
You should make sure the scan client can connect to the storage addresses shown by `SHOW HOSTS`.
```python
from nebula2.mclient import MetaCache, HostAddr
from nebula2.sclient.GraphStorageClient import GraphStorageClient
# the addresses of the metad servers
meta_cache = MetaCache([('172.28.1.1', 9559),
('172.28.1.2', 9559),
('172.28.1.3', 9559)],
50000)
# option 1 metad usually discover the storage address automatically
graph_storage_client = GraphStorageClient(meta_cache)
# option 2 manually specify the storage address
storage_addrs = [HostAddr(host='172.28.1.4', port=9779),
HostAddr(host='172.28.1.5', port=9779),
HostAddr(host='172.28.1.6', port=9779)]
graph_storage_client = GraphStorageClient(meta_cache, storage_addrs)
resp = graph_storage_client.scan_vertex(
space_name='ScanSpace',
tag_name='person')
while resp.has_next():
result = resp.next()
for vertex_data in result:
print(vertex_data)
resp = graph_storage_client.scan_edge(
space_name='ScanSpace',
edge_name='friend')
while resp.has_next():
result = resp.next()
for edge_data in result:
print(edge_data)
```
## How to choose nebula-python
| Nebula2-Python Version | NebulaGraph Version |
|---|---|
| 2.0.0b1 | 2.0.0beta |
| 2.0.0rc1 | 2.0.0-rc1 |
| 2.0.0 | 2.0.0/2.0.1 |
| 2.5.0 | 2.5.0 |
| 2.6.0 | 2.6.0 |
| PypiClean |
/jintian-architecture-code-test-1.2.tar.gz/jintian-architecture-code-test-1.2/com/fw/base/base_dao.py | from com.fw.utils.id_util import IDUtils
from com.fw.base.base_exception import BaseException
import inspect
'''
主要是做dao数据库低层封装
1:get set str
2:字段反射
3:常用属性
'''
class BaseDao(object):
    """Low-level DAO base class.

    Responsibilities:
        1. generic get/set/str access to instance attributes
        2. field reflection helpers
        3. common persistence operations (insert/update/delete) against the
           mysql (default) or mongo backends

    Subclasses are expected to set ``table_name`` (and optionally
    ``service_name``) at class level.
    """

    table_name = None
    service_name = None

    def __init__(self, id=None):
        # Generate a primary key when none is supplied.
        if not id:
            id = IDUtils.get_primary_key()
        self.id = id

    def get_value(self, key):
        """Return the attribute named ``key`` (None if absent)."""
        return self.__dict__.get(key)

    def set_value(self, key, value):
        """Set the attribute named ``key`` to ``value``."""
        self.__dict__[key] = value

    def __str__(self):
        # BUG FIX: __str__ must return a string. It used to print the dict
        # and implicitly return None, so str(dao) raised TypeError.
        return str(self.__dict__)

    def get_keys(self):
        """Return the names of all attributes set on this instance."""
        return self.__dict__.keys()

    def __check_param(self):
        """Validate that this DAO is complete enough to persist."""
        data_dict = self.__dict__
        if not data_dict:
            raise BaseException("THE DAO NO ATTRIBUTES")
        if "id" not in data_dict or not data_dict["id"]:
            raise BaseException("THE DAO PRIMARY KEY IS NONE")
        if len(data_dict) <= 1:
            raise BaseException("THE DAO ATTRIBUTE IS TOO LESS")

    def get_dict_value(self):
        """Return a copy of the attribute dict without injected dao handles."""
        result = self.__dict__.copy()
        if "mongo_dao" in result.keys():
            result.pop("mongo_dao")
            result.pop("mysql_dao")
        return result

    @staticmethod
    def get_dao_fileds(T):
        """Reflect ``T.__init__`` and return its persistable field names.

        ``id`` is always listed first; ``self``, ``*args``/``**kwargs`` and
        injected dao handles are excluded.
        """
        params = inspect.signature(T.__init__).parameters
        # BUG FIX: "id" is now in the excluded set — previously a dao whose
        # __init__ declared an ``id`` parameter got "id" listed twice.
        excluded = {"self", "args", "kwargs", "mongo_dao", "mysql_dao", "id"}
        result = ["id"]
        for key in params:
            if key not in excluded:
                result.append(key)
        return result

    @staticmethod
    def dict_to_dao(data: dict):
        """Build a BaseDao whose attributes mirror ``data``."""
        dao = BaseDao()
        for key, val in data.items():
            dao.set_value(key, val)
        return dao

    def delete(self, db="mysql", **kwargs):
        """Delete this row by primary key from mysql (default) or mongo."""
        from com.fw.db.mongo_db import mongo_dao
        from com.fw.db.mysql_db import mysql_dao
        # BUG FIX: the old check was ``if not id or id == ""`` which tested
        # the *builtin* id() function (always truthy), so a missing primary
        # key was never detected.
        if not self.id or self.id == "":
            raise BaseException("缺少必要参数")
        try:
            if db == "mysql":
                mysql_dao.delete_by_id(self.table_name, self.id, **kwargs)
            else:
                mongo_dao.remove_by_id(self.table_name, self.id, **kwargs)
        except Exception as e:
            raise BaseException("删除{}数据失败:".format(self.service_name if self.service_name else self.table_name), e)

    def insert(self, db="mysql", **kwargs):
        """Insert this DAO into mysql (default) or mongo."""
        from com.fw.db.mongo_db import mongo_dao
        from com.fw.db.mysql_db import mysql_dao
        try:
            if db == "mysql":
                mysql_dao.insert(self, **kwargs)
            else:
                mongo_dao.save(self, **kwargs)
        except Exception as e:
            raise BaseException("保存{}失败".format(self.service_name if self.service_name else self.table_name), e)

    def update_dao(self, db="mysql", **kwargs):
        """Update this DAO by primary key in mysql (default) or mongo."""
        from com.fw.db.mongo_db import mongo_dao
        from com.fw.db.mysql_db import mysql_dao
        if self.id is None or self.id == "":
            raise BaseException("缺少必要参数")
        try:
            if db == "mysql":
                mysql_dao.update_dao(self, **kwargs)
            else:
                mongo_dao.save(self, **kwargs)
        except Exception as e:
            raise BaseException("修改{}数据失败".format(self.service_name if self.service_name else self.table_name), e)
/confine-controller-1.0.2.tar.gz/confine-controller-1.0.2/controller/management/commands/upgradecontroller.py | import functools
import os
import random
import string
from optparse import make_option
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from controller import get_version
from controller.utils import decode_version
from controller.utils import get_existing_pip_installation
from controller.utils.system import run, check_root
r = functools.partial(run, silent=False)
def validate_controller_version(version):
    """Raise CommandError unless ``version`` is empty, a channel name
    ('beta'/'dev'), or a string accepted by ``decode_version``."""
    if not version:
        return
    if version in ('beta', 'dev'):
        return
    try:
        decode_version(version)
    except ValueError as error:
        raise CommandError(error)
class Command(BaseCommand):
    """Django management command that upgrades the pip-installed controller.

    Workflow: back up the current installation, run ``pip install`` for the
    requested version (or latest), restore the backup on failure, clean stale
    ``.egg-info`` leftovers on success, and finally delegate version-specific
    migration steps to the ``postupgradecontroller`` command.
    """

    def __init__(self, *args, **kwargs):
        """Extend the inherited options (legacy optparse-based Django API)."""
        super(Command, self).__init__(*args, **kwargs)
        self.option_list = BaseCommand.option_list + (
            make_option('--pip_only', action='store_true', dest='pip_only', default=False,
                        help='Only run "pip install confine-controller --upgrade"'),
            make_option('--controller_version', dest='version', default=False,
                        help='Specifies what version of the controller you want to install'),
            make_option('--proxy', dest='proxy',
                        help='Specify a proxy in the form [user:passwd@]proxy.server:port.'),
        )

    # Class-level default; __init__ replaces it per-instance with the
    # extended option list above.
    option_list = BaseCommand.option_list
    help = "Upgrading controller's installation"
    can_import_settings = False
    leave_locale_alone = True

    @check_root
    def handle(self, *args, **options):
        """Perform the upgrade (requires root — enforced by ``check_root``)."""
        current_version = get_version()
        current_path = get_existing_pip_installation()
        proxy = '--proxy %s' % options.get('proxy') if options.get('proxy') else ''
        if current_path is not None:
            desired_version = options.get('version')
            validate_controller_version(desired_version)
            if current_version == desired_version:
                msg = "Not upgrading, you already have version %s installed"
                raise CommandError(msg % desired_version)
            # Create a backup of current installation under a random name
            # so a failed upgrade can be rolled back.
            base_path = os.path.abspath(os.path.join(current_path, '..'))
            char_set = string.ascii_uppercase + string.digits
            rand_name = ''.join(random.sample(char_set, 6))
            backup = os.path.join(base_path, 'controller.' + rand_name)
            run("mv %s %s" % (current_path, backup))
            # collect existing eggs previous to the installation
            eggs_regex = os.path.join(base_path, 'confine_controller-*.egg-info')
            eggs = run('ls -d %s' % eggs_regex)
            eggs = eggs.stdout.splitlines()
            try:
                if desired_version:
                    # 'dev' and 'beta' install straight from the git branches;
                    # anything else is a pinned PyPI release.
                    if desired_version == 'dev':
                        r('pip install -e git+http://git.confine-project.eu/confine/controller.git@master#egg=confine-controller')
                    elif desired_version == 'beta':
                        r('pip install -e git+http://git.confine-project.eu/confine/controller.git@beta#egg=confine-controller')
                    else:
                        r('pip %s install confine-controller==%s' % (proxy, desired_version))
                else:
                    # Did I mentioned how I hate PIP?
                    if run('pip --version|cut -d" " -f2').stdout == '1.0':
                        r('pip %s install confine-controller --upgrade' % proxy)
                    else:
                        # (Fucking pip)^2, it returns exit code 0 even when fails
                        # because requirement already up-to-date
                        r('pip %s install confine-controller --upgrade --force' % proxy)
            except CommandError:
                # Restore backup: put the saved tree back in place.
                run('rm -rf %s' % current_path)
                run('mv %s %s' % (backup, current_path))
                raise CommandError("Problem runing pip upgrade, aborting...")
            else:
                # Some old versions of pip do not performe this cleaning ...
                # Remove all backups
                run('rm -fr %s' % os.path.join(base_path, 'controller\.*'))
                # Clean old egg files, yeah, cleaning PIP shit :P
                # (note: python2 print statement — runs under the system python)
                c_version = 'from controller import get_version; print get_version()'
                version = run('python -c "%s;"' % c_version).stdout
                for egg in eggs:
                    # Do not remove the actual egg file when upgrading twice the same version
                    if egg.split('/')[-1] != "confine_controller-%s.egg-info" % version:
                        run('rm -fr %s' % egg)
        else:
            raise CommandError("You don't seem to have any previous PIP installation")
        # version specific upgrade operations
        if not options.get('pip_only'):
            call_command("postupgradecontroller", version=current_version,
                         proxy=options.get('proxy'))
/breakpoint-2.1.4.tar.gz/breakpoint-2.1.4/.lib/setuptools/command/install_egg_info.py | from distutils import log, dir_util
import os
from setuptools import Command
from setuptools.archive_util import unpack_archive
import pkg_resources
class install_egg_info(Command):
    """Install an .egg-info directory for the package"""

    description = "Install an .egg-info directory for the package"

    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]

    def initialize_options(self):
        # Required distutils hook; real values are resolved in
        # finalize_options().
        self.install_dir = None

    def finalize_options(self):
        # Inherit the destination directory from the install_lib command.
        self.set_undefined_options('install_lib',
                                   ('install_dir', 'install_dir'))
        ei_cmd = self.get_finalized_command("egg_info")
        # Canonical '<name>-<version>...egg-info' directory name.
        basename = pkg_resources.Distribution(
            None, None, ei_cmd.egg_name, ei_cmd.egg_version
        ).egg_name() + '.egg-info'
        self.source = ei_cmd.egg_info
        self.target = os.path.join(self.install_dir, basename)
        self.outputs = [self.target]

    def run(self):
        """Regenerate the metadata, replace any existing target, copy it in."""
        self.run_command('egg_info')
        # Remove any pre-existing target, whether directory, link or file.
        if os.path.isdir(self.target) and not os.path.islink(self.target):
            dir_util.remove_tree(self.target, dry_run=self.dry_run)
        elif os.path.exists(self.target):
            self.execute(os.unlink, (self.target,), "Removing " + self.target)
        if not self.dry_run:
            pkg_resources.ensure_directory(self.target)
        self.execute(
            self.copytree, (), "Copying %s to %s" % (self.source, self.target)
        )
        self.install_namespaces()

    def get_outputs(self):
        # Consumed by 'install --record' and friends.
        return self.outputs

    def copytree(self):
        # Copy the .egg-info tree to site-packages
        def skimmer(src, dst):
            # filter out source-control directories; note that 'src' is always
            # a '/'-separated path, regardless of platform. 'dst' is a
            # platform-specific path.
            for skip in '.svn/', 'CVS/':
                if src.startswith(skip) or '/' + skip in src:
                    return None
            self.outputs.append(dst)
            log.debug("Copying %s to %s", src, dst)
            return dst

        unpack_archive(self.source, self.target, skimmer)

    def install_namespaces(self):
        # Write a '<target>-nspkg.pth' file that stitches this package's
        # namespace packages together at interpreter startup.
        nsp = self._get_all_ns_packages()
        if not nsp:
            return
        filename, ext = os.path.splitext(self.target)
        filename += '-nspkg.pth'
        self.outputs.append(filename)
        log.info("Installing %s", filename)
        lines = map(self._gen_nspkg_line, nsp)

        if self.dry_run:
            # always generate the lines, even in dry run
            list(lines)
            return

        with open(filename, 'wt') as f:
            f.writelines(lines)

    _nspkg_tmpl = (
        "import sys, types, os",
        "p = os.path.join(sys._getframe(1).f_locals['sitedir'], *%(pth)r)",
        "ie = os.path.exists(os.path.join(p,'__init__.py'))",
        "m = not ie and "
        "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
        "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
        "(p not in mp) and mp.append(p)",
    )
    "lines for the namespace installer"

    _nspkg_tmpl_multi = (
        'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
    )
    "additional line(s) when a parent package is indicated"

    @classmethod
    def _gen_nspkg_line(cls, pkg):
        # ensure pkg is not a unicode string under Python 2.7
        pkg = str(pkg)
        pth = tuple(pkg.split('.'))
        tmpl_lines = cls._nspkg_tmpl
        parent, sep, child = pkg.rpartition('.')
        if parent:
            tmpl_lines += cls._nspkg_tmpl_multi
        # The %(...)r placeholders are filled from this function's locals().
        return ';'.join(tmpl_lines) % locals() + '\n'

    def _get_all_ns_packages(self):
        """Return sorted list of all package namespaces"""
        # For 'a.b.c' this registers 'a', 'a.b' and 'a.b.c'.
        nsp = set()
        for pkg in self.distribution.namespace_packages or []:
            pkg = pkg.split('.')
            while pkg:
                nsp.add('.'.join(pkg))
                pkg.pop()
        return sorted(nsp)
/lianai-kouyu-2022.10.11.0.tar.gz/lianai-kouyu-2022.10.11.0/LianaiKouyu/js/reader.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
typeof define === 'function' && define.amd ? define(['exports'], factory) :
(factory((global.RSVP = global.RSVP || {})));
}(this, (function (exports) { 'use strict';
function indexOf(callbacks, callback) {
  // Linear scan for `callback`; returns its position or -1 when absent.
  // (Array.prototype.indexOf is avoided for old-engine compatibility.)
  var position = -1;
  for (var idx = 0; idx < callbacks.length; idx++) {
    if (callbacks[idx] === callback) {
      position = idx;
      break;
    }
  }
  return position;
}
function callbacksFor(object) {
  // Lazily create the per-object registry mapping event names to
  // callback arrays.
  return object._promiseCallbacks || (object._promiseCallbacks = {});
}
/**
@class RSVP.EventTarget
*/
var EventTarget = {
/**
`RSVP.EventTarget.mixin` extends an object with EventTarget methods. For
Example:
```javascript
let object = {};
RSVP.EventTarget.mixin(object);
object.on('finished', function(event) {
// handle event
});
object.trigger('finished', { detail: value });
```
`EventTarget.mixin` also works with prototypes:
```javascript
let Person = function() {};
RSVP.EventTarget.mixin(Person.prototype);
let yehuda = new Person();
let tom = new Person();
yehuda.on('poke', function(event) {
console.log('Yehuda says OW');
});
tom.on('poke', function(event) {
console.log('Tom says OW');
});
yehuda.trigger('poke');
tom.trigger('poke');
```
@method mixin
@for RSVP.EventTarget
@private
@param {Object} object object to extend with EventTarget methods
*/
mixin: function (object) {
object['on'] = this['on'];
object['off'] = this['off'];
object['trigger'] = this['trigger'];
object._promiseCallbacks = undefined;
return object;
},
/**
Registers a callback to be executed when `eventName` is triggered
```javascript
object.on('event', function(eventInfo){
// handle the event
});
object.trigger('event');
```
@method on
@for RSVP.EventTarget
@private
@param {String} eventName name of the event to listen for
@param {Function} callback function to be called when the event is triggered.
*/
on: function (eventName, callback) {
if (typeof callback !== 'function') {
throw new TypeError('Callback must be a function');
}
var allCallbacks = callbacksFor(this),
callbacks = void 0;
callbacks = allCallbacks[eventName];
if (!callbacks) {
callbacks = allCallbacks[eventName] = [];
}
if (indexOf(callbacks, callback) === -1) {
callbacks.push(callback);
}
},
/**
You can use `off` to stop firing a particular callback for an event:
```javascript
function doStuff() { // do stuff! }
object.on('stuff', doStuff);
object.trigger('stuff'); // doStuff will be called
// Unregister ONLY the doStuff callback
object.off('stuff', doStuff);
object.trigger('stuff'); // doStuff will NOT be called
```
If you don't pass a `callback` argument to `off`, ALL callbacks for the
event will not be executed when the event fires. For example:
```javascript
let callback1 = function(){};
let callback2 = function(){};
object.on('stuff', callback1);
object.on('stuff', callback2);
object.trigger('stuff'); // callback1 and callback2 will be executed.
object.off('stuff');
object.trigger('stuff'); // callback1 and callback2 will not be executed!
```
@method off
@for RSVP.EventTarget
@private
@param {String} eventName event to stop listening to
@param {Function} callback optional argument. If given, only the function
given will be removed from the event's callback queue. If no `callback`
argument is given, all callbacks will be removed from the event's callback
queue.
*/
off: function (eventName, callback) {
var allCallbacks = callbacksFor(this),
callbacks = void 0,
index = void 0;
if (!callback) {
allCallbacks[eventName] = [];
return;
}
callbacks = allCallbacks[eventName];
index = indexOf(callbacks, callback);
if (index !== -1) {
callbacks.splice(index, 1);
}
},
/**
Use `trigger` to fire custom events. For example:
```javascript
object.on('foo', function(){
console.log('foo event happened!');
});
object.trigger('foo');
// 'foo event happened!' logged to the console
```
You can also pass a value as a second argument to `trigger` that will be
passed as an argument to all event listeners for the event:
```javascript
object.on('foo', function(value){
console.log(value.name);
});
object.trigger('foo', { name: 'bar' });
// 'bar' logged to the console
```
@method trigger
@for RSVP.EventTarget
@private
@param {String} eventName name of the event to be triggered
@param {*} options optional value to be passed to any event handlers for
the given `eventName`
*/
trigger: function (eventName, options, label) {
var allCallbacks = callbacksFor(this),
callbacks = void 0,
callback = void 0;
if (callbacks = allCallbacks[eventName]) {
// Don't cache the callbacks.length since it may grow
for (var i = 0; i < callbacks.length; i++) {
callback = callbacks[i];
callback(options, label);
}
}
}
};
// Global RSVP configuration; `instrument` gates the instrumentation events
// (e.g. 'chained', 'fulfilled', 'rejected') emitted below. Mixing in
// EventTarget gives config on/off/trigger so consumers can subscribe.
var config = {
  instrument: false
};

EventTarget['mixin'](config);
function configure(name, value) {
  // One-argument form reads an option; two-argument form writes it.
  if (arguments.length !== 2) {
    return config[name];
  }
  config[name] = value;
}
function objectOrFunction(x) {
  // True for any non-null value whose typeof is 'object' or 'function' —
  // i.e. anything that could legally carry a `then` method.
  if (x === null) {
    return false;
  }
  var kind = typeof x;
  return kind === 'object' || kind === 'function';
}
// Callable check via typeof.
function isFunction(x) {
  return typeof x === 'function';
}
function isObject(x) {
  // Non-null object check (typeof null is 'object', hence the guard).
  return typeof x === 'object' && x !== null;
}
function isMaybeThenable(x) {
  // Anything object-like (and non-null) might carry a `then` method.
  return typeof x === 'object' && x !== null;
}
// Prefer the native Array.isArray; fall back to the Object#toString probe
// on engines that predate ES5.
var _isArray = void 0;
if (Array.isArray) {
  _isArray = Array.isArray;
} else {
  _isArray = function (x) {
    return Object.prototype.toString.call(x) === '[object Array]';
  };
}

var isArray = _isArray;
// Millisecond timestamp used for instrumentation payloads.
// Date.now is not available in browsers < IE9
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/now#Compatibility
var now = Date.now || function () {
  return new Date().getTime();
};
// Pending instrumentation events, drained in batches by scheduleFlush().
var queue = [];

function scheduleFlush() {
  // Flush on a timer so event delivery is batched and asynchronous with
  // respect to promise settlement.
  setTimeout(function () {
    for (var i = 0; i < queue.length; i++) {
      var entry = queue[i];
      var payload = entry.payload;
      // Derive unique ids for the promise and its child from key + id.
      payload.guid = payload.key + payload.id;
      payload.childGuid = payload.key + payload.childId;
      if (payload.error) {
        payload.stack = payload.error.stack;
      }
      config['trigger'](entry.name, entry.payload);
    }
    queue.length = 0;
  }, 50);
}
function instrument(eventName, promise, child) {
  // Queue an instrumentation event; when the queue transitions from empty
  // to non-empty (push returns the new length, 1), schedule a flush.
  if (1 === queue.push({
    name: eventName,
    payload: {
      key: promise._guidKey,
      id: promise._id,
      eventName: eventName,
      detail: promise._result,
      childId: child && child._id,
      label: promise._label,
      timeStamp: now(),
      // Capturing a stack trace is costly, so it is opt-in via config.
      error: config["instrument-with-stack"] ? new Error(promise._label) : null
    } })) {
    scheduleFlush();
  }
}
/**
`RSVP.Promise.resolve` returns a promise that will become resolved with the
passed `value`. It is shorthand for the following:
```javascript
let promise = new RSVP.Promise(function(resolve, reject){
resolve(1);
});
promise.then(function(value){
// value === 1
});
```
Instead of writing the above, your code now simply becomes the following:
```javascript
let promise = RSVP.Promise.resolve(1);
promise.then(function(value){
// value === 1
});
```
@method resolve
@static
@param {*} object value that the returned promise will be resolved with
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise that will become fulfilled with the given
`value`
*/
function resolve$1(object, label) {
/*jshint validthis:true */
var Constructor = this;
if (object && typeof object === 'object' && object.constructor === Constructor) {
return object;
}
var promise = new Constructor(noop, label);
resolve(promise, object);
return promise;
}
function withOwnPromise() {
  // Error used when a then-callback returns the very promise it settles.
  return new TypeError('A promises callback cannot return that same promise.');
}

// Placeholder resolver for internally-constructed promises.
function noop() {}

// Internal promise states: undefined = pending, 1 = fulfilled, 2 = rejected.
// The numeric values double as offsets into the flat subscriber triples.
var PENDING = void 0;
var FULFILLED = 1;
var REJECTED = 2;

// Shared sentinel carrying an error thrown while reading a `then` getter.
var GET_THEN_ERROR = new ErrorObject();
function getThen(promise) {
  // Accessing `then` can itself throw (poisoned getters); capture the
  // error in the shared GET_THEN_ERROR sentinel instead of propagating.
  try {
    return promise.then;
  } catch (error) {
    GET_THEN_ERROR.error = error;
    return GET_THEN_ERROR;
  }
}
function tryThen(then$$1, value, fulfillmentHandler, rejectionHandler) {
  // Invoke a foreign `then` defensively: returns whatever it threw, or
  // undefined when it completed normally.
  try {
    then$$1.call(value, fulfillmentHandler, rejectionHandler);
    return undefined;
  } catch (thrown) {
    return thrown;
  }
}
function handleForeignThenable(promise, thenable, then$$1) {
  // Adopt the state of a non-RSVP thenable. All interaction happens
  // asynchronously, and `sealed` guarantees the thenable can settle the
  // promise at most once even if it calls back repeatedly or throws after
  // calling back.
  config.async(function (promise) {
    var sealed = false;
    var error = tryThen(then$$1, thenable, function (value) {
      if (sealed) {
        return;
      }
      sealed = true;
      if (thenable !== value) {
        resolve(promise, value, undefined);
      } else {
        fulfill(promise, value);
      }
    }, function (reason) {
      if (sealed) {
        return;
      }
      sealed = true;

      reject(promise, reason);
    }, 'Settle: ' + (promise._label || ' unknown promise'));

    if (!sealed && error) {
      // `then` threw before either callback ran: reject with that error.
      sealed = true;
      reject(promise, error);
    }
  }, promise);
}
function handleOwnThenable(promise, thenable) {
  // Fast path for thenables that are RSVP promises: read their internal
  // state directly instead of going through a `then` call.
  if (thenable._state === FULFILLED) {
    fulfill(promise, thenable._result);
  } else if (thenable._state === REJECTED) {
    thenable._onError = null;
    reject(promise, thenable._result);
  } else {
    // Still pending: settle `promise` whenever `thenable` settles.
    subscribe(thenable, undefined, function (value) {
      if (thenable !== value) {
        resolve(promise, value, undefined);
      } else {
        fulfill(promise, value);
      }
    }, function (reason) {
      return reject(promise, reason);
    });
  }
}
function handleMaybeThenable(promise, maybeThenable, then$$1) {
  // Pick an adoption strategy based on what reading `then` produced.
  var isOwnThenable = maybeThenable.constructor === promise.constructor && then$$1 === then && promise.constructor.resolve === resolve$1;

  if (isOwnThenable) {
    handleOwnThenable(promise, maybeThenable);
  } else if (then$$1 === GET_THEN_ERROR) {
    // Reading `then` threw; reject with the captured error and clear the
    // shared sentinel so it does not pin the error object.
    reject(promise, GET_THEN_ERROR.error);
    GET_THEN_ERROR.error = null;
  } else if (isFunction(then$$1)) {
    handleForeignThenable(promise, maybeThenable, then$$1);
  } else {
    // Plain object without a callable `then`: treat it as an ordinary value.
    fulfill(promise, maybeThenable);
  }
}
function resolve(promise, value) {
  // Core resolution: adopt `value`'s state when it is (possibly) thenable,
  // otherwise fulfill directly. NOTE: resolving a promise with itself
  // fulfills it here rather than rejecting.
  if (promise === value) {
    fulfill(promise, value);
  } else if (objectOrFunction(value)) {
    handleMaybeThenable(promise, value, getThen(value));
  } else {
    fulfill(promise, value);
  }
}
function publishRejection(promise) {
  // Notify the error-tracking hook (if one is installed) before flushing
  // the promise's subscribers.
  if (promise._onError) {
    promise._onError(promise._result);
  }

  publish(promise);
}
function fulfill(promise, value) {
  // Transition pending -> fulfilled; later settle attempts are ignored.
  if (promise._state !== PENDING) {
    return;
  }
  promise._result = value;
  promise._state = FULFILLED;

  if (promise._subscribers.length === 0) {
    if (config.instrument) {
      instrument('fulfilled', promise);
    }
  } else {
    // Subscriber callbacks always run asynchronously.
    config.async(publish, promise);
  }
}
function reject(promise, reason) {
  // Transition pending -> rejected; later settle attempts are ignored.
  if (promise._state !== PENDING) {
    return;
  }
  promise._state = REJECTED;
  promise._result = reason;
  // Rejection publishing also notifies the _onError hook; see
  // publishRejection.
  config.async(publishRejection, promise);
}
function subscribe(parent, child, onFulfillment, onRejection) {
  // Subscribers are stored flat, in triples
  // [child, onFulfillment, onRejection, ...] — FULFILLED (1) and
  // REJECTED (2) double as offsets — avoiding per-subscriber allocation.
  var subscribers = parent._subscribers;
  var length = subscribers.length;

  parent._onError = null;

  subscribers[length] = child;
  subscribers[length + FULFILLED] = onFulfillment;
  subscribers[length + REJECTED] = onRejection;

  if (length === 0 && parent._state) {
    // Parent already settled and this is the first subscriber: flush now.
    config.async(publish, parent);
  }
}
function publish(promise) {
  var subscribers = promise._subscribers;
  var settled = promise._state;

  if (config.instrument) {
    instrument(settled === FULFILLED ? 'fulfilled' : 'rejected', promise);
  }

  if (subscribers.length === 0) {
    return;
  }

  var child = void 0,
      callback = void 0,
      result = promise._result;

  // Walk the flat [child, onFulfillment, onRejection] triples; `settled`
  // (1 or 2) selects the fulfillment or rejection callback of each triple.
  for (var i = 0; i < subscribers.length; i += 3) {
    child = subscribers[i];
    callback = subscribers[i + settled];

    if (child) {
      invokeCallback(settled, child, callback, result);
    } else {
      // Internal subscription with no child promise: invoke directly.
      callback(result);
    }
  }

  promise._subscribers.length = 0;
}
function ErrorObject() {
  // Mutable box used to pass caught exceptions out of the try/catch
  // helpers without allocating on the happy path.
  this.error = null;
}

// Shared sentinel for errors thrown inside tryCatch().
var TRY_CATCH_ERROR = new ErrorObject();
function tryCatch(callback, result) {
  // Invoke `callback`, funneling any thrown error through the shared
  // TRY_CATCH_ERROR sentinel instead of letting it propagate.
  try {
    return callback(result);
  } catch (e) {
    TRY_CATCH_ERROR.error = e;
    return TRY_CATCH_ERROR;
  }
}
function invokeCallback(state, promise, callback, result) {
  // Run one subscriber callback and settle the child `promise` accordingly.
  var hasCallback = isFunction(callback);
  var value = void 0,
      error = void 0;

  if (hasCallback) {
    value = tryCatch(callback, result);

    if (value === TRY_CATCH_ERROR) {
      error = value.error;
      value.error = null; // release
    } else if (value === promise) {
      // A callback may not return the very promise it settles.
      reject(promise, withOwnPromise());
      return;
    }
  } else {
    // No callback registered: pass the parent's result through unchanged.
    value = result;
  }

  if (promise._state !== PENDING) {
    // noop — the promise was already settled elsewhere.
  } else if (hasCallback && error === undefined) {
    resolve(promise, value);
  } else if (error !== undefined) {
    reject(promise, error);
  } else if (state === FULFILLED) {
    fulfill(promise, value);
  } else if (state === REJECTED) {
    reject(promise, value);
  }
}
function initializePromise(promise, resolver) {
  // Call the user-supplied resolver. Only the first resolve/reject call
  // wins, and a resolver that throws rejects the promise.
  var resolved = false;
  try {
    resolver(function (value) {
      if (resolved) {
        return;
      }
      resolved = true;
      resolve(promise, value);
    }, function (reason) {
      if (resolved) {
        return;
      }
      resolved = true;
      reject(promise, reason);
    });
  } catch (e) {
    reject(promise, e);
  }
}
function then(onFulfillment, onRejection, label) {
  var parent = this;
  var state = parent._state;

  // Fast path: parent already settled and the relevant handler is missing —
  // chaining would be a pass-through, so return the parent itself.
  if (state === FULFILLED && !onFulfillment || state === REJECTED && !onRejection) {
    config.instrument && instrument('chained', parent, parent);
    return parent;
  }

  parent._onError = null;

  var child = new parent.constructor(noop, label);
  var result = parent._result;

  config.instrument && instrument('chained', parent, child);

  if (state === PENDING) {
    subscribe(parent, child, onFulfillment, onRejection);
  } else {
    // Already settled: schedule the appropriate callback asynchronously.
    var callback = state === FULFILLED ? onFulfillment : onRejection;
    config.async(function () {
      return invokeCallback(state, child, callback, result);
    });
  }

  return child;
}
// Enumerator drives Promise.all-style aggregation: it walks an array-like
// `input`, resolves each entry, and settles `this.promise` once every entry
// has settled — or, when `abortOnReject` is set, as soon as one rejects.
var Enumerator = function () {
  function Enumerator(Constructor, input, abortOnReject, label) {
    this._instanceConstructor = Constructor;
    this.promise = new Constructor(noop, label);
    this._abortOnReject = abortOnReject;
    this._init.apply(this, arguments);
  }
  // Allocate the result slots and start enumeration; an empty input
  // fulfills immediately with an empty result array.
  Enumerator.prototype._init = function _init(Constructor, input) {
    var len = input.length || 0;
    this.length = len;
    this._remaining = len;
    this._result = new Array(len);
    this._enumerate(input);
    if (this._remaining === 0) {
      fulfill(this.promise, this._result);
    }
  };
  // Visit entries in order, stopping early once the aggregate promise has
  // settled (e.g. abort-on-reject already fired).
  Enumerator.prototype._enumerate = function _enumerate(input) {
    var length = this.length;
    var promise = this.promise;
    for (var i = 0; promise._state === PENDING && i < length; i++) {
      this._eachEntry(input[i], i);
    }
  };
  // Handle an entry that might be a thenable, with fast paths for promises
  // of this exact library (their internal state can be read directly).
  Enumerator.prototype._settleMaybeThenable = function _settleMaybeThenable(entry, i) {
    var c = this._instanceConstructor;
    var resolve$$1 = c.resolve;
    if (resolve$$1 === resolve$1) {
      var then$$1 = getThen(entry);
      if (then$$1 === then && entry._state !== PENDING) {
        // Native RSVP promise that has already settled: record it directly.
        entry._onError = null;
        this._settledAt(entry._state, i, entry._result);
      } else if (typeof then$$1 !== 'function') {
        // Object without a callable `then`: treat as an immediate value.
        this._remaining--;
        this._result[i] = this._makeResult(FULFILLED, i, entry);
      } else if (c === Promise) {
        // Foreign thenable: adopt its state via a fresh internal promise.
        var promise = new c(noop);
        handleMaybeThenable(promise, entry, then$$1);
        this._willSettleAt(promise, i);
      } else {
        this._willSettleAt(new c(function (resolve$$1) {
          return resolve$$1(entry);
        }), i);
      }
    } else {
      // Constructor overrides `resolve`; honor the subclass behavior.
      this._willSettleAt(resolve$$1(entry), i);
    }
  };
  Enumerator.prototype._eachEntry = function _eachEntry(entry, i) {
    if (isMaybeThenable(entry)) {
      this._settleMaybeThenable(entry, i);
    } else {
      // Primitive entry: counts as immediately fulfilled.
      this._remaining--;
      this._result[i] = this._makeResult(FULFILLED, i, entry);
    }
  };
  // Record entry `i` settling; fulfills the aggregate when all entries are
  // in, or rejects it immediately when abort-on-reject is enabled.
  Enumerator.prototype._settledAt = function _settledAt(state, i, value) {
    var promise = this.promise;
    if (promise._state === PENDING) {
      if (this._abortOnReject && state === REJECTED) {
        reject(promise, value);
      } else {
        this._remaining--;
        this._result[i] = this._makeResult(state, i, value);
        if (this._remaining === 0) {
          fulfill(promise, this._result);
        }
      }
    }
  };
  // Subclasses (AllSettled/HashSettled) override this to wrap outcomes in
  // { state, value|reason } descriptors; the default stores the raw value.
  Enumerator.prototype._makeResult = function _makeResult(state, i, value) {
    return value;
  };
  // Subscribe to a pending promise so entry `i` is recorded when it settles.
  Enumerator.prototype._willSettleAt = function _willSettleAt(promise, i) {
    var enumerator = this;
    subscribe(promise, undefined, function (value) {
      return enumerator._settledAt(FULFILLED, i, value);
    }, function (reason) {
      return enumerator._settledAt(REJECTED, i, reason);
    });
  };
  return Enumerator;
}();
// Build the allSettled/hashSettled state descriptor for one settled entry:
// { state: 'fulfilled', value } on fulfillment, { state: 'rejected', reason }
// otherwise. (`position` is accepted for the _makeResult signature but unused.)
function makeSettledResult(state, position, value) {
  return state === FULFILLED ? { state: 'fulfilled', value: value } : { state: 'rejected', reason: value };
}
/**
`RSVP.Promise.all` accepts an array of promises, and returns a new promise which
is fulfilled with an array of fulfillment values for the passed promises, or
rejected with the reason of the first passed promise to be rejected. It casts all
elements of the passed iterable to promises as it runs this algorithm.
Example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.resolve(2);
let promise3 = RSVP.resolve(3);
let promises = [ promise1, promise2, promise3 ];
RSVP.Promise.all(promises).then(function(array){
// The array here would be [ 1, 2, 3 ];
});
```
If any of the `promises` given to `RSVP.all` are rejected, the first promise
  that is rejected will be given as an argument to the returned promise's
rejection handler. For example:
Example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.reject(new Error("2"));
let promise3 = RSVP.reject(new Error("3"));
let promises = [ promise1, promise2, promise3 ];
RSVP.Promise.all(promises).then(function(array){
// Code here never runs because there are rejected promises!
}, function(error) {
// error.message === "2"
});
```
@method all
@static
@param {Array} entries array of promises
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled when all `promises` have been
fulfilled, or rejected if any of them become rejected.
@static
*/
// Implementation backing `RSVP.Promise.all` (see doc comment above):
// aggregates an array of entries, failing fast on the first rejection.
function all(entries, label) {
  if (isArray(entries)) {
    // abortOnReject=true: the first rejection rejects the aggregate.
    return new Enumerator(this, entries, true, label).promise;
  }
  return this.reject(new TypeError("Promise.all must be called with an array"), label);
}
/**
`RSVP.Promise.race` returns a new promise which is settled in the same way as the
first passed promise to settle.
Example:
```javascript
let promise1 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
resolve('promise 1');
}, 200);
});
let promise2 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
resolve('promise 2');
}, 100);
});
RSVP.Promise.race([promise1, promise2]).then(function(result){
// result === 'promise 2' because it was resolved before promise1
// was resolved.
});
```
`RSVP.Promise.race` is deterministic in that only the state of the first
settled promise matters. For example, even if other promises given to the
`promises` array argument are resolved, but the first settled promise has
become rejected before the other promises became fulfilled, the returned
promise will become rejected:
```javascript
let promise1 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
resolve('promise 1');
}, 200);
});
let promise2 = new RSVP.Promise(function(resolve, reject){
setTimeout(function(){
reject(new Error('promise 2'));
}, 100);
});
RSVP.Promise.race([promise1, promise2]).then(function(result){
// Code here never runs
}, function(reason){
// reason.message === 'promise 2' because promise 2 became rejected before
// promise 1 became fulfilled
});
```
An example real-world use case is implementing timeouts:
```javascript
RSVP.Promise.race([ajax('foo.json'), timeout(5000)])
```
@method race
@static
@param {Array} entries array of promises to observe
@param {String} label optional string for describing the promise returned.
Useful for tooling.
@return {Promise} a promise which settles in the same way as the first passed
promise to settle.
*/
// Implementation backing `RSVP.Promise.race` (see doc comment above):
// settles the returned promise the same way as the first entry to settle.
function race(entries, label) {
  /*jshint validthis:true */
  var Constructor = this;
  var promise = new Constructor(noop, label);
  if (!isArray(entries)) {
    reject(promise, new TypeError('Promise.race must be called with an array'));
    return promise;
  }
  // The handlers close only over `promise`, so one pair serves every entry.
  var onFulfilled = function (value) {
    return resolve(promise, value);
  };
  var onRejected = function (reason) {
    return reject(promise, reason);
  };
  // Stop subscribing as soon as something has already settled the race.
  for (var idx = 0; promise._state === PENDING && idx < entries.length; idx++) {
    subscribe(Constructor.resolve(entries[idx]), undefined, onFulfilled, onRejected);
  }
  return promise;
}
/**
`RSVP.Promise.reject` returns a promise rejected with the passed `reason`.
It is shorthand for the following:
```javascript
let promise = new RSVP.Promise(function(resolve, reject){
reject(new Error('WHOOPS'));
});
promise.then(function(value){
// Code here doesn't run because the promise is rejected!
}, function(reason){
// reason.message === 'WHOOPS'
});
```
Instead of writing the above, your code now simply becomes the following:
```javascript
let promise = RSVP.Promise.reject(new Error('WHOOPS'));
promise.then(function(value){
// Code here doesn't run because the promise is rejected!
}, function(reason){
// reason.message === 'WHOOPS'
});
```
@method reject
@static
@param {*} reason value that the returned promise will be rejected with.
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise rejected with the given `reason`.
*/
// Implementation backing `RSVP.Promise.reject` (see doc comment above):
// returns a promise of `this` constructor rejected with `reason`.
function reject$1(reason, label) {
  /*jshint validthis:true */
  var Constructor = this;
  var promise = new Constructor(noop, label);
  reject(promise, reason);
  return promise;
}
// Per-load prefix used to generate unique ids for promise instrumentation.
var guidKey = 'rsvp_' + now() + '-';
// Monotonically increasing id assigned to each Promise instance.
var counter = 0;
// Raised when `new Promise(...)` is called without a resolver function.
function needsResolver() {
  throw new TypeError('You must pass a resolver function as the first argument to the promise constructor');
}
// Raised when the Promise constructor is invoked without the `new` operator.
function needsNew() {
  throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.");
}
/**
Promise objects represent the eventual result of an asynchronous operation. The
primary way of interacting with a promise is through its `then` method, which
registers callbacks to receive either a promise’s eventual value or the reason
why the promise cannot be fulfilled.
Terminology
-----------
- `promise` is an object or function with a `then` method whose behavior conforms to this specification.
- `thenable` is an object or function that defines a `then` method.
- `value` is any legal JavaScript value (including undefined, a thenable, or a promise).
- `exception` is a value that is thrown using the throw statement.
- `reason` is a value that indicates why a promise was rejected.
- `settled` the final resting state of a promise, fulfilled or rejected.
A promise can be in one of three states: pending, fulfilled, or rejected.
Promises that are fulfilled have a fulfillment value and are in the fulfilled
state. Promises that are rejected have a rejection reason and are in the
rejected state. A fulfillment value is never a thenable.
Promises can also be said to *resolve* a value. If this value is also a
promise, then the original promise's settled state will match the value's
settled state. So a promise that *resolves* a promise that rejects will
itself reject, and a promise that *resolves* a promise that fulfills will
itself fulfill.
Basic Usage:
------------
```js
let promise = new Promise(function(resolve, reject) {
// on success
resolve(value);
// on failure
reject(reason);
});
promise.then(function(value) {
// on fulfillment
}, function(reason) {
// on rejection
});
```
Advanced Usage:
---------------
Promises shine when abstracting away asynchronous interactions such as
`XMLHttpRequest`s.
```js
function getJSON(url) {
return new Promise(function(resolve, reject){
let xhr = new XMLHttpRequest();
xhr.open('GET', url);
xhr.onreadystatechange = handler;
xhr.responseType = 'json';
xhr.setRequestHeader('Accept', 'application/json');
xhr.send();
function handler() {
if (this.readyState === this.DONE) {
if (this.status === 200) {
resolve(this.response);
} else {
reject(new Error('getJSON: `' + url + '` failed with status: [' + this.status + ']'));
}
}
};
});
}
getJSON('/posts.json').then(function(json) {
// on fulfillment
}, function(reason) {
// on rejection
});
```
Unlike callbacks, promises are great composable primitives.
```js
Promise.all([
getJSON('/posts'),
getJSON('/comments')
]).then(function(values){
values[0] // => postsJSON
values[1] // => commentsJSON
return values;
});
```
@class RSVP.Promise
@param {function} resolver
@param {String} label optional string for labeling the promise.
Useful for tooling.
@constructor
*/
// The RSVP Promise class (see the large doc comment above for usage).
var Promise = function () {
  function Promise(resolver, label) {
    this._id = counter++;
    this._label = label;
    this._state = undefined;
    this._result = undefined;
    this._subscribers = [];
    config.instrument && instrument('created', this);
    // `noop` marks internally-created promises, which skip resolver checks
    // and are settled directly by library internals.
    if (noop !== resolver) {
      typeof resolver !== 'function' && needsResolver();
      this instanceof Promise ? initializePromise(this, resolver) : needsNew();
    }
  }
  // Default unhandled-rejection hook: after the current turn, fire the
  // 'error' event unless `then` cleared `_onError` (i.e. someone chained).
  Promise.prototype._onError = function _onError(reason) {
    var _this = this;
    config.after(function () {
      if (_this._onError) {
        config.trigger('error', reason, _this._label);
      }
    });
  };
  /**
    `catch` is simply sugar for `then(undefined, onRejection)` which makes it the same
    as the catch block of a try/catch statement.
    ```js
    function findAuthor(){
      throw new Error('couldn\'t find that author');
    }
    // synchronous
    try {
      findAuthor();
    } catch(reason) {
      // something went wrong
    }
    // async with promises
    findAuthor().catch(function(reason){
      // something went wrong
    });
    ```
    @method catch
    @param {Function} onRejection
    @param {String} label optional string for labeling the promise.
    Useful for tooling.
    @return {Promise}
  */
  Promise.prototype.catch = function _catch(onRejection, label) {
    return this.then(undefined, onRejection, label);
  };
  /**
    `finally` will be invoked regardless of the promise's fate just as native
    try/catch/finally behaves
    Synchronous example:
    ```js
    findAuthor() {
      if (Math.random() > 0.5) {
        throw new Error();
      }
      return new Author();
    }
    try {
      return findAuthor(); // succeed or fail
    } catch(error) {
      return findOtherAuthor();
    } finally {
      // always runs
      // doesn't affect the return value
    }
    ```
    Asynchronous example:
    ```js
    findAuthor().catch(function(reason){
      return findOtherAuthor();
    }).finally(function(){
      // author was either found, or not
    });
    ```
    @method finally
    @param {Function} callback
    @param {String} label optional string for labeling the promise.
    Useful for tooling.
    @return {Promise}
  */
  Promise.prototype.finally = function _finally(callback, label) {
    var promise = this;
    var constructor = promise.constructor;
    // Run `callback` on either outcome, then pass through the original value
    // or rethrow the original reason — waiting on any promise the callback
    // returns (via constructor.resolve) before doing so.
    return promise.then(function (value) {
      return constructor.resolve(callback()).then(function () {
        return value;
      });
    }, function (reason) {
      return constructor.resolve(callback()).then(function () {
        throw reason;
      });
    }, label);
  };
  return Promise;
}();
// Wire up the static combinators defined above.
Promise.cast = resolve$1; // deprecated
Promise.all = all;
Promise.race = race;
Promise.resolve = resolve$1;
Promise.reject = reject$1;
// Key under which instances store their instrumentation guid.
Promise.prototype._guidKey = guidKey;
/**
The primary way of interacting with a promise is through its `then` method,
which registers callbacks to receive either a promise's eventual value or the
reason why the promise cannot be fulfilled.
```js
findUser().then(function(user){
// user is available
}, function(reason){
// user is unavailable, and you are given the reason why
});
```
Chaining
--------
The return value of `then` is itself a promise. This second, 'downstream'
promise is resolved with the return value of the first promise's fulfillment
or rejection handler, or rejected if the handler throws an exception.
```js
findUser().then(function (user) {
return user.name;
}, function (reason) {
return 'default name';
}).then(function (userName) {
// If `findUser` fulfilled, `userName` will be the user's name, otherwise it
// will be `'default name'`
});
findUser().then(function (user) {
throw new Error('Found user, but still unhappy');
}, function (reason) {
throw new Error('`findUser` rejected and we\'re unhappy');
}).then(function (value) {
// never reached
}, function (reason) {
// if `findUser` fulfilled, `reason` will be 'Found user, but still unhappy'.
// If `findUser` rejected, `reason` will be '`findUser` rejected and we\'re unhappy'.
});
```
If the downstream promise does not specify a rejection handler, rejection reasons will be propagated further downstream.
```js
findUser().then(function (user) {
throw new PedagogicalException('Upstream error');
}).then(function (value) {
// never reached
}).then(function (value) {
// never reached
}, function (reason) {
  // The `PedagogicalException` is propagated all the way down to here
});
```
Assimilation
------------
Sometimes the value you want to propagate to a downstream promise can only be
retrieved asynchronously. This can be achieved by returning a promise in the
fulfillment or rejection handler. The downstream promise will then be pending
until the returned promise is settled. This is called *assimilation*.
```js
findUser().then(function (user) {
return findCommentsByAuthor(user);
}).then(function (comments) {
// The user's comments are now available
});
```
  If the assimilated promise rejects, then the downstream promise will also reject.
```js
findUser().then(function (user) {
return findCommentsByAuthor(user);
}).then(function (comments) {
// If `findCommentsByAuthor` fulfills, we'll have the value here
}, function (reason) {
// If `findCommentsByAuthor` rejects, we'll have the reason here
});
```
Simple Example
--------------
Synchronous Example
```javascript
let result;
try {
result = findResult();
// success
} catch(reason) {
// failure
}
```
Errback Example
```js
findResult(function(result, err){
if (err) {
// failure
} else {
// success
}
});
```
Promise Example;
```javascript
findResult().then(function(result){
// success
}, function(reason){
// failure
});
```
Advanced Example
--------------
Synchronous Example
```javascript
let author, books;
try {
author = findAuthor();
books = findBooksByAuthor(author);
// success
} catch(reason) {
// failure
}
```
Errback Example
```js
function foundBooks(books) {
}
function failure(reason) {
}
findAuthor(function(author, err){
if (err) {
failure(err);
// failure
} else {
try {
        findBooksByAuthor(author, function(books, err) {
if (err) {
failure(err);
} else {
try {
foundBooks(books);
} catch(reason) {
failure(reason);
}
}
});
} catch(error) {
failure(err);
}
// success
}
});
```
Promise Example;
```javascript
findAuthor().
then(findBooksByAuthor).
then(function(books){
// found books
}).catch(function(reason){
// something went wrong
});
```
@method then
@param {Function} onFulfillment
@param {Function} onRejection
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise}
*/
// Attach the shared `then` implementation defined earlier in this module.
Promise.prototype.then = then;
// Mutable box used as a sentinel return value so the try/catch helpers below
// can report a captured error without rethrowing it.
function Result() {
  this.value = undefined;
}
// Shared sentinel instances; `.value` holds the most recently captured error.
var ERROR = new Result();
var GET_THEN_ERROR$1 = new Result();
// Safely read `obj.then` (the property may be a throwing getter).
// On throw, capture the error in the GET_THEN_ERROR$1 sentinel and return
// that sentinel. Note: the only consumer of this path (`denodeify`, via
// `needsPromiseInput`) compares the result against GET_THEN_ERROR$1, so the
// previous behavior of returning the unrelated ERROR sentinel here could
// never match and a throwing `then` getter was silently mishandled instead
// of rejecting the returned promise.
function getThen$1(obj) {
  try {
    return obj.then;
  } catch (error) {
    GET_THEN_ERROR$1.value = error;
    return GET_THEN_ERROR$1;
  }
}
// Apply `f` to receiver `s` with argument array `a`. On success this returns
// `undefined` (callers only ever test the result against the ERROR sentinel);
// on throw, the error is stashed in `ERROR.value` and ERROR is returned
// instead of rethrowing.
function tryApply(f, s, a) {
  try {
    f.apply(s, a);
  } catch (error) {
    ERROR.value = error;
    return ERROR;
  }
}
function makeObject(_, argumentNames) {
var obj = {};
var length = _.length;
var args = new Array(length);
for (var x = 0; x < length; x++) {
args[x] = _[x];
}
for (var i = 0; i < argumentNames.length; i++) {
var name = argumentNames[i];
obj[name] = args[i + 1];
}
return obj;
}
function arrayResult(_) {
var length = _.length;
var args = new Array(length - 1);
for (var i = 1; i < length; i++) {
args[i - 1] = _[i];
}
return args;
}
// Wrap a foreign thenable so its `then` (possibly a getter that was already
// read once) is reused and always invoked with the original `promise` as
// the receiver.
function wrapThenable(then, promise) {
  return {
    then: function (onFulFillment, onRejection) {
      return then.call(promise, onFulFillment, onRejection);
    }
  };
}
/**
`RSVP.denodeify` takes a 'node-style' function and returns a function that
will return an `RSVP.Promise`. You can use `denodeify` in Node.js or the
browser when you'd prefer to use promises over using callbacks. For example,
`denodeify` transforms the following:
```javascript
let fs = require('fs');
fs.readFile('myfile.txt', function(err, data){
if (err) return handleError(err);
handleData(data);
});
```
into:
```javascript
let fs = require('fs');
let readFile = RSVP.denodeify(fs.readFile);
readFile('myfile.txt').then(handleData, handleError);
```
If the node function has multiple success parameters, then `denodeify`
just returns the first one:
```javascript
let request = RSVP.denodeify(require('request'));
request('http://example.com').then(function(res) {
// ...
});
```
However, if you need all success parameters, setting `denodeify`'s
second parameter to `true` causes it to return all success parameters
as an array:
```javascript
let request = RSVP.denodeify(require('request'), true);
request('http://example.com').then(function(result) {
// result[0] -> res
// result[1] -> body
});
```
Or if you pass it an array with names it returns the parameters as a hash:
```javascript
let request = RSVP.denodeify(require('request'), ['res', 'body']);
request('http://example.com').then(function(result) {
// result.res
// result.body
});
```
Sometimes you need to retain the `this`:
```javascript
let app = require('express')();
let render = RSVP.denodeify(app.render.bind(app));
```
The denodified function inherits from the original function. It works in all
environments, except IE 10 and below. Consequently all properties of the original
function are available to you. However, any properties you change on the
denodeified function won't be changed on the original function. Example:
```javascript
let request = RSVP.denodeify(require('request')),
cookieJar = request.jar(); // <- Inheritance is used here
request('http://example.com', {jar: cookieJar}).then(function(res) {
// cookieJar.cookies holds now the cookies returned by example.com
});
```
Using `denodeify` makes it easier to compose asynchronous operations instead
of using callbacks. For example, instead of:
```javascript
let fs = require('fs');
fs.readFile('myfile.txt', function(err, data){
if (err) { ... } // Handle error
fs.writeFile('myfile2.txt', data, function(err){
if (err) { ... } // Handle error
console.log('done')
});
});
```
you can chain the operations together using `then` from the returned promise:
```javascript
let fs = require('fs');
let readFile = RSVP.denodeify(fs.readFile);
let writeFile = RSVP.denodeify(fs.writeFile);
readFile('myfile.txt').then(function(data){
return writeFile('myfile2.txt', data);
}).then(function(){
console.log('done')
}).catch(function(error){
// Handle error
});
```
@method denodeify
@static
@for RSVP
@param {Function} nodeFunc a 'node-style' function that takes a callback as
its last argument. The callback expects an error to be passed as its first
argument (if an error occurred, otherwise null), and the value from the
operation as its second argument ('function(err, value){ }').
  @param {Boolean|Array} [options] An optional parameter that if set
  to `true` causes the promise to fulfill with the callback's success arguments
  as an array. This is useful if the node function has multiple success
  parameters. If you set this parameter to an array with names, the promise will
fulfill with a hash with these names as keys and the success parameters as
values.
@return {Function} a function that wraps `nodeFunc` to return an
`RSVP.Promise`
@static
*/
// Implementation backing `RSVP.denodeify` (see the doc comment above).
function denodeify(nodeFunc, options) {
  var fn = function () {
    var self = this;
    var l = arguments.length;
    // One extra slot for the node-style callback appended below.
    var args = new Array(l + 1);
    var promiseInput = false;
    for (var i = 0; i < l; ++i) {
      var arg = arguments[i];
      if (!promiseInput) {
        // TODO: clean this up
        promiseInput = needsPromiseInput(arg);
        if (promiseInput === GET_THEN_ERROR$1) {
          // Reading `arg.then` threw: reject with that error immediately.
          var p = new Promise(noop);
          reject(p, GET_THEN_ERROR$1.value);
          return p;
        } else if (promiseInput && promiseInput !== true) {
          // `promiseInput` is the extracted `then`; normalize the thenable.
          arg = wrapThenable(promiseInput, arg);
        }
      }
      args[i] = arg;
    }
    var promise = new Promise(noop);
    // Node-style callback appended as the last argument: settles `promise`
    // per `options` (undefined -> first value, true -> array of values,
    // Array -> hash keyed by the given names).
    args[l] = function (err, val) {
      if (err) reject(promise, err);else if (options === undefined) resolve(promise, val);else if (options === true) resolve(promise, arrayResult(arguments));else if (isArray(options)) resolve(promise, makeObject(arguments, options));else resolve(promise, val);
    };
    if (promiseInput) {
      // Some argument was a promise/thenable: wait for all args first.
      return handlePromiseInput(promise, args, nodeFunc, self);
    } else {
      return handleValueInput(promise, args, nodeFunc, self);
    }
  };
  // Inherit static properties from the wrapped function (not IE<=10 safe).
  fn.__proto__ = nodeFunc;
  return fn;
}
// Invoke `nodeFunc` synchronously with plain-value args; a synchronous throw
// rejects `promise`, otherwise the appended callback will settle it later.
function handleValueInput(promise, args, nodeFunc, self) {
  var result = tryApply(nodeFunc, self, args);
  if (result === ERROR) {
    reject(promise, result.value);
  }
  return promise;
}
// Wait for all promise-valued arguments to fulfill, then invoke `nodeFunc`
// with the resolved values. Returning `promise` from the `then` handler makes
// the outer promise assimilate it, so the caller still observes the callback's
// eventual settlement.
function handlePromiseInput(promise, args, nodeFunc, self) {
  return Promise.all(args).then(function (args) {
    var result = tryApply(nodeFunc, self, args);
    if (result === ERROR) {
      reject(promise, result.value);
    }
    return promise;
  });
}
// Classify a denodeify argument: `true` for a promise of this library,
// the extracted `then` (or an error sentinel) for other object arguments,
// and `false` for primitives.
function needsPromiseInput(arg) {
  if (!arg || typeof arg !== 'object') {
    return false;
  }
  return arg.constructor === Promise ? true : getThen$1(arg);
}
/**
This is a convenient alias for `RSVP.Promise.all`.
@method all
@static
@for RSVP
@param {Array} array Array of promises.
@param {String} label An optional label. This is useful
for tooling.
*/
// Standalone alias for `Promise.all` (see doc comment above).
function all$1(array, label) {
  return Promise.all(array, label);
}
// Babel class helper: guards against a subclass constructor running before
// super() and honors object/function returns from the parent constructor.
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
// Babel class helper: wires up prototype and static inheritance chains.
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
// Enumerator subclass used by `allSettled`: never aborts on rejection, and
// (via _makeResult below) wraps each outcome in a { state, value|reason }
// descriptor instead of storing the raw value.
var AllSettled = function (_Enumerator) {
  _inherits(AllSettled, _Enumerator);
  function AllSettled(Constructor, entries, label) {
    return _possibleConstructorReturn(this, _Enumerator.call(this, Constructor, entries, false /* don't abort on reject */, label));
  }
  return AllSettled;
}(Enumerator);
AllSettled.prototype._makeResult = makeSettledResult;
/**
`RSVP.allSettled` is similar to `RSVP.all`, but instead of implementing
a fail-fast method, it waits until all the promises have returned and
shows you all the results. This is useful if you want to handle multiple
promises' failure states together as a set.
Returns a promise that is fulfilled when all the given promises have been
settled. The return promise is fulfilled with an array of the states of
the promises passed into the `promises` array argument.
Each state object will either indicate fulfillment or rejection, and
provide the corresponding value or reason. The states will take one of
the following formats:
```javascript
{ state: 'fulfilled', value: value }
or
{ state: 'rejected', reason: reason }
```
Example:
```javascript
let promise1 = RSVP.Promise.resolve(1);
let promise2 = RSVP.Promise.reject(new Error('2'));
let promise3 = RSVP.Promise.reject(new Error('3'));
let promises = [ promise1, promise2, promise3 ];
RSVP.allSettled(promises).then(function(array){
// array == [
// { state: 'fulfilled', value: 1 },
// { state: 'rejected', reason: Error },
// { state: 'rejected', reason: Error }
// ]
// Note that for the second item, reason.message will be '2', and for the
// third item, reason.message will be '3'.
}, function(error) {
// Not run. (This block would only be called if allSettled had failed,
// for instance if passed an incorrect argument type.)
});
```
@method allSettled
@static
@for RSVP
@param {Array} entries
@param {String} label - optional string that describes the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled with an array of the settled
states of the constituent promises.
*/
// Implementation backing `RSVP.allSettled` (see doc comment above): waits
// for every entry and fulfills with an array of state descriptors.
function allSettled(entries, label) {
  if (isArray(entries)) {
    return new AllSettled(Promise, entries, label).promise;
  }
  return Promise.reject(new TypeError("Promise.allSettled must be called with an array"), label);
}
/**
This is a convenient alias for `RSVP.Promise.race`.
@method race
@static
@for RSVP
@param {Array} array Array of promises.
@param {String} label An optional label. This is useful
for tooling.
*/
// Standalone alias for `Promise.race` (see doc comment above).
function race$1(array, label) {
  return Promise.race(array, label);
}
// Babel class helper (duplicate emitted per-module by the transpiler).
function _possibleConstructorReturn$1(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
// Babel class helper (duplicate emitted per-module by the transpiler).
function _inherits$1(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
// Cached so own-property checks work even if an input hash shadows
// `hasOwnProperty`.
var hasOwnProperty = Object.prototype.hasOwnProperty;
// Enumerator subclass that aggregates an object's own enumerable properties
// instead of an array, fulfilling with a result hash keyed like the input.
var PromiseHash = function (_Enumerator) {
  _inherits$1(PromiseHash, _Enumerator);
  function PromiseHash(Constructor, object) {
    // Third positional argument defaults to true (fail-fast on rejection).
    var abortOnReject = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : true;
    var label = arguments[3];
    return _possibleConstructorReturn$1(this, _Enumerator.call(this, Constructor, object, abortOnReject, label));
  }
  // Result is a plain object keyed like the input, not an array.
  PromiseHash.prototype._init = function _init(Constructor, object) {
    this._result = {};
    this._enumerate(object);
    if (this._remaining === 0) {
      fulfill(this.promise, this._result);
    }
  };
  // Snapshot the own enumerable keys first so `_remaining` is known before
  // any entry can settle synchronously; prototype properties are skipped.
  PromiseHash.prototype._enumerate = function _enumerate(input) {
    var promise = this.promise;
    var results = [];
    for (var key in input) {
      if (hasOwnProperty.call(input, key)) {
        results.push({
          position: key,
          entry: input[key]
        });
      }
    }
    var length = results.length;
    this._remaining = length;
    var result = void 0;
    // Stop early once the aggregate promise has settled.
    for (var i = 0; promise._state === PENDING && i < length; i++) {
      result = results[i];
      this._eachEntry(result.entry, result.position);
    }
  };
  return PromiseHash;
}(Enumerator);
/**
`RSVP.hash` is similar to `RSVP.all`, but takes an object instead of an array
for its `promises` argument.
Returns a promise that is fulfilled when all the given promises have been
fulfilled, or rejected if any of them become rejected. The returned promise
is fulfilled with a hash that has the same key names as the `promises` object
argument. If any of the values in the object are not promises, they will
simply be copied over to the fulfilled object.
Example:
```javascript
let promises = {
myPromise: RSVP.resolve(1),
yourPromise: RSVP.resolve(2),
theirPromise: RSVP.resolve(3),
notAPromise: 4
};
RSVP.hash(promises).then(function(hash){
// hash here is an object that looks like:
// {
// myPromise: 1,
// yourPromise: 2,
// theirPromise: 3,
// notAPromise: 4
// }
});
  ```
If any of the `promises` given to `RSVP.hash` are rejected, the first promise
that is rejected will be given as the reason to the rejection handler.
Example:
```javascript
let promises = {
myPromise: RSVP.resolve(1),
rejectedPromise: RSVP.reject(new Error('rejectedPromise')),
anotherRejectedPromise: RSVP.reject(new Error('anotherRejectedPromise')),
};
RSVP.hash(promises).then(function(hash){
// Code here never runs because there are rejected promises!
}, function(reason) {
// reason.message === 'rejectedPromise'
});
```
An important note: `RSVP.hash` is intended for plain JavaScript objects that
are just a set of keys and values. `RSVP.hash` will NOT preserve prototype
chains.
Example:
```javascript
function MyConstructor(){
this.example = RSVP.resolve('Example');
}
MyConstructor.prototype = {
protoProperty: RSVP.resolve('Proto Property')
};
let myObject = new MyConstructor();
RSVP.hash(myObject).then(function(hash){
// protoProperty will not be present, instead you will just have an
// object that looks like:
// {
// example: 'Example'
// }
//
// hash.hasOwnProperty('protoProperty'); // false
// 'undefined' === typeof hash.protoProperty
});
```
@method hash
@static
@for RSVP
@param {Object} object
@param {String} label optional string that describes the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled when all properties of `promises`
have been fulfilled, or rejected if any of them become rejected.
*/
// Implementation backing `RSVP.hash` (see doc comment above): like `all`,
// but over an object's own properties, fulfilling with a same-keyed hash.
function hash(object, label) {
  if (!isObject(object)) {
    return Promise.reject(new TypeError("Promise.hash must be called with an object"), label);
  }
  // Pass `abortOnReject` explicitly: PromiseHash's signature is
  // (Constructor, object, abortOnReject, label). Handing `label` over in the
  // third position (as before) dropped the label entirely and — for a falsy
  // label such as '' — accidentally disabled fail-fast rejection.
  return new PromiseHash(Promise, object, true, label).promise;
}
// Babel class helper (duplicate emitted per-module by the transpiler).
function _possibleConstructorReturn$2(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
// Babel class helper (duplicate emitted per-module by the transpiler).
function _inherits$2(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
// PromiseHash subclass used by `hashSettled`: never aborts on rejection, and
// (via _makeResult below) wraps each outcome in a { state, value|reason }
// descriptor instead of storing the raw value.
var HashSettled = function (_PromiseHash) {
  _inherits$2(HashSettled, _PromiseHash);
  function HashSettled(Constructor, object, label) {
    return _possibleConstructorReturn$2(this, _PromiseHash.call(this, Constructor, object, false, label));
  }
  return HashSettled;
}(PromiseHash);
HashSettled.prototype._makeResult = makeSettledResult;
/**
`RSVP.hashSettled` is similar to `RSVP.allSettled`, but takes an object
instead of an array for its `promises` argument.
Unlike `RSVP.all` or `RSVP.hash`, which implement a fail-fast method,
but like `RSVP.allSettled`, `hashSettled` waits until all the
constituent promises have returned and then shows you all the results
with their states and values/reasons. This is useful if you want to
handle multiple promises' failure states together as a set.
Returns a promise that is fulfilled when all the given promises have been
settled, or rejected if the passed parameters are invalid.
The returned promise is fulfilled with a hash that has the same key names as
the `promises` object argument. If any of the values in the object are not
promises, they will be copied over to the fulfilled object and marked with state
'fulfilled'.
Example:
```javascript
let promises = {
myPromise: RSVP.Promise.resolve(1),
yourPromise: RSVP.Promise.resolve(2),
theirPromise: RSVP.Promise.resolve(3),
notAPromise: 4
};
RSVP.hashSettled(promises).then(function(hash){
// hash here is an object that looks like:
// {
// myPromise: { state: 'fulfilled', value: 1 },
// yourPromise: { state: 'fulfilled', value: 2 },
// theirPromise: { state: 'fulfilled', value: 3 },
// notAPromise: { state: 'fulfilled', value: 4 }
// }
});
```
If any of the `promises` given to `RSVP.hashSettled` are rejected, the state will
be set to 'rejected' and the reason for rejection provided.
Example:
```javascript
let promises = {
myPromise: RSVP.Promise.resolve(1),
rejectedPromise: RSVP.Promise.reject(new Error('rejection')),
anotherRejectedPromise: RSVP.Promise.reject(new Error('more rejection')),
};
RSVP.hashSettled(promises).then(function(hash){
// hash here is an object that looks like:
// {
// myPromise: { state: 'fulfilled', value: 1 },
// rejectedPromise: { state: 'rejected', reason: Error },
// anotherRejectedPromise: { state: 'rejected', reason: Error },
// }
// Note that for rejectedPromise, reason.message == 'rejection',
// and for anotherRejectedPromise, reason.message == 'more rejection'.
});
```
An important note: `RSVP.hashSettled` is intended for plain JavaScript objects that
are just a set of keys and values. `RSVP.hashSettled` will NOT preserve prototype
chains.
Example:
```javascript
function MyConstructor(){
this.example = RSVP.Promise.resolve('Example');
}
MyConstructor.prototype = {
protoProperty: RSVP.Promise.resolve('Proto Property')
};
let myObject = new MyConstructor();
RSVP.hashSettled(myObject).then(function(hash){
// protoProperty will not be present, instead you will just have an
// object that looks like:
// {
// example: { state: 'fulfilled', value: 'Example' }
// }
//
// hash.hasOwnProperty('protoProperty'); // false
// 'undefined' === typeof hash.protoProperty
});
```
@method hashSettled
@for RSVP
@param {Object} object
@param {String} label optional string that describes the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled when all properties of `promises`
have been settled.
@static
*/
/**
 * `RSVP.hashSettled` entry point: settle every value of a plain object.
 *
 * @param {Object} object hash of promises and/or plain values
 * @param {String} label optional label, forwarded for instrumentation
 * @return {Promise} fulfilled with a hash of {state, value|reason} records,
 *   or rejected with a TypeError when `object` is not an object
 */
function hashSettled(object, label) {
  if (!isObject(object)) {
    return Promise.reject(new TypeError("RSVP.hashSettled must be called with an object"), label);
  }
  // Bug fix: HashSettled's constructor signature is (Constructor, object,
  // label); the previous extra `false` argument landed in the label slot and
  // the real label was silently dropped. The constructor itself supplies the
  // fail-fast flag when it calls PromiseHash.
  return new HashSettled(Promise, object, label).promise;
}
/**
`RSVP.rethrow` will rethrow an error on the next turn of the JavaScript event
loop in order to aid debugging.
Promises A+ specifies that any exceptions that occur with a promise must be
caught by the promises implementation and bubbled to the last handler. For
this reason, it is recommended that you always specify a second rejection
handler function to `then`. However, `RSVP.rethrow` will throw the exception
outside of the promise, so it bubbles up to your console if in the browser,
or domain/cause uncaught exception in Node. `rethrow` will also throw the
error again so the error can be handled by the promise per the spec.
```javascript
function throws(){
throw new Error('Whoops!');
}
let promise = new RSVP.Promise(function(resolve, reject){
throws();
});
promise.catch(RSVP.rethrow).then(function(){
// Code here doesn't run because the promise became rejected due to an
// error!
}, function (err){
// handle the error here
});
```
The 'Whoops' error will be thrown on the next turn of the event loop
and you can watch for it in your console. You can also handle it using a
rejection handler given to `.then` or `.catch` on the returned promise.
@method rethrow
@static
@for RSVP
@param {Error} reason reason the promise became rejected.
@throws Error
@static
*/
// Schedule `reason` to be thrown on the next event-loop turn (so it surfaces
// as an uncaught error for debugging, outside the promise machinery) and also
// re-throw it synchronously so the promise chain stays rejected per the spec.
function rethrow(reason) {
setTimeout(function () {
throw reason;
});
throw reason;
}
/**
`RSVP.defer` returns an object similar to jQuery's `$.Deferred`.
`RSVP.defer` should be used when porting over code reliant on `$.Deferred`'s
interface. New code should use the `RSVP.Promise` constructor instead.
The object returned from `RSVP.defer` is a plain object with three properties:
* promise - an `RSVP.Promise`.
* reject - a function that causes the `promise` property on this object to
become rejected
* resolve - a function that causes the `promise` property on this object to
become fulfilled.
Example:
```javascript
let deferred = RSVP.defer();
deferred.resolve("Success!");
deferred.promise.then(function(value){
// value here is "Success!"
});
```
@method defer
@static
@for RSVP
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Object}
*/
/**
 * Create a deferred: a plain object exposing {promise, resolve, reject},
 * where calling resolve/reject settles the promise.
 *
 * @param {String} label optional label, forwarded to the Promise constructor
 * @return {Object}
 */
function defer(label) {
  var result = { resolve: undefined, reject: undefined };
  result.promise = new Promise(function (resolve, reject) {
    // The executor runs synchronously, so both handles are populated
    // before defer() returns.
    result.resolve = resolve;
    result.reject = reject;
  }, label);
  return result;
}
/**
`RSVP.map` is similar to JavaScript's native `map` method, except that it
waits for all promises to become fulfilled before running the `mapFn` on
each item in given to `promises`. `RSVP.map` returns a promise that will
become fulfilled with the result of running `mapFn` on the values the promises
become fulfilled with.
For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.resolve(2);
let promise3 = RSVP.resolve(3);
let promises = [ promise1, promise2, promise3 ];
let mapFn = function(item){
return item + 1;
};
RSVP.map(promises, mapFn).then(function(result){
// result is [ 2, 3, 4 ]
});
```
If any of the `promises` given to `RSVP.map` are rejected, the first promise
that is rejected will be given as an argument to the returned promise's
rejection handler. For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.reject(new Error('2'));
let promise3 = RSVP.reject(new Error('3'));
let promises = [ promise1, promise2, promise3 ];
let mapFn = function(item){
return item + 1;
};
RSVP.map(promises, mapFn).then(function(array){
// Code here never runs because there are rejected promises!
}, function(reason) {
// reason.message === '2'
});
```
`RSVP.map` will also wait if a promise is returned from `mapFn`. For example,
say you want to get all comments from a set of blog posts, but you need
the blog posts first because they contain a url to those comments.
```javascript
let mapFn = function(blogPost){
// getComments does some ajax and returns an RSVP.Promise that is fulfilled
// with some comments data
return getComments(blogPost.comments_url);
};
// getBlogPosts does some ajax and returns an RSVP.Promise that is fulfilled
// with some blog post data
RSVP.map(getBlogPosts(), mapFn).then(function(comments){
// comments is the result of asking the server for the comments
// of all blog posts returned from getBlogPosts()
});
```
@method map
@static
@for RSVP
@param {Array} promises
@param {Function} mapFn function to be called on each fulfilled promise.
@param {String} label optional string for labeling the promise.
Useful for tooling.
@return {Promise} promise that is fulfilled with the result of calling
`mapFn` on each fulfilled promise or value when they become fulfilled.
The promise will be rejected if any of the given `promises` become rejected.
@static
*/
/**
 * Wait for every entry of `promises`, apply `mapFn` to each fulfilled value,
 * then wait for any promises the mapping itself produced.
 *
 * @param {Array} promises
 * @param {Function} mapFn called with each fulfilled value (one argument)
 * @param {String} label optional instrumentation label
 * @return {Promise}
 */
function map(promises, mapFn, label) {
  if (!isArray(promises)) {
    return Promise.reject(new TypeError("RSVP.map must be called with an array"), label);
  }
  if (!isFunction(mapFn)) {
    return Promise.reject(new TypeError("RSVP.map expects a function as a second argument"), label);
  }
  return Promise.all(promises, label).then(function (values) {
    var mapped = new Array(values.length);
    for (var index = 0; index < values.length; index++) {
      mapped[index] = mapFn(values[index]);
    }
    // Wait again: mapFn may itself have returned promises.
    return Promise.all(mapped, label);
  });
}
/**
This is a convenient alias for `RSVP.Promise.resolve`.
@method resolve
@static
@for RSVP
@param {*} value value that the returned promise will be resolved with
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise that will become fulfilled with the given
`value`
*/
// Thin alias for `RSVP.Promise.resolve`; `label` is forwarded for tooling.
function resolve$2(value, label) {
return Promise.resolve(value, label);
}
/**
This is a convenient alias for `RSVP.Promise.reject`.
@method reject
@static
@for RSVP
@param {*} reason value that the returned promise will be rejected with.
@param {String} label optional string for identifying the returned promise.
Useful for tooling.
@return {Promise} a promise rejected with the given `reason`.
*/
// Thin alias for `RSVP.Promise.reject`; `label` is forwarded for tooling.
function reject$2(reason, label) {
return Promise.reject(reason, label);
}
/**
`RSVP.filter` is similar to JavaScript's native `filter` method, except that it
waits for all promises to become fulfilled before running the `filterFn` on
each item in given to `promises`. `RSVP.filter` returns a promise that will
become fulfilled with the result of running `filterFn` on the values the
promises become fulfilled with.
For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.resolve(2);
let promise3 = RSVP.resolve(3);
let promises = [promise1, promise2, promise3];
let filterFn = function(item){
return item > 1;
};
RSVP.filter(promises, filterFn).then(function(result){
// result is [ 2, 3 ]
});
```
If any of the `promises` given to `RSVP.filter` are rejected, the first promise
that is rejected will be given as an argument to the returned promise's
rejection handler. For example:
```javascript
let promise1 = RSVP.resolve(1);
let promise2 = RSVP.reject(new Error('2'));
let promise3 = RSVP.reject(new Error('3'));
let promises = [ promise1, promise2, promise3 ];
let filterFn = function(item){
return item > 1;
};
RSVP.filter(promises, filterFn).then(function(array){
// Code here never runs because there are rejected promises!
}, function(reason) {
// reason.message === '2'
});
```
`RSVP.filter` will also wait for any promises returned from `filterFn`.
For instance, you may want to fetch a list of users then return a subset
of those users based on some asynchronous operation:
```javascript
let alice = { name: 'alice' };
let bob = { name: 'bob' };
let users = [ alice, bob ];
let promises = users.map(function(user){
return RSVP.resolve(user);
});
let filterFn = function(user){
// Here, Alice has permissions to create a blog post, but Bob does not.
return getPrivilegesForUser(user).then(function(privs){
return privs.can_create_blog_post === true;
});
};
RSVP.filter(promises, filterFn).then(function(users){
// true, because the server told us only Alice can create a blog post.
users.length === 1;
// false, because Alice is the only user present in `users`
users[0] === bob;
});
```
@method filter
@static
@for RSVP
@param {Array} promises
@param {Function} filterFn - function to be called on each resolved value to
filter the final results.
@param {String} label optional string describing the promise. Useful for
tooling.
@return {Promise}
*/
// Resolve an array of promises/values into one promise for the whole array.
function resolveAll(promises, label) {
return Promise.all(promises, label);
}
// Resolve a thenable that is itself expected to fulfill with an array of
// promises/values, then resolve that array.
function resolveSingle(promise, label) {
return Promise.resolve(promise, label).then(function (promises) {
return resolveAll(promises, label);
});
}
// `RSVP.filter`: resolve `promises`, run `filterFn` over every fulfilled
// value, wait for the (possibly async) filter verdicts, and fulfill with the
// values whose verdict was truthy. Rejects with a TypeError on bad arguments.
function filter(promises, filterFn, label) {
// Accept either an array or a thenable expected to fulfill with an array.
if (!isArray(promises) && !(isObject(promises) && promises.then !== undefined)) {
return Promise.reject(new TypeError("RSVP.filter must be called with an array or promise"), label);
}
if (!isFunction(filterFn)) {
return Promise.reject(new TypeError("RSVP.filter expects function as a second argument"), label);
}
var promise = isArray(promises) ? resolveAll(promises, label) : resolveSingle(promises, label);
return promise.then(function (values) {
var length = values.length;
var filtered = new Array(length);
// First pass: collect filter verdicts (each may itself be a promise).
for (var i = 0; i < length; i++) {
filtered[i] = filterFn(values[i]);
}
// Second pass, after all verdicts settle: keep the values with truthy
// verdicts, preserving their original order.
return resolveAll(filtered, label).then(function (filtered) {
var results = new Array(length);
var newLength = 0;
for (var _i = 0; _i < length; _i++) {
if (filtered[_i]) {
results[newLength] = values[_i];
newLength++;
}
}
// Trim the over-allocated result array down to the kept count.
results.length = newLength;
return results;
});
});
}
// Shared job queue state for the internal asap() scheduler; jobs are stored
// as flat [callback, arg] pairs, so `len` advances by 2 per job.
var len = 0;
// Populated by attemptVertex() with vert.x's runOnLoop/runOnContext.
var vertxNext = void 0;
// Schedule `callback(arg)` to run on the next async turn, batching every job
// queued during the same turn into a single flush.
function asap(callback, arg) {
queue$1[len] = callback;
queue$1[len + 1] = arg;
len += 2;
if (len === 2) {
// len === 2 means this is the first job queued since the last flush, so
// an async flush must be scheduled. Jobs queued before that flush runs
// are processed by the same flush.
scheduleFlush$1();
}
}
// Environment detection used to pick the fastest available async scheduling
// primitive below.
var browserWindow = typeof window !== 'undefined' ? window : undefined;
var browserGlobal = browserWindow || {};
var BrowserMutationObserver = browserGlobal.MutationObserver || browserGlobal.WebKitMutationObserver;
// Node: no `self`, and `process` reports the characteristic [object process].
var isNode = typeof self === 'undefined' && typeof process !== 'undefined' && {}.toString.call(process) === '[object process]';
// test for web worker but not in IE10
var isWorker = typeof Uint8ClampedArray !== 'undefined' && typeof importScripts !== 'undefined' && typeof MessageChannel !== 'undefined';
// node
// Build a flush scheduler around process.nextTick (Node).
function useNextTick() {
var nextTick = process.nextTick;
// node version 0.10.x displays a deprecation warning when nextTick is used recursively
// setImmediate should be used instead
var version = process.versions.node.match(/^(?:(\d+)\.)?(?:(\d+)\.)?(\*|\d+)$/);
if (Array.isArray(version) && version[1] === '0' && version[2] === '10') {
nextTick = setImmediate;
}
return function () {
return nextTick(flush);
};
}
// vertx
// Build a flush scheduler around vert.x's runOnLoop/runOnContext when it has
// been wired up (see attemptVertex); otherwise fall back to setTimeout.
function useVertxTimer() {
if (typeof vertxNext !== 'undefined') {
return function () {
vertxNext(flush);
};
}
return useSetTimeout();
}
// Build a flush scheduler around MutationObserver: toggling the text node's
// data between '0' and '1' queues a microtask that runs flush.
function useMutationObserver() {
var iterations = 0;
var observer = new BrowserMutationObserver(flush);
var node = document.createTextNode('');
observer.observe(node, { characterData: true });
return function () {
return node.data = iterations = ++iterations % 2;
};
}
// web worker
// Build a flush scheduler around MessageChannel: posting on port2
// asynchronously fires port1's onmessage handler, which is flush.
function useMessageChannel() {
var channel = new MessageChannel();
channel.port1.onmessage = flush;
return function () {
return channel.port2.postMessage(0);
};
}
// Last-resort flush scheduler: a macrotask via setTimeout.
function useSetTimeout() {
return function () {
return setTimeout(flush, 1);
};
}
// Preallocated job queue consumed by flush(); see asap() above for layout.
var queue$1 = new Array(1000);
// Drain the queue: run every queued [callback, arg] pair in FIFO order and
// clear the slots so the entries can be garbage collected.
function flush() {
for (var i = 0; i < len; i += 2) {
var callback = queue$1[i];
var arg = queue$1[i + 1];
callback(arg);
queue$1[i] = undefined;
queue$1[i + 1] = undefined;
}
len = 0;
}
// Try to wire up the vert.x scheduler by requiring 'vertx' (guarded, since
// the module only exists on that platform); fall back to setTimeout.
// NOTE(review): the name is misspelled ("Vertex" vs vert.x) but is kept —
// the scheduler-selection code below calls it by this spelling.
function attemptVertex() {
try {
var r = require;
var vertx = r('vertx');
vertxNext = vertx.runOnLoop || vertx.runOnContext;
return useVertxTimer();
} catch (e) {
return useSetTimeout();
}
}
var scheduleFlush$1 = void 0;
// Decide what async method to use to trigger processing of queued callbacks:
if (isNode) {
scheduleFlush$1 = useNextTick();
} else if (BrowserMutationObserver) {
scheduleFlush$1 = useMutationObserver();
} else if (isWorker) {
scheduleFlush$1 = useMessageChannel();
} else if (browserWindow === undefined && typeof require === 'function') {
scheduleFlush$1 = attemptVertex();
} else {
scheduleFlush$1 = useSetTimeout();
}
// Resolve the global object; loading fails outright in environments that
// expose neither `self` (browser/worker) nor `global` (Node).
var platform = void 0;
/* global self */
if (typeof self === 'object') {
platform = self;
/* global global */
} else if (typeof global === 'object') {
platform = global;
} else {
throw new Error('no global: `self` or `global` found');
}
var _asap$cast$Promise$Ev;
// Babel helper: assign obj[key] = value, going through defineProperty when
// the key already exists so the property is (re)defined as an enumerable,
// configurable, writable data property. Returns obj for chaining.
function _defineProperty(obj, key, value) {
  if (key in obj) {
    Object.defineProperty(obj, key, {
      value: value,
      enumerable: true,
      configurable: true,
      writable: true
    });
  } else {
    obj[key] = value;
  }
  return obj;
}
// defaults
// Default scheduling hooks: `async` runs jobs via the internal asap queue;
// `after` defers to a macrotask.
config.async = asap;
config.after = function (cb) {
return setTimeout(cb, 0);
};
// Public alias for resolve.
var cast = resolve$2;
// Schedule `callback(arg)` using whatever scheduler config currently holds.
var async = function (callback, arg) {
return config.async(callback, arg);
};
// Forward event (un)subscription to the shared config event bus.
function on() {
config['on'].apply(config, arguments);
}
function off() {
config['off'].apply(config, arguments);
}
// Set up instrumentation through `window.__PROMISE_INSTRUMENTATION__`:
// enable instrument mode and register each supplied event handler.
if (typeof window !== 'undefined' && typeof window['__PROMISE_INSTRUMENTATION__'] === 'object') {
var callbacks = window['__PROMISE_INSTRUMENTATION__'];
configure('instrument', true);
for (var eventName in callbacks) {
if (callbacks.hasOwnProperty(eventName)) {
on(eventName, callbacks[eventName]);
}
}
}
// the default export here is for backwards compat:
// https://github.com/tildeio/rsvp.js/issues/434
// Aggregate public API object; `async` and `filter` are attached via the
// _defineProperty helper (transpiler output for computed assignment).
var rsvp = (_asap$cast$Promise$Ev = {
asap: asap,
cast: cast,
Promise: Promise,
EventTarget: EventTarget,
all: all$1,
allSettled: allSettled,
race: race$1,
hash: hash,
hashSettled: hashSettled,
rethrow: rethrow,
defer: defer,
denodeify: denodeify,
configure: configure,
on: on,
off: off,
resolve: resolve$2,
reject: reject$2,
map: map
}, _defineProperty(_asap$cast$Promise$Ev, 'async', async), _defineProperty(_asap$cast$Promise$Ev, 'filter', filter), _asap$cast$Promise$Ev);
exports['default'] = rsvp;
// Named exports mirror every property of the default export.
exports.asap = asap;
exports.cast = cast;
exports.Promise = Promise;
exports.EventTarget = EventTarget;
exports.all = all$1;
exports.allSettled = allSettled;
exports.race = race$1;
exports.hash = hash;
exports.hashSettled = hashSettled;
exports.rethrow = rethrow;
exports.defer = defer;
exports.denodeify = denodeify;
exports.configure = configure;
exports.on = on;
exports.off = off;
exports.resolve = resolve$2;
exports.reject = reject$2;
exports.map = map;
exports.async = async;
exports.filter = filter;
Object.defineProperty(exports, '__esModule', { value: true });
})));
//
var EPUBJS = EPUBJS || {};
EPUBJS.core = {};
// DOM nodeType codes used by the node-index helpers later in this file.
var ELEMENT_NODE = 1;
var TEXT_NODE = 3;
var COMMENT_NODE = 8;
var DOCUMENT_NODE = 9;
//-- Get a element for an id
EPUBJS.core.getEl = function(elem) {
return document.getElementById(elem);
};
//-- Get all elements for a class
EPUBJS.core.getEls = function(classes) {
return document.getElementsByClassName(classes);
};
// XHR helper: GET `url` and resolve with a response parsed according to
// `type` ('xml' | 'xhtml' | 'html' | 'json' | 'blob' | 'binary' | other);
// when `type` is omitted it is inferred from the file extension.
// Returns the promise of an RSVP deferred; rejects with {message, stack}
// on non-2xx responses.
EPUBJS.core.request = function(url, type, withCredentials) {
var supportsURL = window.URL;
// Safari has no 'blob' responseType; fall back to arraybuffer + manual Blob.
var BLOB_RESPONSE = supportsURL ? "blob" : "arraybuffer";
// NOTE(review): RSVP.defer is a plain function returning an object, so `new`
// yields that object — works, but `new` is presumably unnecessary here.
var deferred = new RSVP.defer();
var xhr = new XMLHttpRequest();
var uri;
//-- Check from PDF.js:
// https://github.com/mozilla/pdf.js/blob/master/web/compatibility.js
var xhrPrototype = XMLHttpRequest.prototype;
// readystatechange handler: parse the finished response per `type` and
// settle the deferred.
var handler = function() {
var r;
if (this.readyState != this.DONE) return;
if ((this.status === 200 || this.status === 0) && this.response) { // Android & Firefox reporting 0 for local & blob urls
if (type == 'xml'){
// If this.responseXML wasn't set, try to parse using a DOMParser from text
if(!this.responseXML) {
r = new DOMParser().parseFromString(this.response, "application/xml");
} else {
r = this.responseXML;
}
} else if (type == 'xhtml') {
if (!this.responseXML){
r = new DOMParser().parseFromString(this.response, "application/xhtml+xml");
} else {
r = this.responseXML;
}
} else if (type == 'html') {
if (!this.responseXML){
r = new DOMParser().parseFromString(this.response, "text/html");
} else {
r = this.responseXML;
}
} else if (type == 'json') {
r = JSON.parse(this.response);
} else if (type == 'blob') {
if (supportsURL) {
r = this.response;
} else {
//-- Safari doesn't support responseType blob, so create a blob from arraybuffer
r = new Blob([this.response]);
}
} else {
r = this.response;
}
deferred.resolve(r);
} else {
deferred.reject({
message : this.response,
stack : new Error().stack
});
}
};
if (!('overrideMimeType' in xhrPrototype)) {
// IE10 might have response, but not overrideMimeType
Object.defineProperty(xhrPrototype, 'overrideMimeType', {
value: function xmlHttpRequestOverrideMimeType(mimeType) {}
});
}
xhr.onreadystatechange = handler;
xhr.open("GET", url, true);
if(withCredentials) {
xhr.withCredentials = true;
}
// If type isn't set, determine it from the file extension
if(!type) {
uri = EPUBJS.core.uri(url);
type = uri.extension;
type = {
'htm': 'html'
}[type] || type;
}
if(type == 'blob'){
xhr.responseType = BLOB_RESPONSE;
}
if(type == "json") {
xhr.setRequestHeader("Accept", "application/json");
}
// Document responseTypes let the browser hand back a parsed DOM directly.
if(type == 'xml') {
xhr.responseType = "document";
xhr.overrideMimeType('text/xml'); // for OPF parsing
}
if(type == 'xhtml') {
xhr.responseType = "document";
}
if(type == 'html') {
xhr.responseType = "document";
}
if(type == "binary") {
xhr.responseType = "arraybuffer";
}
xhr.send();
return deferred.promise;
};
// Convert an object's own properties into an array; each value is tagged
// with its original key on an `ident` property (the values are mutated).
EPUBJS.core.toArray = function(obj) {
  var arr = [];
  for (var member in obj) {
    if (!obj.hasOwnProperty(member)) continue;
    var newitm = obj[member];
    newitm.ident = member;
    arr.push(newitm);
  }
  return arr;
};
//-- Parse the different parts of a url, returning an object with
//   protocol/host/path/origin/directory/base/filename/extension/fragment.
EPUBJS.core.uri = function(url){
  var uri = {
        protocol : '',
        host : '',
        path : '',
        origin : '',
        directory : '',
        base : '',
        filename : '',
        extension : '',
        fragment : '',
        href : url
      },
      blob = url.indexOf('blob:'),
      doubleSlash = url.indexOf('://'),
      search = url.indexOf('?'),
      fragment = url.indexOf("#"),
      withoutProtocol,
      dot,
      firstSlash;

  // blob: URLs are opaque — just record the protocol and the pre-fragment text.
  if(blob === 0) {
    uri.protocol = "blob";
    // Bug fix: was `url.indexOf(0, fragment)`, which yields a number;
    // the intent is the URL text before the fragment.
    uri.base = fragment === -1 ? url : url.slice(0, fragment);
    return uri;
  }

  // Strip fragment and query before splitting the rest of the URL.
  if(fragment != -1) {
    uri.fragment = url.slice(fragment + 1);
    url = url.slice(0, fragment);
  }
  if(search != -1) {
    uri.search = url.slice(search + 1);
    url = url.slice(0, search);
    // (a stray implicit-global assignment `href = uri.href;` was removed here)
  }

  if(doubleSlash != -1) {
    uri.protocol = url.slice(0, doubleSlash);
    withoutProtocol = url.slice(doubleSlash+3);
    firstSlash = withoutProtocol.indexOf('/');
    if(firstSlash === -1) {
      // Bug fix: previously copied the (still empty) uri.path into host;
      // a URL like "http://example.com" is all host, no path.
      uri.host = withoutProtocol;
      uri.path = "";
    } else {
      uri.host = withoutProtocol.slice(0, firstSlash);
      uri.path = withoutProtocol.slice(firstSlash);
    }
    uri.origin = uri.protocol + "://" + uri.host;
    uri.directory = EPUBJS.core.folder(uri.path);
    uri.base = uri.origin + uri.directory;
  } else {
    // Relative or protocol-less URL: everything is path.
    uri.path = url;
    uri.directory = EPUBJS.core.folder(url);
    uri.base = uri.directory;
  }

  //-- Filename & extension
  uri.filename = url.replace(uri.base, '');
  dot = uri.filename.lastIndexOf('.');
  if(dot != -1) {
    uri.extension = uri.filename.slice(dot+1);
  }
  return uri;
};
//-- Parse out the folder: returns everything up to and including the last slash
EPUBJS.core.folder = function(url){
  // Everything up to and including the last slash; '' when there is none
  // (lastIndexOf returns -1, and slice(0, 0) yields the empty string).
  return url.slice(0, url.lastIndexOf('/') + 1);
};
//-- https://github.com/ebidel/filer.js/blob/master/src/filer.js#L128
// Convert a data: URL into a Blob. Non-base64 payloads are wrapped directly;
// base64 payloads are decoded byte-by-byte into a Uint8Array.
EPUBJS.core.dataURLToBlob = function(dataURL) {
var BASE64_MARKER = ';base64,',
parts, contentType, raw, rawLength, uInt8Array;
// Plain (non-base64) data URL: content is the text after the first comma.
if (dataURL.indexOf(BASE64_MARKER) == -1) {
parts = dataURL.split(',');
contentType = parts[0].split(':')[1];
raw = parts[1];
return new Blob([raw], {type: contentType});
}
// Base64 data URL: decode to raw bytes.
parts = dataURL.split(BASE64_MARKER);
contentType = parts[0].split(':')[1];
raw = window.atob(parts[1]);
rawLength = raw.length;
uInt8Array = new Uint8Array(rawLength);
for (var i = 0; i < rawLength; ++i) {
uInt8Array[i] = raw.charCodeAt(i);
}
return new Blob([uInt8Array], {type: contentType});
};
//-- Load scripts async: http://stackoverflow.com/questions/7718935/load-scripts-asynchronously
// Append a <script> tag for `src` to `target` (default: document.body) and
// invoke `callback` once it has loaded.
EPUBJS.core.addScript = function(src, callback, target) {
var s, r;
r = false;
s = document.createElement('script');
s.type = 'text/javascript';
s.async = false;
s.src = src;
// `r` guards against double-firing when both onload and (legacy IE)
// onreadystatechange run for the same script.
s.onload = s.onreadystatechange = function() {
if ( !r && (!this.readyState || this.readyState == 'complete') ) {
r = true;
if(callback) callback();
}
};
target = target || document.body;
target.appendChild(s);
};
// Load an array of scripts sequentially (each starts after the previous one
// finishes), invoking `callback` after the last.
// NOTE(review): an empty `srcArr` calls addScript(undefined, ...) — callers
// are expected to pass at least one src.
EPUBJS.core.addScripts = function(srcArr, callback, target) {
var total = srcArr.length,
curr = 0,
cb = function(){
curr++;
if(total == curr){
if(callback) callback();
}else{
EPUBJS.core.addScript(srcArr[curr], cb, target);
}
};
EPUBJS.core.addScript(srcArr[curr], cb, target);
};
// Append a stylesheet <link> for `src` to `target` (default: document.body)
// and invoke `callback` once it has loaded.
EPUBJS.core.addCss = function(src, callback, target) {
var s, r;
r = false;
s = document.createElement('link');
s.type = 'text/css';
s.rel = "stylesheet";
s.href = src;
// `r` guards against double-firing when both onload and (legacy IE)
// onreadystatechange run for the same element.
s.onload = s.onreadystatechange = function() {
if ( !r && (!this.readyState || this.readyState == 'complete') ) {
r = true;
if(callback) callback();
}
};
target = target || document.body;
target.appendChild(s);
};
// Resolve a CSS property name to the form supported by the current browser:
// the unprefixed name when available, otherwise the first working
// vendor-prefixed variant (e.g. "transform" -> "WebkitTransform"),
// falling back to the unprefixed name.
EPUBJS.core.prefixed = function(unprefixed) {
  // (an unused `prefixes` array of CSS-text prefixes was removed here)
  var vendors = ["Webkit", "Moz", "O", "ms" ],
      upper = unprefixed[0].toUpperCase() + unprefixed.slice(1),
      length = vendors.length;

  // Unprefixed form supported natively.
  if (typeof(document.documentElement.style[unprefixed]) != 'undefined') {
    return unprefixed;
  }

  for ( var i=0; i < length; i++ ) {
    if (typeof(document.documentElement.style[vendors[i] + upper]) != 'undefined') {
      return vendors[i] + upper;
    }
  }

  return unprefixed;
};
// Resolve `path` against `base`: absolute URLs (with a host) pass through;
// otherwise each '..' segment pops a directory off `base` and the remaining
// segments are appended.
EPUBJS.core.resolveUrl = function(base, path) {
var url,
segments = [],
uri = EPUBJS.core.uri(path),
folders = base.split("/"),
paths;
// Already absolute — nothing to resolve.
if(uri.host) {
return path;
}
// Drop the filename (or trailing empty segment) from the base.
folders.pop();
paths = path.split("/");
paths.forEach(function(p){
if(p === ".."){
// NOTE(review): every '..' pops the base, even one that appears after
// ordinary segments of `path` — mid-path '..' is not normalized.
folders.pop();
}else{
segments.push(p);
}
});
url = folders.concat(segments);
return url.join("/");
};
// http://stackoverflow.com/questions/105034/how-to-create-a-guid-uuid-in-javascript
// Generate an RFC4122-style version 4 UUID, mixing the current time into
// Math.random entropy (see the Stack Overflow reference above).
EPUBJS.core.uuid = function() {
  var time = new Date().getTime();
  return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(placeholder) {
    var nibble = (time + Math.random()*16)%16 | 0;
    time = Math.floor(time/16);
    // 'x' takes the nibble as-is; 'y' is masked into the 8..b range for the
    // UUID variant position.
    return (placeholder=='x' ? nibble : (nibble&0x7|0x8)).toString(16);
  });
};
// Fast quicksort insert for sorted array -- based on:
// http://stackoverflow.com/questions/1344500/efficient-way-to-insert-a-number-into-a-sorted-array-of-numbers
// Insert `item` into the sorted `array` at its binary-search position
// (found via locationOf) and return that index.
EPUBJS.core.insert = function(item, array, compareFunction) {
var location = EPUBJS.core.locationOf(item, array, compareFunction);
array.splice(location, 0, item);
return location;
};
// Binary search: return the index at which `item` should be inserted into the
// sorted `array` to keep it ordered. `_start`/`_end` bound the search window.
EPUBJS.core.locationOf = function(item, array, compareFunction, _start, _end) {
  var start = _start || 0;
  var end = _end || array.length;
  var pivot = parseInt(start + (end - start) / 2);
  var compared;
  if(!compareFunction){
    compareFunction = function(a, b) {
      if(a > b) return 1;
      if(a < b) return -1;
      // Bug fix: this branch used `if(a = b) return 0;` (assignment, not
      // comparison), which returned undefined for falsy values and broke the
      // search. Equality is the only case left, so return 0 unconditionally.
      return 0;
    };
  }
  if(end-start <= 0) {
    return pivot;
  }
  compared = compareFunction(array[pivot], item);
  if(end-start === 1) {
    // Insert after the pivot unless the pivot sorts strictly greater.
    return compared > 0 ? pivot : pivot + 1;
  }
  if(compared === 0) {
    return pivot;
  }
  if(compared === -1) {
    return EPUBJS.core.locationOf(item, array, compareFunction, pivot, end);
  } else{
    return EPUBJS.core.locationOf(item, array, compareFunction, start, pivot);
  }
};
// Binary search: return the index of `item` in the sorted `array`, or -1 when
// absent. `_start`/`_end` bound the search window.
EPUBJS.core.indexOfSorted = function(item, array, compareFunction, _start, _end) {
  var start = _start || 0;
  var end = _end || array.length;
  var pivot = parseInt(start + (end - start) / 2);
  var compared;
  if(!compareFunction){
    compareFunction = function(a, b) {
      if(a > b) return 1;
      if(a < b) return -1;
      // Bug fix: this branch used `if(a = b) return 0;` (assignment, not
      // comparison), which returned undefined for falsy values and broke the
      // search. Equality is the only case left, so return 0 unconditionally.
      return 0;
    };
  }
  if(end-start <= 0) {
    return -1; // Not found
  }
  compared = compareFunction(array[pivot], item);
  if(end-start === 1) {
    return compared === 0 ? pivot : -1;
  }
  if(compared === 0) {
    return pivot; // Found
  }
  if(compared === -1) {
    return EPUBJS.core.indexOfSorted(item, array, compareFunction, pivot, end);
  } else{
    return EPUBJS.core.indexOfSorted(item, array, compareFunction, start, pivot);
  }
};
// Minimal FIFO task queue bound to `_scope`: enqueued items name a method on
// the scope plus its arguments; dequeue applies the next one.
EPUBJS.core.queue = function(_scope){
var _q = [];
var scope = _scope;
// Add an item to the queue
var enqueue = function(funcName, args, context) {
_q.push({
"funcName" : funcName,
"args" : args,
"context" : context
});
return _q;
};
// Run one item
var dequeue = function(){
var inwait;
if(_q.length) {
inwait = _q.shift();
// Defer to any current tasks
// setTimeout(function(){
// Invoke scope[funcName] with the stored args; `context` overrides the
// receiver when provided.
scope[inwait.funcName].apply(inwait.context || scope, inwait.args);
// }, 0);
}
};
// Run All
var flush = function(){
while(_q.length) {
dequeue();
}
};
// Clear all items in wait
var clear = function(){
_q = [];
};
var length = function(){
return _q.length;
};
// Public queue API.
return {
"enqueue" : enqueue,
"dequeue" : dequeue,
"flush" : flush,
"clear" : clear,
"length" : length
};
};
// From: https://code.google.com/p/fbug/source/browse/branches/firebug1.10/content/firebug/lib/xpath.js
/**
* Gets an XPath for an element which describes its hierarchical location.
*/
// Prefer a short, stable id-based XPath when the element has an id;
// otherwise fall back to the full positional tree path.
EPUBJS.core.getElementXPath = function(element) {
if (element && element.id) {
return '//*[@id="' + element.id + '"]';
} else {
return EPUBJS.core.getElementTreeXPath(element);
}
};
// Build a positional XPath (e.g. "./xhtml:div[2]/xhtml:p[1]/text()[1]") for
// `element` by walking up its ancestors and counting same-named siblings.
EPUBJS.core.getElementTreeXPath = function(element) {
var paths = [];
// XHTML documents need the xhtml: prefix on every step (see nsResolver).
var isXhtml = (element.ownerDocument.documentElement.getAttribute('xmlns') === "http://www.w3.org/1999/xhtml");
var index, nodeName, tagName, pathIndex;
// Text nodes get an explicit text()[n] step; the walk then continues from
// their parent element.
if(element.nodeType === Node.TEXT_NODE){
// index = Array.prototype.indexOf.call(element.parentNode.childNodes, element) + 1;
index = EPUBJS.core.indexOfTextNode(element) + 1;
paths.push("text()["+index+"]");
element = element.parentNode;
}
// Use nodeName (instead of localName) so namespace prefix is included (if any).
for (; element && element.nodeType == 1; element = element.parentNode)
{
index = 0;
// Count preceding siblings with the same nodeName to build a 1-based
// positional predicate.
for (var sibling = element.previousSibling; sibling; sibling = sibling.previousSibling)
{
// Ignore document type declaration.
if (sibling.nodeType == Node.DOCUMENT_TYPE_NODE) {
continue;
}
if (sibling.nodeName == element.nodeName) {
++index;
}
}
nodeName = element.nodeName.toLowerCase();
tagName = (isXhtml ? "xhtml:" + nodeName : nodeName);
// Omit the predicate entirely for a first/only same-named child.
pathIndex = (index ? "[" + (index+1) + "]" : "");
paths.splice(0, 0, tagName + pathIndex);
}
return paths.length ? "./" + paths.join("/") : null;
};
// Namespace resolver for document.evaluate(): maps the prefixes used by the
// reader's XPath expressions to their namespace URIs; null for unknown ones.
EPUBJS.core.nsResolver = function(prefix) {
var ns = {
'xhtml' : 'http://www.w3.org/1999/xhtml',
'epub': 'http://www.idpf.org/2007/ops'
};
return ns[prefix] || null;
};
//https://stackoverflow.com/questions/13482352/xquery-looking-for-text-with-single-quote/13483496#13483496
// Build an XPath concat() expression that safely embeds `str`, which may
// contain both single and double quotes (neither can be escaped in XPath 1.0):
// quote characters become literals delimited by the other quote kind.
EPUBJS.core.cleanStringForXpath = function(str) {
  var parts = str.match(/[^'"]+|['"]/g);
  // Robustness fix: an empty string produces no matches (null); return a
  // literal empty-string expression instead of crashing on null.map.
  if (!parts) {
    return "\'\'";
  }
  parts = parts.map(function(part){
    if (part === "'") {
      return '\"\'\"'; // output "'"
    }
    if (part === '"') {
      return "\'\"\'"; // output '"'
    }
    return "\'" + part + "\'";
  });
  return "concat(\'\'," + parts.join(",") + ")";
};
// 0-based position of `textNode` among its parent's TEXT_NODE children
// (-1 when not found). NOTE(review): superseded by the duplicate definition
// near the end of this file, which delegates to indexOfNode.
EPUBJS.core.indexOfTextNode = function(textNode){
var parent = textNode.parentNode;
var children = parent.childNodes;
var sib;
var index = -1;
for (var i = 0; i < children.length; i++) {
sib = children[i];
if(sib.nodeType === Node.TEXT_NODE){
index++;
}
if(sib == textNode) break;
}
return index;
};
// Underscore
// Fill in undefined properties of `obj` from each following source object,
// left to right (the first definition wins) — same contract as _.defaults.
EPUBJS.core.defaults = function(obj) {
  for (var argIndex = 1, argCount = arguments.length; argIndex < argCount; argIndex++) {
    var source = arguments[argIndex];
    for (var key in source) {
      if (obj[key] === void 0) obj[key] = source[key];
    }
  }
  return obj;
};
// Copy every own property (including non-enumerables and accessors, via
// property descriptors) from each source onto `target`; later sources win.
EPUBJS.core.extend = function(target) {
var sources = [].slice.call(arguments, 1);
sources.forEach(function (source) {
if(!source) return;
Object.getOwnPropertyNames(source).forEach(function(propName) {
Object.defineProperty(target, propName, Object.getOwnPropertyDescriptor(source, propName));
});
});
return target;
};
// Shallow copy: arrays are sliced, anything else is descriptor-copied onto a
// fresh object.
EPUBJS.core.clone = function(obj) {
return EPUBJS.core.isArray(obj) ? obj.slice() : EPUBJS.core.extend({}, obj);
};
// True for DOM element nodes (nodeType 1).
EPUBJS.core.isElement = function(obj) {
return !!(obj && obj.nodeType == 1);
};
// True for finite numbers and numeric strings.
EPUBJS.core.isNumber = function(n) {
return !isNaN(parseFloat(n)) && isFinite(n);
};
// True for string primitives and String objects.
EPUBJS.core.isString = function(str) {
return (typeof str === 'string' || str instanceof String);
};
// Native Array.isArray, with a toString-based fallback for old engines.
EPUBJS.core.isArray = Array.isArray || function(obj) {
return Object.prototype.toString.call(obj) === '[object Array]';
};
// Lodash
// Own-enumerable property values of `object`, in Object.keys order;
// returns [] for falsy input (same contract as _.values).
EPUBJS.core.values = function(object) {
  if(!object) return [];
  var keys = Object.keys(object);
  var result = new Array(keys.length);
  for (var i = 0; i < keys.length; i++) {
    result[i] = object[keys[i]];
  }
  return result;
};
// 0-based position of `node` among its parent's children of the given
// nodeType (counts only siblings of that type, up to and including `node`).
EPUBJS.core.indexOfNode = function(node, typeId) {
var parent = node.parentNode;
var children = parent.childNodes;
var sib;
var index = -1;
for (var i = 0; i < children.length; i++) {
sib = children[i];
if (sib.nodeType === typeId) {
index++;
}
if (sib == node) break;
}
return index;
}
// Position of a text node among its parent's text-node children.
EPUBJS.core.indexOfTextNode = function(textNode) {
return EPUBJS.core.indexOfNode(textNode, TEXT_NODE);
}
// Position of an element among its parent's element children.
EPUBJS.core.indexOfElementNode = function(elementNode) {
return EPUBJS.core.indexOfNode(elementNode, ELEMENT_NODE);
}
var EPUBJS = EPUBJS || {};
EPUBJS.reader = {};
EPUBJS.reader.plugins = {}; //-- Attach extra Controllers as plugins (like search?)
(function(root, $) {
	// Kept for a potential noConflict(); currently unused.
	var previousReader = root.ePubReader || {};

	// Factory: ePubReader(path, options) constructs an EPUBJS.Reader.
	var ePubReader = root.ePubReader = function(path, options) {
		return new EPUBJS.Reader(path, options);
	};

	//exports to multiple environments
	if (typeof define === 'function' && define.amd) {
		//AMD
		// Bug fix: previously returned the undefined global `Reader`; the
		// factory is what the module should export (matching the Node branch).
		define(function(){ return ePubReader; });
	} else if (typeof module != "undefined" && module.exports) {
		//Node
		module.exports = ePubReader;
	}
})(window, jQuery);
/**
 * EPUBJS.Reader - wires an ePub.js Book/Rendition to the reader UI.
 *
 * @param {string} bookPath path or URL of the epub to open
 * @param {Object} [_options] overrides for the defaults below; URL query
 *        parameters override both (?foo=bar becomes settings.foo = "bar")
 * @returns {EPUBJS.Reader} this
 */
EPUBJS.Reader = function(bookPath, _options) {
	var reader = this;
	var book;
	var plugin;
	var $viewer = $("#viewer");
	var search = window.location.search;
	var parameters;
	this.settings = EPUBJS.core.defaults(_options || {}, {
		bookPath : bookPath,
		restore : false,
		reload : false,
		bookmarks : undefined,
		annotations : undefined,
		contained : undefined,
		bookKey : undefined,
		styles : undefined,
		sidebarReflow: false,
		generatePagination: false,
		history: true
	});
	// Overide options with search parameters
	if(search) {
		parameters = search.slice(1).split("&");
		parameters.forEach(function(p){
			var split = p.split("=");
			var name = split[0];
			var value = split[1] || '';
			reader.settings[name] = decodeURIComponent(value);
		});
	}
	this.setBookKey(this.settings.bookPath); //-- This could be username + path or any unique string
	// Restore persisted bookmarks/styles/last location when asked to.
	if(this.settings.restore && this.isSaved()) {
		this.applySavedSettings();
	}
	this.settings.styles = this.settings.styles || {
		fontSize : "100%"
	};
	this.book = book = new ePub(this.settings.bookPath, this.settings);
	this.offline = false;
	this.sidebarOpen = false;
	if(!this.settings.bookmarks) {
		this.settings.bookmarks = [];
	}
	if(!this.settings.annotations) {
		this.settings.annotations = [];
	}
	if(this.settings.generatePagination) {
		book.generatePagination($viewer.width(), $viewer.height());
	}
	this.rendition = book.renderTo("viewer", {
		ignoreClass: "annotator-hl",
		width: "100%",
		height: "100%"
	});
	// Resume at the stored CFI when restoring, otherwise open at the start.
	if(this.settings.previousLocationCfi) {
		this.displayed = this.rendition.display(this.settings.previousLocationCfi);
	} else {
		this.displayed = this.rendition.display();
	}
	// Once the book's packaging is parsed, instantiate the UI controllers
	// and hook up keyboard/selection events. `this` === reader (outer bind).
	book.ready.then(function () {
		reader.ReaderController = EPUBJS.reader.ReaderController.call(reader, book);
		reader.SettingsController = EPUBJS.reader.SettingsController.call(reader, book);
		reader.ControlsController = EPUBJS.reader.ControlsController.call(reader, book);
		reader.SidebarController = EPUBJS.reader.SidebarController.call(reader, book);
		reader.BookmarksController = EPUBJS.reader.BookmarksController.call(reader, book);
		reader.NotesController = EPUBJS.reader.NotesController.call(reader, book);
		window.addEventListener("hashchange", this.hashChanged.bind(this), false);
		document.addEventListener('keydown', this.adjustFontSize.bind(this), false);
		this.rendition.on("keydown", this.adjustFontSize.bind(this));
		this.rendition.on("keydown", reader.ReaderController.arrowKeys.bind(this));
		this.rendition.on("selected", this.selectedRange.bind(this));
	}.bind(this)).then(function() {
		reader.ReaderController.hideLoader();
	}.bind(this));
	// Call Plugins
	for(plugin in EPUBJS.reader.plugins) {
		if(EPUBJS.reader.plugins.hasOwnProperty(plugin)) {
			reader[plugin] = EPUBJS.reader.plugins[plugin].call(reader, book);
		}
	}
	book.loaded.metadata.then(function(meta) {
		reader.MetaController = EPUBJS.reader.MetaController.call(reader, meta);
	});
	book.loaded.navigation.then(function(navigation) {
		reader.TocController = EPUBJS.reader.TocController.call(reader, navigation);
	});
	// Persist settings on exit (see unload()).
	window.addEventListener("beforeunload", this.unload.bind(this), false);
	return this;
};
// Keyboard zoom: Ctrl/Cmd + "+", "-" or "0" grows, shrinks or resets the
// book font size (stored as a percentage string, e.g. "100%").
EPUBJS.Reader.prototype.adjustFontSize = function(e) {
	var STEP = 2; // percentage points per keypress
	var KEY_PLUS = 187;
	var KEY_MINUS = 189;
	var KEY_ZERO = 48;
	var modifier = e.ctrlKey || e.metaKey;
	var current;
	if (!this.settings.styles) {
		return;
	}
	if (!this.settings.styles.fontSize) {
		this.settings.styles.fontSize = "100%";
	}
	current = parseInt(this.settings.styles.fontSize.slice(0, -1));
	if (!modifier) {
		return;
	}
	switch (e.keyCode) {
		case KEY_PLUS:
			e.preventDefault();
			this.book.setStyle("fontSize", (current + STEP) + "%");
			break;
		case KEY_MINUS:
			e.preventDefault();
			this.book.setStyle("fontSize", (current - STEP) + "%");
			break;
		case KEY_ZERO:
			e.preventDefault();
			this.book.setStyle("fontSize", "100%");
			break;
	}
};
// Store a bookmark CFI (no-op if already present) and notify listeners.
EPUBJS.Reader.prototype.addBookmark = function(cfi) {
	var alreadyStored = this.isBookmarked(cfi) > -1;
	if (alreadyStored) {
		return;
	}
	this.settings.bookmarks.push(cfi);
	this.trigger("reader:bookmarked", cfi);
};
// Drop a bookmark by CFI (no-op if absent); the event payload is the
// removed bookmark's former index.
EPUBJS.Reader.prototype.removeBookmark = function(cfi) {
	var position = this.isBookmarked(cfi);
	if (position === -1) {
		return;
	}
	this.settings.bookmarks.splice(position, 1);
	this.trigger("reader:unbookmarked", position);
};
// Index of the CFI in the bookmark list, or -1 when not bookmarked.
EPUBJS.Reader.prototype.isBookmarked = function(cfi) {
	return this.settings.bookmarks.indexOf(cfi);
};
/*
EPUBJS.Reader.prototype.searchBookmarked = function(cfi) {
var bookmarks = this.settings.bookmarks,
len = bookmarks.length,
i;
for(i = 0; i < len; i++) {
if (bookmarks[i]['cfi'] === cfi) return i;
}
return -1;
};
*/
// Reset the bookmark list (emits no event).
EPUBJS.Reader.prototype.clearBookmarks = function() {
	this.settings.bookmarks = [];
};
//-- Notes
// Append an annotation ({annotatedAt, anchor, body}) to the settings list.
EPUBJS.Reader.prototype.addNote = function(note) {
	this.settings.annotations.push(note);
};
// Remove an annotation from the settings list (no-op if absent).
// Bug fix: the original used `delete`, which leaves an undefined hole in
// the array (length unchanged) and corrupts the 1-based note numbering
// derived from annotations.indexOf(). Use splice, matching removeBookmark.
EPUBJS.Reader.prototype.removeNote = function(note) {
	var index = this.settings.annotations.indexOf(note);
	if( index === -1 ) return;
	this.settings.annotations.splice(index, 1);
};
// Reset the annotations list (emits no event).
EPUBJS.Reader.prototype.clearNotes = function() {
	this.settings.annotations = [];
};
//-- Settings
// Build (once) the localStorage key that persists this book's settings.
// Key shape: "epubjsreader:<version>:<host>:<identifier>".
EPUBJS.Reader.prototype.setBookKey = function(identifier){
	if (!this.settings.bookKey) {
		this.settings.bookKey = [
			"epubjsreader:" + EPUBJS.VERSION,
			window.location.host,
			identifier
		].join(":");
	}
	return this.settings.bookKey;
};
//-- True when this book's settings exist in localStorage.
// (The bookPath argument is unused; the key comes from settings.bookKey.)
EPUBJS.Reader.prototype.isSaved = function(bookPath) {
	if (!localStorage) {
		return false;
	}
	return localStorage.getItem(this.settings.bookKey) !== null;
};
// Delete this book's persisted settings from localStorage.
// Returns false when localStorage is unavailable; undefined otherwise.
EPUBJS.Reader.prototype.removeSavedSettings = function() {
	if(!localStorage) {
		return false;
	}
	localStorage.removeItem(this.settings.bookKey);
};
// Load persisted settings for this book and merge them into this.settings.
// Returns true when something was applied; false otherwise (no localStorage,
// nothing stored, or unparsable JSON).
EPUBJS.Reader.prototype.applySavedSettings = function() {
	var stored;
	if(!localStorage) {
		return false;
	}
	try {
		stored = JSON.parse(localStorage.getItem(this.settings.bookKey));
	} catch (e) { // parsing error of localStorage
		return false;
	}
	if(stored) {
		// Merge styles
		// NOTE(review): merge direction depends on EPUBJS.core.defaults —
		// presumably existing keys win over stored ones; confirm there.
		if(stored.styles) {
			this.settings.styles = EPUBJS.core.defaults(this.settings.styles || {}, stored.styles);
		}
		// Merge the rest
		this.settings = EPUBJS.core.defaults(this.settings, stored);
		return true;
	} else {
		return false;
	}
};
// Persist current settings (including the current location CFI) to localStorage.
// Robustness fix: rendition.currentLocation() can be undefined before the
// first render (cf. the identical guard in ReaderController.slideOut), which
// used to throw here and abort saving; now the previous CFI is simply kept.
EPUBJS.Reader.prototype.saveSettings = function(){
	var location;
	if(this.book) {
		location = this.rendition.currentLocation();
		if (location && location.start) {
			this.settings.previousLocationCfi = location.start.cfi;
		}
	}
	if(!localStorage) {
		return false;
	}
	localStorage.setItem(this.settings.bookKey, JSON.stringify(this.settings));
};
// beforeunload hook: persist settings when restore mode is enabled.
EPUBJS.Reader.prototype.unload = function(){
	if(this.settings.restore && localStorage) {
		this.saveSettings();
	}
};
// Navigate the rendition to the CFI/href carried in the URL hash.
EPUBJS.Reader.prototype.hashChanged = function(){
	var hash = window.location.hash.slice(1);
	this.rendition.display(hash);
};
// Record a text selection's CFI range and mirror it into the URL hash
// (when history tracking is enabled) so the location can be restored.
EPUBJS.Reader.prototype.selectedRange = function(cfiRange){
	var cfiFragment = "#"+cfiRange;
	// Update the History Location
	if(this.settings.history &&
			window.location.hash != cfiFragment) {
		// Add CFI fragment to the history
		history.pushState({}, '', cfiFragment);
		this.currentLocationCfi = cfiRange;
	}
};
//-- Enable binding events to reader (on/off/trigger come from RSVP.EventTarget)
RSVP.EventTarget.mixin(EPUBJS.Reader.prototype);
// Sidebar panel listing bookmarks; reacts to reader:bookmarked /
// reader:unbookmarked. Called with `this` bound to the reader; returns the
// panel's show/hide API.
EPUBJS.reader.BookmarksController = function() {
	var reader = this;
	var book = this.book;
	var rendition = this.rendition;
	var $bookmarks = $("#bookmarksView"),
		$list = $bookmarks.find("#bookmarks");
	var docfrag = document.createDocumentFragment();
	var show = function() {
		$bookmarks.show();
	};
	var hide = function() {
		$bookmarks.hide();
	};
	// Monotonic counter used for list item ids ("bookmark-N").
	// NOTE(review): removeBookmark triggers with the bookmark's current array
	// index, not this counter, so ids and removal indices can drift apart
	// after deletions — confirm against the unbookmarked handler below.
	var counter = 0;
	// Build an <li><a> entry for one CFI; label from the TOC when available.
	var createBookmarkItem = function(cfi) {
		var listitem = document.createElement("li"),
			link = document.createElement("a");
		listitem.id = "bookmark-"+counter;
		listitem.classList.add('list_item');
		var spineItem = book.spine.get(cfi);
		var tocItem;
		if (spineItem.index in book.navigation.toc) {
			tocItem = book.navigation.toc[spineItem.index];
			link.textContent = tocItem.label;
		} else {
			link.textContent = cfi;
		}
		link.href = cfi;
		link.classList.add('bookmark_link');
		// Clicking a bookmark jumps the rendition to its CFI.
		link.addEventListener("click", function(event){
			var cfi = this.getAttribute('href');
			rendition.display(cfi);
			event.preventDefault();
		}, false);
		listitem.appendChild(link);
		counter++;
		return listitem;
	};
	// Populate the panel with bookmarks restored from settings.
	this.settings.bookmarks.forEach(function(cfi) {
		var bookmark = createBookmarkItem(cfi);
		docfrag.appendChild(bookmark);
	});
	$list.append(docfrag);
	this.on("reader:bookmarked", function(cfi) {
		var item = createBookmarkItem(cfi);
		$list.append(item);
	});
	this.on("reader:unbookmarked", function(index) {
		var $item = $("#bookmark-"+index);
		$item.remove();
	});
	return {
		"show" : show,
		"hide" : hide
	};
};
// Toolbar wiring: sidebar toggle, fullscreen, settings, bookmark button,
// and bookmark-state/history updates on relocation. Called with `this`
// bound to the reader; returns an (empty) public API object.
EPUBJS.reader.ControlsController = function(book) {
	var reader = this;
	var rendition = this.rendition;
	var $store = $("#store"),
		$fullscreen = $("#fullscreen"),
		$fullscreenicon = $("#fullscreenicon"),
		$cancelfullscreenicon = $("#cancelfullscreenicon"),
		$slider = $("#slider"),
		$main = $("#main"),
		$sidebar = $("#sidebar"),
		$settings = $("#setting"),
		$bookmark = $("#bookmark");
	// Bug fix: `fullscreen` was only declared inside the commented-out block
	// below, so the fullscreenchange handler assigned an implicit global
	// (a ReferenceError in strict mode). Declare it locally.
	var fullscreen = false;
	/*
	var goOnline = function() {
		reader.offline = false;
		// $store.attr("src", $icon.data("save"));
	};
	var goOffline = function() {
		reader.offline = true;
		// $store.attr("src", $icon.data("saved"));
	};
	book.on("book:online", goOnline);
	book.on("book:offline", goOffline);
	*/
	// Hamburger button: toggle the sidebar and swap the icon.
	$slider.on("click", function () {
		if(reader.sidebarOpen) {
			reader.SidebarController.hide();
			$slider.addClass("icon-menu");
			$slider.removeClass("icon-right");
		} else {
			reader.SidebarController.show();
			$slider.addClass("icon-right");
			$slider.removeClass("icon-menu");
		}
	});
	// Fullscreen support via the optional screenfull library.
	if(typeof screenfull !== 'undefined') {
		$fullscreen.on("click", function() {
			screenfull.toggle($('#container')[0]);
		});
		if(screenfull.raw) {
			document.addEventListener(screenfull.raw.fullscreenchange, function() {
				fullscreen = screenfull.isFullscreen;
				if(fullscreen) {
					$fullscreen
						.addClass("icon-resize-small")
						.removeClass("icon-resize-full");
				} else {
					$fullscreen
						.addClass("icon-resize-full")
						.removeClass("icon-resize-small");
				}
			});
		}
	}
	$settings.on("click", function() {
		reader.SettingsController.show();
	});
	// Bookmark button: toggle a bookmark at the current location.
	$bookmark.on("click", function() {
		var cfi = reader.rendition.currentLocation().start.cfi;
		var bookmarked = reader.isBookmarked(cfi);
		if(bookmarked === -1) { //-- Add bookmark
			reader.addBookmark(cfi);
			$bookmark
				.addClass("icon-bookmark")
				.removeClass("icon-bookmark-empty");
		} else { //-- Remove Bookmark
			reader.removeBookmark(cfi);
			$bookmark
				.removeClass("icon-bookmark")
				.addClass("icon-bookmark-empty");
		}
	});
	// Keep the bookmark icon and the URL hash in sync with the location.
	rendition.on('relocated', function(location){
		var cfi = location.start.cfi;
		var cfiFragment = "#" + cfi;
		//-- Check if bookmarked
		var bookmarked = reader.isBookmarked(cfi);
		if(bookmarked === -1) { //-- Not bookmarked
			$bookmark
				.removeClass("icon-bookmark")
				.addClass("icon-bookmark-empty");
		} else { //-- Bookmarked
			$bookmark
				.addClass("icon-bookmark")
				.removeClass("icon-bookmark-empty");
		}
		reader.currentLocationCfi = cfi;
		// Update the History Location
		if(reader.settings.history &&
				window.location.hash != cfiFragment) {
			// Add CFI fragment to the history
			history.pushState({}, '', cfiFragment);
		}
	});
	return {
	};
};
// Show the book's title/author in the page header and the document title.
EPUBJS.reader.MetaController = function(meta) {
	var title = meta.title;
	var author = meta.creator;
	var $title = $("#book-title");
	var $author = $("#chapter-title");
	var $dash = $("#title-seperator");
	document.title = title+" – "+author;
	$title.html(title);
	$author.html(author);
	$dash.show();
};
// Notes panel: lets the user attach a note to a point in the text, renders
// the notes list, and places in-page markers with hover popups.
// Called with `this` bound to the reader; returns the panel's show/hide API.
// NOTE(review): this controller uses book.renderer / epubcfi helpers from an
// older epub.js API (generateCfiFromTextNode, addMarker) — confirm they
// exist in the bundled epub.js version.
EPUBJS.reader.NotesController = function() {
	var book = this.book;
	var rendition = this.rendition;
	var reader = this;
	var $notesView = $("#notesView");
	var $notes = $("#notes");
	var $text = $("#note-text");
	var $anchor = $("#note-anchor");
	var annotations = reader.settings.annotations;
	var renderer = book.renderer;
	var popups = [];
	var epubcfi = new ePub.CFI();
	var show = function() {
		$notesView.show();
	};
	var hide = function() {
		$notesView.hide();
	}
	// Click handler armed by the "Attach" button: anchors the typed note at
	// the clicked point, extends the anchor to the end of the sentence,
	// stores and renders it, then disarms itself.
	var insertAtPoint = function(e) {
		var range;
		var textNode;
		var offset;
		var doc = book.renderer.doc;
		var cfi;
		var annotation;
		// standard
		if (doc.caretPositionFromPoint) {
			range = doc.caretPositionFromPoint(e.clientX, e.clientY);
			textNode = range.offsetNode;
			offset = range.offset;
		// WebKit
		} else if (doc.caretRangeFromPoint) {
			range = doc.caretRangeFromPoint(e.clientX, e.clientY);
			textNode = range.startContainer;
			offset = range.startOffset;
		}
		// Descend to the first child text node when the hit was an element.
		if (textNode.nodeType !== 3) {
			for (var i=0; i < textNode.childNodes.length; i++) {
				if (textNode.childNodes[i].nodeType == 3) {
					textNode = textNode.childNodes[i];
					break;
				}
			}
		}
		// Find the end of the sentance
		offset = textNode.textContent.indexOf(".", offset);
		if(offset === -1){
			offset = textNode.length; // Last item
		} else {
			offset += 1; // After the period
		}
		cfi = epubcfi.generateCfiFromTextNode(textNode, offset, book.renderer.currentChapter.cfiBase);
		annotation = {
			annotatedAt: new Date(),
			anchor: cfi,
			body: $text.val()
		}
		// add to list
		reader.addNote(annotation);
		// attach
		addAnnotation(annotation);
		placeMarker(annotation);
		// clear
		$text.val('');
		$anchor.text("Attach");
		$text.prop("disabled", false);
		rendition.off("click", insertAtPoint);
	};
	// Append one annotation to the notes list with a "context" link that
	// jumps the rendition to its anchor.
	var addAnnotation = function(annotation){
		var note = document.createElement("li");
		var link = document.createElement("a");
		note.innerHTML = annotation.body;
		// note.setAttribute("ref", annotation.anchor);
		link.innerHTML = " context »";
		link.href = "#"+annotation.anchor;
		link.onclick = function(){
			rendition.display(annotation.anchor);
			return false;
		};
		note.appendChild(link);
		$notes.append(note);
	};
	// Inject a superscript marker element into the chapter at the note's CFI.
	var placeMarker = function(annotation){
		var doc = book.renderer.doc;
		var marker = document.createElement("span");
		var mark = document.createElement("a");
		marker.classList.add("footnotesuperscript", "reader_generated");
		marker.style.verticalAlign = "super";
		marker.style.fontSize = ".75em";
		// marker.style.position = "relative";
		marker.style.lineHeight = "1em";
		// mark.style.display = "inline-block";
		mark.style.padding = "2px";
		mark.style.backgroundColor = "#fffa96";
		mark.style.borderRadius = "5px";
		mark.style.cursor = "pointer";
		marker.id = "note-"+EPUBJS.core.uuid();
		// Marker label is the note's 1-based position in the annotations list.
		mark.innerHTML = annotations.indexOf(annotation) + 1 + "[Reader]";
		marker.appendChild(mark);
		epubcfi.addMarker(annotation.anchor, doc, marker);
		markerEvents(marker, annotation.body);
	}
	// Wire hover/click behaviour for a placed marker: a positioned popup with
	// the note text, and a click-through to the notes sidebar.
	var markerEvents = function(item, txt){
		var id = item.id;
		// Build (once) and position the popup for this marker.
		var showPop = function(){
			var poppos,
				iheight = renderer.height,
				iwidth = renderer.width,
				tip,
				pop,
				maxHeight = 225,
				itemRect,
				left,
				top,
				pos;
			// NOTE(review): pop_content and popRect below are assigned
			// without var, creating implicit globals shared across markers
			// (and a strict-mode error); pop_content is even read on re-show
			// after another marker may have overwritten it. A fix needs care
			// (look the content element up from popups[id]); left as-is here.
			//-- create a popup with endnote inside of it
			if(!popups[id]) {
				popups[id] = document.createElement("div");
				popups[id].setAttribute("class", "popup");
				pop_content = document.createElement("div");
				popups[id].appendChild(pop_content);
				pop_content.innerHTML = txt;
				pop_content.setAttribute("class", "pop_content");
				renderer.render.document.body.appendChild(popups[id]);
				//-- TODO: will these leak memory? - Fred
				popups[id].addEventListener("mouseover", onPop, false);
				popups[id].addEventListener("mouseout", offPop, false);
				//-- Add hide on page change
				rendition.on("locationChanged", hidePop, this);
				rendition.on("locationChanged", offPop, this);
				// chapter.book.on("renderer:chapterDestroy", hidePop, this);
			}
			pop = popups[id];
			//-- get location of item
			itemRect = item.getBoundingClientRect();
			left = itemRect.left;
			top = itemRect.top;
			//-- show the popup
			pop.classList.add("show");
			//-- locations of popup
			popRect = pop.getBoundingClientRect();
			//-- position the popup
			pop.style.left = left - popRect.width / 2 + "px";
			pop.style.top = top + "px";
			//-- Adjust max height
			if(maxHeight > iheight / 2.5) {
				maxHeight = iheight / 2.5;
				pop_content.style.maxHeight = maxHeight + "px";
			}
			//-- switch above / below
			if(popRect.height + top >= iheight - 25) {
				pop.style.top = top - popRect.height + "px";
				pop.classList.add("above");
			}else{
				pop.classList.remove("above");
			}
			//-- switch left
			if(left - popRect.width <= 0) {
				pop.style.left = left + "px";
				pop.classList.add("left");
			}else{
				pop.classList.remove("left");
			}
			//-- switch right
			if(left + popRect.width / 2 >= iwidth) {
				//-- TEMP MOVE: 300
				pop.style.left = left - 300 + "px";
				popRect = pop.getBoundingClientRect();
				pop.style.left = left - popRect.width + "px";
				//-- switch above / below again
				if(popRect.height + top >= iheight - 25) {
					pop.style.top = top - popRect.height + "px";
					pop.classList.add("above");
				}else{
					pop.classList.remove("above");
				}
				pop.classList.add("right");
			}else{
				pop.classList.remove("right");
			}
		}
		var onPop = function(){
			popups[id].classList.add("on");
		}
		var offPop = function(){
			popups[id].classList.remove("on");
		}
		var hidePop = function(){
			setTimeout(function(){
				popups[id].classList.remove("show");
			}, 100);
		}
		var openSidebar = function(){
			reader.ReaderController.slideOut();
			show();
		};
		item.addEventListener("mouseover", showPop, false);
		item.addEventListener("mouseout", hidePop, false);
		item.addEventListener("click", openSidebar, false);
	}
	// "Attach" button: arm the click-to-anchor handler and lock the textarea.
	$anchor.on("click", function(e){
		$anchor.text("Cancel");
		$text.prop("disabled", "true");
		// listen for selection
		rendition.on("click", insertAtPoint);
	});
	// Render notes restored from settings.
	annotations.forEach(function(note) {
		addAnnotation(note);
	});
	/*
	renderer.registerHook("beforeChapterDisplay", function(callback, renderer){
		var chapter = renderer.currentChapter;
		annotations.forEach(function(note) {
			var cfi = epubcfi.parse(note.anchor);
			if(cfi.spinePos === chapter.spinePos) {
				try {
					placeMarker(note);
				} catch(e) {
					console.log("anchoring failed", note.anchor);
				}
			}
		});
		callback();
	}, true);
	*/
	return {
		"show" : show,
		"hide" : hide
	};
};
// Main view controller: sidebar slide in/out, loader, spread divider, and
// page-turn handling (arrow keys, pager buttons, relocation state).
// Called with `this` bound to the reader; note the `book` parameter is
// immediately shadowed by `this.book` below.
EPUBJS.reader.ReaderController = function(book) {
	var $main = $("#main"),
		$divider = $("#divider"),
		$loader = $("#loader"),
		$next = $("#next"),
		$prev = $("#prev");
	var reader = this;
	var book = this.book;
	var rendition = this.rendition;
	// Restore the full-width layout when the sidebar closes.
	var slideIn = function() {
		var currentPosition = rendition.currentLocation().start.cfi;
		if (reader.settings.sidebarReflow){
			$main.removeClass('single');
			$main.one("transitionend", function(){
				rendition.resize();
			});
		} else {
			$main.removeClass("closed");
		}
	};
	// Shrink the view to make room for the sidebar (no-op before first render).
	var slideOut = function() {
		var location = rendition.currentLocation();
		if (!location) {
			return;
		}
		var currentPosition = location.start.cfi;
		if (reader.settings.sidebarReflow){
			$main.addClass('single');
			$main.one("transitionend", function(){
				rendition.resize();
			});
		} else {
			$main.addClass("closed");
		}
	};
	var showLoader = function() {
		$loader.show();
		hideDivider();
	};
	var hideLoader = function() {
		$loader.hide();
		//-- If the book is using spreads, show the divider
		// if(book.settings.spreads) {
		// 	showDivider();
		// }
	};
	// The divider is the vertical rule between the two pages of a spread.
	var showDivider = function() {
		$divider.addClass("show");
	};
	var hideDivider = function() {
		$divider.removeClass("show");
	};
	// NOTE(review): keylock is set but never checked before paging, so rapid
	// keypresses are not actually throttled.
	var keylock = false;
	// Left/right arrow keys page the book, honouring RTL reading direction.
	var arrowKeys = function(e) {
		if(e.keyCode == 37) {
			if(book.package.metadata.direction === "rtl") {
				rendition.next();
			} else {
				rendition.prev();
			}
			$prev.addClass("active");
			keylock = true;
			setTimeout(function(){
				keylock = false;
				$prev.removeClass("active");
			}, 100);
			e.preventDefault();
		}
		if(e.keyCode == 39) {
			if(book.package.metadata.direction === "rtl") {
				rendition.prev();
			} else {
				rendition.next();
			}
			$next.addClass("active");
			keylock = true;
			setTimeout(function(){
				keylock = false;
				$next.removeClass("active");
			}, 100);
			e.preventDefault();
		}
	}
	document.addEventListener('keydown', arrowKeys, false);
	$next.on("click", function(e){
		if(book.package.metadata.direction === "rtl") {
			rendition.prev();
		} else {
			rendition.next();
		}
		e.preventDefault();
	});
	$prev.on("click", function(e){
		if(book.package.metadata.direction === "rtl") {
			rendition.next();
		} else {
			rendition.prev();
		}
		e.preventDefault();
	});
	// Show the spread divider only when a two-page spread is active.
	rendition.on("layout", function(props){
		if(props.spread === true) {
			showDivider();
		} else {
			hideDivider();
		}
	});
	// Grey out pager buttons at the book's edges.
	// NOTE(review): the "disabled" class is never removed again after moving
	// away from the edge — confirm whether that is intended.
	rendition.on('relocated', function(location){
		if (location.atStart) {
			$prev.addClass("disabled");
		}
		if (location.atEnd) {
			$next.addClass("disabled");
		}
	});
	return {
		"slideOut" : slideOut,
		"slideIn" : slideIn,
		"showLoader" : showLoader,
		"hideLoader" : hideLoader,
		"showDivider" : showDivider,
		"hideDivider" : hideDivider,
		"arrowKeys" : arrowKeys
	};
};
// Settings modal (sidebar-reflow option, close buttons).
// Called with `this` bound to the reader; returns show/hide for the modal.
EPUBJS.reader.SettingsController = function() {
	var reader = this;
	var book = this.book;
	var $settings = $("#settings-modal");
	var $overlay = $(".overlay");
	var $sidebarReflowSetting = $('#sidebarReflow');
	function show() {
		$settings.addClass("md-show");
	}
	function hide() {
		$settings.removeClass("md-show");
	}
	// Checkbox toggles whether the text reflows when the sidebar opens.
	$sidebarReflowSetting.on('click', function() {
		reader.settings.sidebarReflow = !reader.settings.sidebarReflow;
	});
	// Both the close button and the backdrop dismiss the modal.
	$settings.find(".closer").on("click", hide);
	$overlay.on("click", hide);
	return {
		"show" : show,
		"hide" : hide
	};
};
// Sidebar container: tracks the active panel (Toc/Bookmarks/Notes/...) and
// opens/closes the sidebar. Called with `this` bound to the reader.
EPUBJS.reader.SidebarController = function(book) {
	var reader = this;
	var $sidebar = $("#sidebar"),
		$panels = $("#panels");
	var activePanel = "Toc";
	// Swap panels by view name ("Toc" -> reader.TocController, etc.).
	// No-op when already active or the controller does not exist.
	var changePanelTo = function(viewName) {
		var controllerName = viewName + "Controller";
		if(activePanel == viewName || typeof reader[controllerName] === 'undefined' ) return;
		reader[activePanel+ "Controller"].hide();
		reader[controllerName].show();
		activePanel = viewName;
		$panels.find('.active').removeClass("active");
		$panels.find("#show-" + viewName ).addClass("active");
	};
	var getActivePanel = function() {
		return activePanel;
	};
	var show = function() {
		reader.sidebarOpen = true;
		reader.ReaderController.slideOut();
		$sidebar.addClass("open");
	}
	var hide = function() {
		reader.sidebarOpen = false;
		reader.ReaderController.slideIn();
		$sidebar.removeClass("open");
	}
	// Panel buttons carry their target view name in data-view.
	$panels.find(".show_view").on("click", function(event) {
		var view = $(this).data("view");
		changePanelTo(view);
		event.preventDefault();
	});
	return {
		'show' : show,
		'hide' : hide,
		'getActivePanel' : getActivePanel,
		'changePanelTo' : changePanelTo
	};
};
// TOC sidebar panel: builds a nested <ul> from the navigation TOC and keeps
// the current chapter highlighted. Called with `this` bound to the reader.
EPUBJS.reader.TocController = function(toc) {
	var book = this.book;
	var rendition = this.rendition;
	var $list = $("#tocView"),
		docfrag = document.createDocumentFragment();
	var currentChapter = false;
	// Recursively build list items for one TOC level.
	var generateTocItems = function(toc, level) {
		var container = document.createElement("ul");
		if(!level) level = 1;
		toc.forEach(function(chapter) {
			var listitem = document.createElement("li"),
				link = document.createElement("a");
			// Bug fix: `toggle` was assigned without `var`, leaking an
			// implicit global (and a ReferenceError in strict mode).
			var toggle = document.createElement("a");
			var subitems;
			listitem.id = "toc-"+chapter.id;
			listitem.classList.add('list_item');
			link.textContent = chapter.label;
			link.href = chapter.href;
			link.classList.add('toc_link');
			listitem.appendChild(link);
			if(chapter.subitems && chapter.subitems.length > 0) {
				level++;
				subitems = generateTocItems(chapter.subitems, level);
				toggle.classList.add('toc_toggle');
				listitem.insertBefore(toggle, link);
				listitem.appendChild(subitems);
			}
			container.appendChild(listitem);
		});
		return container;
	};
	var onShow = function() {
		$list.show();
	};
	var onHide = function() {
		$list.hide();
	};
	// Highlight the TOC entry for the section identified by e.id.
	var chapterChange = function(e) {
		var id = e.id,
			$item = $list.find("#toc-"+id),
			$current = $list.find(".currentChapter"),
			$open = $list.find('.openChapter');
		if($item.length){
			if($item != $current && $item.has(currentChapter).length > 0) {
				$current.removeClass("currentChapter");
			}
			$item.addClass("currentChapter");
			// $open.removeClass("openChapter");
			$item.parents('li').addClass("openChapter");
		}
	};
	// NOTE(review): 'renderered' looks like a typo for epub.js's 'rendered'
	// event, so this handler may never fire. Left unchanged here — confirm
	// the intended event name (and its payload shape) before fixing.
	rendition.on('renderered', chapterChange);
	var tocitems = generateTocItems(toc);
	docfrag.appendChild(tocitems);
	$list.append(docfrag);
	// Clicking a TOC link displays that href and moves the highlight.
	$list.find(".toc_link").on("click", function(event){
		var url = this.getAttribute('href');
		event.preventDefault();
		//-- Provide the Book with the url to show
		//   The Url must be found in the books manifest
		rendition.display(url);
		$list.find(".currentChapter")
			.addClass("openChapter")
			.removeClass("currentChapter");
		$(this).parent('li').addClass("currentChapter");
	});
	// Expand/collapse toggles for nested TOC entries.
	$list.find(".toc_toggle").on("click", function(event){
		var $el = $(this).parent('li'),
			open = $el.hasClass("openChapter");
		event.preventDefault();
		if(open){
			$el.removeClass("openChapter");
		} else {
			$el.addClass("openChapter");
		}
	});
	return {
		"show" : onShow,
		"hide" : onHide
	};
};
//# sourceMappingURL=reader.js.map | PypiClean |
/streamlit_icon_component-0.0.2.tar.gz/streamlit_icon_component-0.0.2/streamlit_icon_component/__init__.py | import os
import streamlit.components.v1 as components
# Create a _RELEASE constant. We'll set this to False while we're developing
# the component, and True when we're ready to package and distribute it.
# (This is, of course, optional - there are innumerable ways to manage your
# release process.)
_RELEASE = True

if not _RELEASE:
    # Development mode: serve the component frontend from the local dev
    # server started with `npm run start`, so frontend changes hot-reload.
    _component_func = components.declare_component(
        # We give the component a simple, descriptive name ("my_component"
        # does not fit this bill, so please choose something better for your
        # own component :)
        "streamlit_icon",
        # Pass `url` here to tell Streamlit that the component will be served
        # by the local dev server that you run via `npm run start`.
        # (This is useful while your component is in development.)
        url="http://localhost:3001",
    )
else:
    # Release mode: serve the component's pre-built static frontend assets
    # from the package's frontend/build directory.
    parent_dir = os.path.dirname(os.path.abspath(__file__))
    build_dir = os.path.join(parent_dir, "frontend/build")
    _component_func = components.declare_component("streamlit_icon", path=build_dir)
# Create a wrapper function for the component. This is an optional
# best practice - we could simply expose the component function returned by
# `declare_component` and call it done. The wrapper allows us to customize
# our component's API: we can pre-process its input args, post-process its
# output value, and add a docstring for users.
def streamlit_icon(icon_name, icon_button=False, disabled=False, icon_size="medium"):
    """Render the "streamlit_icon" component.

    This is the component's public API; it wraps the private
    ``_component_func`` returned by ``declare_component``.

    Parameters
    ----------
    icon_name : str
        Name of the icon the frontend should render (e.g. ``"delete_icon"``).
    icon_button : bool
        When True, render the icon as a clickable button.
    disabled : bool
        When True, render the (button) icon in a disabled state.
    icon_size : str
        Icon size; the demo below uses ``"small"``, ``"medium"`` and
        ``"large"``.

    Returns
    -------
    The component's current value, as set by the frontend via
    ``Streamlit.setComponentValue`` (truthy after a button icon is clicked),
    or the component default before any interaction.
    """
    # Call through to our private component function. Arguments we pass here
    # will be sent to the frontend, where they'll be available in an "args"
    # dictionary.
    component_value = _component_func(icon_name=icon_name, icon_button=icon_button, disabled=disabled, icon_size=icon_size)
    # The frontend's value could be post-processed here; it is returned as-is.
    return component_value
# Add some test code to play with the component while it's in development.
# During development, we can run this just as we would any other Streamlit
# app: `$ streamlit run my_component/__init__.py`
if not _RELEASE:
    import streamlit as st

    # Manual smoke test: run `streamlit run streamlit_icon_component/__init__.py`
    # while the frontend dev server is up.
    st.header("Icon Sizes")
    streamlit_icon("delete_icon", icon_size="small")
    streamlit_icon("delete_icon", icon_size="medium")
    streamlit_icon("delete_icon", icon_size="large")
    st.header("Button Icon")
    # A button icon returns a truthy component value once clicked.
    if (streamlit_icon("delete_icon", icon_button=True, icon_size="small")):
        st.write("CLICKED")
    st.header("Disabled Button Icon")
    streamlit_icon("delete_icon", icon_button=True, disabled=True, icon_size="small")
/azure_functions_worker-1.1.9-py3-none-any.whl/azure_functions_worker/protos/FunctionRpc_pb2_grpc.py | """Client and server classes corresponding to protobuf-defined services."""
import grpc
from azure_functions_worker.protos import FunctionRpc_pb2 as FunctionRpc__pb2
class FunctionRpcStub(object):
    """Client-side stub for the AzureFunctionsRpcMessages.FunctionRpc service.

    Generated gRPC code (do not hand-edit); exposes the bidirectional
    ``EventStream`` RPC.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Bidirectional streaming: client and server exchange
        # StreamingMessage protobufs over a single long-lived stream.
        self.EventStream = channel.stream_stream(
                '/AzureFunctionsRpcMessages.FunctionRpc/EventStream',
                request_serializer=FunctionRpc__pb2.StreamingMessage.SerializeToString,
                response_deserializer=FunctionRpc__pb2.StreamingMessage.FromString,
                )
class FunctionRpcServicer(object):
    """Server-side service interface (generated gRPC code).

    Subclass and override ``EventStream`` to implement the service.
    """

    def EventStream(self, request_iterator, context):
        """Bidirectional StreamingMessage stream; unimplemented in this base class."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_FunctionRpcServicer_to_server(servicer, server):
    """Register a FunctionRpcServicer implementation with a grpc server.

    Generated gRPC code: wraps the servicer's EventStream in a
    stream-stream method handler and attaches it as a generic handler.
    """
    rpc_method_handlers = {
            'EventStream': grpc.stream_stream_rpc_method_handler(
                    servicer.EventStream,
                    request_deserializer=FunctionRpc__pb2.StreamingMessage.FromString,
                    response_serializer=FunctionRpc__pb2.StreamingMessage.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'AzureFunctionsRpcMessages.FunctionRpc', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class FunctionRpc(object):
    """Static helpers for calling FunctionRpc without constructing a stub.

    Generated gRPC code using grpc's experimental single-call API.
    """

    @staticmethod
    def EventStream(request_iterator,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Opens the bidirectional StreamingMessage stream directly against
        # `target`, letting grpc manage the channel internally.
        return grpc.experimental.stream_stream(request_iterator, target, '/AzureFunctionsRpcMessages.FunctionRpc/EventStream',
            FunctionRpc__pb2.StreamingMessage.SerializeToString,
            FunctionRpc__pb2.StreamingMessage.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
/xdis-5.0.12.tar.gz/xdis-5.0.12/pytest/stackeffect/se25.py |
[
-100, # 0
-1, # 1,
0, # 2,
0, # 3,
1, # 4,
0, # 5,
-100, # 6
-100, # 7
-100, # 8
-100, # 9
0, # 10,
0, # 11,
0, # 12,
0, # 13,
-100, # 14
0, # 15,
-100, # 16
-100, # 17
-2, # 18,
-1, # 19,
-1, # 20,
-1, # 21,
-1, # 22,
-1, # 23,
-1, # 24,
-1, # 25,
-1, # 26,
-1, # 27,
-1, # 28,
-1, # 29,
1, # 30,
0, # 31,
0, # 32,
-1, # 33,
-100, # 34
-100, # 35
-100, # 36
-100, # 37
-100, # 38
-100, # 39
-2, # 40,
-3, # 41,
-3, # 42,
-4, # 43,
-100, # 44
-100, # 45
-100, # 46
-100, # 47
-100, # 48
-100, # 49
-1, # 50,
-2, # 51,
-2, # 52,
-3, # 53,
-100, # 54
-1, # 55,
-1, # 56,
-1, # 57,
-1, # 58,
-1, # 59,
-3, # 60,
-2, # 61,
-1, # 62,
-1, # 63,
-1, # 64,
-1, # 65,
-1, # 66,
-1, # 67,
0, # 68,
-100, # 69
-1, # 70,
-1, # 71,
0, # 72,
-2, # 73,
-1, # 74,
-1, # 75,
-1, # 76,
-1, # 77,
-1, # 78,
-1, # 79,
0, # 80,
-1, # 81,
1, # 82,
-1, # 83,
-1, # 84,
-3, # 85,
0, # 86,
0, # 87,
-1, # 88,
-2, # 89,
-1, # 90,
0, # 91,
-100, # 92
1, # 93,
-100, # 94
-2, # 95,
-1, # 96,
-1, # 97,
0, # 98,
-100, # 99
1, # 100,
1, # 101,
-100, # 102
-100, # 103
1, # 104,
0, # 105,
-1, # 106,
0, # 107,
1, # 108,
-100, # 109
0, # 110,
0, # 111,
0, # 112,
0, # 113,
-100, # 114
-100, # 115
1, # 116,
-100, # 117
-100, # 118
0, # 119,
0, # 120,
3, # 121,
3, # 122,
-100, # 123
1, # 124,
-1, # 125,
0, # 126,
-100, # 127
-100, # 128
-100, # 129
-100, # 130
-100, # 131
-100, # 132
-100, # 133
-100, # 134
1, # 135,
1, # 136,
-1, # 137,
-100, # 138
-100, # 139
-100, # 140
-100, # 141
-100, # 142
-100, # 143
-100, # 144
-100, # 145
-100, # 146
-100, # 147
-100, # 148
-100, # 149
-100, # 150
-100, # 151
-100, # 152
-100, # 153
-100, # 154
-100, # 155
-100, # 156
-100, # 157
-100, # 158
-100, # 159
-100, # 160
-100, # 161
-100, # 162
-100, # 163
-100, # 164
-100, # 165
-100, # 166
-100, # 167
-100, # 168
-100, # 169
-100, # 170
-100, # 171
-100, # 172
-100, # 173
-100, # 174
-100, # 175
-100, # 176
-100, # 177
-100, # 178
-100, # 179
-100, # 180
-100, # 181
-100, # 182
-100, # 183
-100, # 184
-100, # 185
-100, # 186
-100, # 187
-100, # 188
-100, # 189
-100, # 190
-100, # 191
-100, # 192
-100, # 193
-100, # 194
-100, # 195
-100, # 196
-100, # 197
-100, # 198
-100, # 199
-100, # 200
-100, # 201
-100, # 202
-100, # 203
-100, # 204
-100, # 205
-100, # 206
-100, # 207
-100, # 208
-100, # 209
-100, # 210
-100, # 211
-100, # 212
-100, # 213
-100, # 214
-100, # 215
-100, # 216
-100, # 217
-100, # 218
-100, # 219
-100, # 220
-100, # 221
-100, # 222
-100, # 223
-100, # 224
-100, # 225
-100, # 226
-100, # 227
-100, # 228
-100, # 229
-100, # 230
-100, # 231
-100, # 232
-100, # 233
-100, # 234
-100, # 235
-100, # 236
-100, # 237
-100, # 238
-100, # 239
-100, # 240
-100, # 241
-100, # 242
-100, # 243
-100, # 244
-100, # 245
-100, # 246
-100, # 247
-100, # 248
-100, # 249
-100, # 250
-100, # 251
-100, # 252
-100, # 253
-100, # 254
-100, # 255
] | PypiClean |
/askbot-tuan-1.5.tar.gz/askbot-tuan-1.5/askbot/setup_templates/static/default/media/js/tinymce/plugins/template/editor_plugin_src.js | (function() {
var each = tinymce.each;
tinymce.create('tinymce.plugins.TemplatePlugin', {
init : function(ed, url) {
var t = this;
t.editor = ed;
// Register commands
ed.addCommand('mceTemplate', function(ui) {
ed.windowManager.open({
file : url + '/template.htm',
width : ed.getParam('template_popup_width', 750),
height : ed.getParam('template_popup_height', 600),
inline : 1
}, {
plugin_url : url
});
});
ed.addCommand('mceInsertTemplate', t._insertTemplate, t);
// Register buttons
ed.addButton('template', {title : 'template.desc', cmd : 'mceTemplate'});
ed.onPreProcess.add(function(ed, o) {
var dom = ed.dom;
each(dom.select('div', o.node), function(e) {
if (dom.hasClass(e, 'mceTmpl')) {
each(dom.select('*', e), function(e) {
if (dom.hasClass(e, ed.getParam('template_mdate_classes', 'mdate').replace(/\s+/g, '|')))
e.innerHTML = t._getDateTime(new Date(), ed.getParam("template_mdate_format", ed.getLang("template.mdate_format")));
});
t._replaceVals(e);
}
});
});
},
getInfo : function() {
return {
longname : 'Template plugin',
author : 'Moxiecode Systems AB',
authorurl : 'http://www.moxiecode.com',
infourl : 'http://wiki.moxiecode.com/index.php/TinyMCE:Plugins/template',
version : tinymce.majorVersion + "." + tinymce.minorVersion
};
},
_insertTemplate : function(ui, v) {
var t = this, ed = t.editor, h, el, dom = ed.dom, sel = ed.selection.getContent();
h = v.content;
each(t.editor.getParam('template_replace_values'), function(v, k) {
if (typeof(v) != 'function')
h = h.replace(new RegExp('\\{\\$' + k + '\\}', 'g'), v);
});
el = dom.create('div', null, h);
// Find template element within div
n = dom.select('.mceTmpl', el);
if (n && n.length > 0) {
el = dom.create('div', null);
el.appendChild(n[0].cloneNode(true));
}
function hasClass(n, c) {
return new RegExp('\\b' + c + '\\b', 'g').test(n.className);
};
each(dom.select('*', el), function(n) {
// Replace cdate
if (hasClass(n, ed.getParam('template_cdate_classes', 'cdate').replace(/\s+/g, '|')))
n.innerHTML = t._getDateTime(new Date(), ed.getParam("template_cdate_format", ed.getLang("template.cdate_format")));
// Replace mdate
if (hasClass(n, ed.getParam('template_mdate_classes', 'mdate').replace(/\s+/g, '|')))
n.innerHTML = t._getDateTime(new Date(), ed.getParam("template_mdate_format", ed.getLang("template.mdate_format")));
// Replace selection
if (hasClass(n, ed.getParam('template_selected_content_classes', 'selcontent').replace(/\s+/g, '|')))
n.innerHTML = sel;
});
t._replaceVals(el);
ed.execCommand('mceInsertContent', false, el.innerHTML);
ed.addVisual();
},
_replaceVals : function(e) {
var dom = this.editor.dom, vl = this.editor.getParam('template_replace_values');
each(dom.select('*', e), function(e) {
each(vl, function(v, k) {
if (dom.hasClass(e, k)) {
if (typeof(vl[k]) == 'function')
vl[k](e);
}
});
});
},
_getDateTime : function(d, fmt) {
if (!fmt)
return "";
function addZeros(value, len) {
var i;
value = "" + value;
if (value.length < len) {
for (i=0; i<(len-value.length); i++)
value = "0" + value;
}
return value;
}
fmt = fmt.replace("%D", "%m/%d/%y");
fmt = fmt.replace("%r", "%I:%M:%S %p");
fmt = fmt.replace("%Y", "" + d.getFullYear());
fmt = fmt.replace("%y", "" + d.getYear());
fmt = fmt.replace("%m", addZeros(d.getMonth()+1, 2));
fmt = fmt.replace("%d", addZeros(d.getDate(), 2));
fmt = fmt.replace("%H", "" + addZeros(d.getHours(), 2));
fmt = fmt.replace("%M", "" + addZeros(d.getMinutes(), 2));
fmt = fmt.replace("%S", "" + addZeros(d.getSeconds(), 2));
fmt = fmt.replace("%I", "" + ((d.getHours() + 11) % 12 + 1));
fmt = fmt.replace("%p", "" + (d.getHours() < 12 ? "AM" : "PM"));
fmt = fmt.replace("%B", "" + this.editor.getLang("template_months_long").split(',')[d.getMonth()]);
fmt = fmt.replace("%b", "" + this.editor.getLang("template_months_short").split(',')[d.getMonth()]);
fmt = fmt.replace("%A", "" + this.editor.getLang("template_day_long").split(',')[d.getDay()]);
fmt = fmt.replace("%a", "" + this.editor.getLang("template_day_short").split(',')[d.getDay()]);
fmt = fmt.replace("%%", "%");
return fmt;
}
});
// Register plugin
tinymce.PluginManager.add('template', tinymce.plugins.TemplatePlugin);
})(); | PypiClean |
/py_bigdata_util-0.1.0-py3-none-any.whl/bigdata_util/util/base_config.py |
from . import get_logger
from pyhocon import ConfigFactory, HOCONConverter, ConfigTree
import os
from configobj import ConfigObj
import pydash
logger = get_logger(__file__)
class BaseConfig(object):
    """Base class for HOCON-backed configuration objects.

    Merges configuration passed in code with configuration read from a
    HOCON file.  Subclasses list their mandatory keys in
    ``_needed_config_attr``; missing keys are stubbed into the file as
    TODO placeholders and reported via an exception on construction.
    """

    # Merged ConfigTree holding the effective configuration.
    _config = None
    # Keys that must be present; enforced by __check_config().
    _needed_config_attr = []
    # Default path of the backing HOCON file.
    _config_file_path = 'config.conf'

    def get(self, name, default_value=''):
        # Look up a key, falling back to default_value when absent.
        return self._config.get(name, default_value)

    def put(self, name, value):
        # Store a key/value pair into the merged configuration tree.
        return self._config.put(name, value)

    def __init__(self, config=None, config_file_path='config.conf'):
        """
        Configuration may be passed in directly and/or read from a file.

        :param config: optional dict of configuration values
        :param config_file_path: path of the HOCON configuration file
        """
        config_from_file = self.__load_config_from_file(config_file_path)
        if config_from_file is None:
            config_from_file = []
        # NOTE(review): ConfigFactory.from_dict(None) appears to return
        # None, which the guard below normalizes to an empty tree — confirm.
        config_from_parameter = ConfigFactory.from_dict(config)
        if config_from_parameter is None:
            config_from_parameter = []

        self._config_file_path = config_file_path
        # Merge the two trees; presumably the second (parameter) tree wins
        # on conflicting keys — verify against pyhocon merge semantics.
        self._config = ConfigTree.merge_configs(ConfigTree(config_from_file), ConfigTree(config_from_parameter))
        self._abs_config_file_path = os.path.abspath(self._config_file_path)

        self.__check_config()
        pass

    @staticmethod
    def __load_config_from_file(config_file_path):
        """
        Load configuration from a HOCON file.

        Missing files are tolerated (``required=False``).

        :param config_file_path: path to read
        :return: parsed configuration tree
        """
        return ConfigFactory.parse_file(config_file_path, required=False)

    def __check_config(self):
        """
        Verify the configuration is complete; raise with a message listing
        every missing key.  Keys that are entirely absent are also appended
        to the config file as TODO placeholders for the user to fill in.

        :return: None
        """
        attr_temp_place_holder = 'TODO: <!- please add config here. ->'
        missing_config_attr = []
        append_to_file = []
        for config_attr in self._needed_config_attr:
            # A key counts as missing when absent OR still set to the
            # placeholder written by a previous run.
            if config_attr not in self._config or self._config[config_attr] == attr_temp_place_holder:
                if config_attr not in self._config:
                    append_to_file.append('\n' + config_attr + ' = "' + attr_temp_place_holder + '"')
                    # self._config.put(config_attr, attr_temp_place_holder)
                missing_config_attr.append('\n - "' + config_attr + '"')

        if len(missing_config_attr) > 0:
            if not os.path.isfile(self._config_file_path):
                # File does not exist yet: write the current config (if any)
                # plus placeholder lines for every missing key.
                with open(self._config_file_path, 'w') as writer:
                    if HOCONConverter.to_hocon(self._config) != '[]':
                        writer.write(HOCONConverter.to_hocon(self._config))
                    for line in append_to_file:
                        writer.write(line)
            elif len(append_to_file) > 0:
                # File exists: only append the placeholder lines.
                with open(self._config_file_path, 'a') as writer:
                    for line in append_to_file:
                        writer.write(line)
            raise Exception('''
            Config needed, please edit "{config_file_path}" and fill needed config.
            Please open =====> "{abs_config_file_path}" <===== and fill needed config!!!
            missing configs:
            {missing_config_keys}
            '''.format(config_file_path=self._config_file_path, abs_config_file_path=self._abs_config_file_path,
                       missing_config_keys=' ,'.join(missing_config_attr)))
        pass

    @staticmethod
    def save_odps_ini(odps_config, file_name):
        """
        Persist an ODPS configuration mapping to an INI file, duplicating
        'project'/'endpoint' under the legacy keys 'project_name'/'end_point'.

        NOTE(review): assumes odps_config contains 'project' and 'endpoint'
        keys — a KeyError here is not caught.

        :param odps_config: mapping of ODPS settings
        :param file_name: destination INI file path
        """
        cfg = ConfigObj(encoding='utf8')
        cfg.filename = file_name
        for k in odps_config.keys():
            cfg[k] = odps_config[k]
        cfg['project_name'] = cfg['project']
        cfg['end_point'] = cfg['endpoint']
        try:
            cfg.write()
        except Exception as e:
            # Best effort: a write failure is logged, not raised.
            logger.error(e)
            pass
    pass
/avirtech_report_metashape_2-0.0.9-py3-none-any.whl/process_foto/process_foto.py | import os
import csv
import shutil
class process_foto:
    """Trim every photo folder under ``location`` down to its first nine files.

    For each key in ``chunk_shape_dictionary`` the directory
    ``location/<key>`` is scanned; every sub-directory inside it is treated
    as a photo folder, and all files beyond the first nine (in directory
    listing order) are deleted.  Finally the resulting directory tree is
    printed.
    """

    def __init__(self, location, chunk_shape_dictionary):
        # Stored for callers that construct an instance; the worker below is
        # a staticmethod and receives these values explicitly.
        self.location = location
        self.chunk_shape_dictionary = chunk_shape_dictionary

    @staticmethod
    def process_foto(location, chunk_shape_dictionary):
        """Delete every photo beyond the first nine in each leaf folder.

        :param location: root directory containing one folder per chunk key
        :param chunk_shape_dictionary: mapping whose keys name the chunk
            folders directly under ``location`` (values are ignored)
        """
        # location/<key> directories, one per chunk key.
        chunk_dirs = [os.path.join(str(location), str(key))
                      for key in chunk_shape_dictionary]

        # location/<key>/<folder> leaf directories holding the photos.
        # NOTE: like the original, this assumes every entry under a chunk
        # directory is itself a directory.
        leaf_dirs = []
        for chunk_dir in chunk_dirs:
            for folder in os.listdir(chunk_dir):
                leaf_dirs.append(os.path.join(chunk_dir, folder))

        # Record every file, and separately the first nine of each folder
        # that must be kept.  Using a set makes the membership test O(1)
        # instead of the original O(n) list scan per file.
        all_files = []
        kept = set()
        for leaf in leaf_dirs:
            names = os.listdir(leaf)
            for name in names:
                all_files.append(os.path.join(leaf, name))
            for name in names[:9]:
                kept.add(os.path.join(leaf, name))

        # Remove everything that was not selected for keeping.
        # (The original printed the whole deletion list once per deleted
        # file; print only the file actually being removed.)
        for path in all_files:
            if path not in kept:
                os.remove(path)
                print("Deleting File ", path)

        # Report the resulting directory tree, as before.
        for (root, dirs, files) in os.walk(location):
            print(root)
/kraken-core-0.3.6.tar.gz/kraken-core-0.3.6/src/kraken/core/utils.py | from __future__ import annotations
import contextlib
import enum
import importlib
import os
import shutil
import sys
import tempfile
from pathlib import Path
from typing import IO, AnyStr, BinaryIO, ContextManager, Iterable, Iterator, TextIO, TypeVar, overload
from typing_extensions import Literal
T = TypeVar("T")
def flatten(it: Iterable[Iterable[T]]) -> Iterable[T]:
for item in it:
yield from item
def not_none(v: T | None) -> T:
if v is None:
raise RuntimeError("expected not-None")
return v
class NotSet(enum.Enum):
Value = 1
@overload
def atomic_file_swap(
path: str | Path,
mode: Literal["w"],
always_revert: bool = ...,
create_dirs: bool = ...,
) -> ContextManager[TextIO]:
...
@overload
def atomic_file_swap(
path: str | Path,
mode: Literal["wb"],
always_revert: bool = ...,
create_dirs: bool = ...,
) -> ContextManager[BinaryIO]:
...
@contextlib.contextmanager # type: ignore
def atomic_file_swap(
path: str | Path,
mode: Literal["w", "wb"],
always_revert: bool = False,
create_dirs: bool = False,
) -> Iterator[IO[AnyStr]]:
"""Performs an atomic write to a file while temporarily moving the original file to a different random location.
Args:
path: The path to replace.
mode: The open mode for the file (text or binary).
always_revert: If enabled, swap the old file back into place even if the with context has no errors.
create_dirs: If the file does not exist, and neither do its parent directories, create the directories.
The directory will be removed if the operation is reverted.
"""
path = Path(path)
with contextlib.ExitStack() as exit_stack:
if path.is_file():
old = exit_stack.enter_context(
tempfile.NamedTemporaryFile(
mode,
prefix=path.stem + "~",
suffix="~" + path.suffix,
dir=path.parent,
)
)
old.close()
os.rename(path, old.name)
else:
old = None
def _revert() -> None:
assert isinstance(path, Path)
if path.is_file():
path.unlink()
if old is not None:
os.rename(old.name, path)
if not path.parent.is_dir() and create_dirs:
path.parent.mkdir(exist_ok=True)
_old_revert = _revert
def _revert() -> None:
assert isinstance(path, Path)
try:
shutil.rmtree(path.parent)
finally:
_old_revert()
try:
with path.open(mode) as new:
yield new
except BaseException:
_revert()
raise
else:
if always_revert:
_revert()
else:
if old is not None:
os.remove(old.name)
@overload
def import_class(fqn: str) -> type:
...
@overload
def import_class(fqn: str, base_type: type[T]) -> type[T]:
...
def import_class(fqn: str, base_type: type[T] | None = None) -> type[T]:
mod_name, cls_name = fqn.rpartition(".")[::2]
module = importlib.import_module(mod_name)
cls = getattr(module, cls_name)
if not isinstance(cls, type):
raise TypeError(f"expected type object at {fqn!r}, got {type(cls).__name__}")
if base_type is not None and not issubclass(cls, base_type):
raise TypeError(f"expected subclass of {base_type} at {fqn!r}, got {cls}")
return cls
def get_terminal_width(default: int = 80) -> int:
"""Returns the terminal width through :func:`os.get_terminal_size`, falling back to the `COLUMNS`
environment variable. If neither is available, return *default*."""
try:
terminal_width = os.get_terminal_size().columns
except OSError:
try:
terminal_width = int(os.getenv("COLUMNS", ""))
except ValueError:
terminal_width = default
return terminal_width
def is_relative_to(apath: Path, bpath: Path) -> bool:
"""Checks if *apath* is a path relative to *bpath*."""
if sys.version_info[:2] < (3, 9):
try:
apath.relative_to(bpath)
return True
except ValueError:
return False
else:
return apath.is_relative_to(bpath) | PypiClean |
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/syntax/_masm.py | __author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: _masm.py 68798 2011-08-20 17:17:05Z CJP $"
__revision__ = "$Revision: 68798 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
# Local Imports
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
# Each keyword spec below is a (keyword-set-index, space-separated-words)
# tuple consumed by the Scintilla ASM lexer via SyntaxData.GetKeywords().

# MASM CPU Instructions/Operators
MASM_CPU_INST = (0, "aaa aad aam aas adc and arpl bound bsf bsr bswap bt btc "
                    "btr bts call cdw cdq clc cld cli clts cmc cmp cmps cmpsb "
                    "cmpsw cmpsd cmpxchng cwd cwde daa das enter in ins insb "
                    "insw insd int into invd invlpg iret iretd ja jae jb jbe "
                    "jc jcxz jecxz je jz jg jge jl jle jna jnae jnb jnbe jnc "
                    "jne jng jnge jnl jnle jno jnp jns jnz jo jp jpe jpo js jz "
                    "jmp lahf lar lea leave lgdt lidt lgs lss lfs lods lodsb "
                    "lodsw lodsd loop loope loopz loone loopne retf retn lds "
                    "les lldt lmsw lock lsl ltr mov movs movsb movsw movsd "
                    "movsx movzx neg nop not or out outs outsb outsw outsd "
                    "pop popa popd popf popfd push pusha pushad pushf pushfd "
                    "rcl rcr rol roro rep repe repz repne repnz ret sahf sal "
                    "sar shl shr sbb scas scasb scasw scasd seta setae setb "
                    "setbe setc sete setg setge setl setle setna setnae setnb "
                    "setnbe setnc setne setng setnge setnl setnle setno setnp "
                    "setns setnz seto setp setpe setpo ses setz sgdt sidt shld "
                    "shrd sldt smsw stc std sti stos stosb stosw stosd str "
                    "test verr verw wait wbinvd xchg xlat xlatb xor add dec "
                    "idiv imul inc mul sub xadd div "
                    # MMX/SSE/SSE2 Instructions
                    "cflush cpuid emms femms cmovo cmovno cmovb cmovc cmovnae "
                    "cmovae cmovnb cmovnc cmove cmovz cmovne cmovnz cmovbe "
                    "cmovna cmova cmovnbe cmovs cmovns cmovp cmovpe cmovnp "
                    "cmovpo cmovl cmovnge cmovge cmovnl cmovle cmovng cmovg "
                    "cmovnle cmpxchg486 cmpxchg8b loadall loadall286 ibts "
                    "icebp int1 int3 int01 int03 iretw popaw popfw pushaw "
                    "pushfw rdmsr rdpmc rdshr rdtsc rsdc rsldt rsm rsts salc "
                    "smi smint smintold svdc svldt svts syscall sysenter "
                    "sysexit sysret ud0 ud1 ud2 umov xbts wrmsr wrshr")

# floating point instructions
MASM_FPU_INST = (1, "f2xm1 fabs fadd faddp fbld fbstp fchs fclex fcom fcomp "
                    "fcompp fdecstp fdisi fdiv fdivp fdivr fdivrp feni ffree "
                    "fiadd ficom ficomp fidiv fidivr fild fimul fincstp finit "
                    "fist fistp fisub fisubr fld fld1 fldcw fldenv fldenvw "
                    "fldl2e fldl2t fldlg2 fldln2 fldpi fldz fmul fmulp fnclex "
                    "fndisi fneni fninit fnop fnsave fnsavew fnstcw fnstenv "
                    "fnstenvw fnstsw fpatan fprem fptan frndint frstor frstorw "
                    "fsave fsavew fscale fsqrt fst fstcw fstenv fstenvw fstp "
                    "fstsw fsub fsubp fsubr fsubrp ftst fwait fxam fxch "
                    "fxtract fyl2x fyl2xp1 fsetpm fcos fldenvd fnsaved "
                    "fnstenvd fprem1 frstord fsaved fsin fsincos fstenvd fucom "
                    "fucomp fucompp fcomi fcomip ffreep fcmovb fcmove fcmovbe "
                    "fcmovu fcmovnb fcmovne fcmovnbe fcmovnu ")

# CPU, FPU, MMX and SSE register names
MASM_REGISTERS = (2, "ah al ax bh bl bp bx ch cl cr0 cr2 cr3 cr4 cs cx dh di "
                     "dl dr0 dr1 dr2 dr3 dr6 dr7 ds dx eax ebp ebx ecx edi edx "
                     "es esi esp fs gs si sp ss st tr3 tr4 tr5 tr6 tr7 st0 st1 "
                     "st2 st3 st4 st5 st6 st7 mm0 mm1 mm2 mm3 mm4 mm5 mm6 mm7 "
                     "xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7")

# MASM assembler directives
MASM_DIRECTIVES = (3, ".186 .286 .286c .286p .287 .386 .386c .386p .387 .486 "
                      ".486p .8086 .8087 .alpha .break .code .const .continue "
                      ".cref .data .data? .dosseg .else .elseif .endif .endw "
                      ".err .err1 .err2 .errb .errdef .errdif .errdifi .erre "
                      ".erridn .erridni .errnb .errndef .errnz .exit .fardata "
                      ".fardata? .if .lall .lfcond .list .listall .listif "
                      ".listmacro .listmacroall .model .no87 .nocref .nolist "
                      ".nolistif .nolistmacro .radix .repeat .sall .seq "
                      ".sfcond .stack .startup .tfcond .type .until .untilcxz "
                      ".while .xall .xcref .xlist alias align assume catstr "
                      "comm comment db dd df dosseg dq dt dup dw echo else "
                      "elseif elseif1 elseif2 elseifb elseifdef elseifdif "
                      "elseifdifi elseife elseifidn elseifidni elseifnb "
                      "elseifndef end endif endm endp ends eq equ even exitm "
                      "extern externdef extrn for forc ge goto group gt high "
                      "highword if if1 if2 ifb ifdef ifdif ifdifi ife ifidn "
                      "ifidni ifnb ifndef include includelib instr invoke irp "
                      "irpc label le length lengthof local low lowword "
                      "lroffset lt macro mask mod .msfloat name ne offset "
                      "opattr option org %out page popcontext proc proto ptr "
                      "public purge pushcontext record repeat rept seg segment "
                      "short size sizeof sizestr struc struct substr subtitle "
                      "subttl textequ this title type typedef union while "
                      "width")

# Operands usable with the directives above
MASM_DIREC_OP = (4, "$ ? @b @f addr basic byte c carry? dword far far16 "
                    "fortran fword near near16 overflow? parity? pascal qword "
                    "real4 real8 real10 sbyte sdword sign? stdcall sword "
                    "syscall tbyte vararg word zero? flat near32 far32 abs all "
                    "assumes at casemap common compact cpu dotname emulator "
                    "epilogue error export expr16 expr32 farstack flat "
                    "forceframe huge language large listing ljmp loadds m510 "
                    "medium memory nearstack nodotname noemulator nokeyword "
                    "noljmp nom510 none nonunique nooldmacros nooldstructs "
                    "noreadonly noscoped nosignextend nothing notpublic "
                    "oldmacros oldstructs os_dos para private prologue radix "
                    "readonly req scoped setif2 smallstack tiny use16 use32 "
                    "uses")

# Extended (SSE/SSE2/3DNow!) instruction mnemonics
MASM_EXT_INST = (5, "addpd addps addsd addss andpd andps andnpd andnps cmpeqpd "
                    "cmpltpd cmplepd cmpunordpd cmpnepd cmpnltpd cmpnlepd "
                    "cmpordpd cmpeqps cmpltps cmpleps cmpunordps cmpneps "
                    "cmpnltps cmpnleps cmpordps cmpeqsd cmpltsd cmplesd "
                    "cmpunordsd cmpnesd cmpnltsd cmpnlesd cmpordsd cmpeqss "
                    "cmpltss cmpless cmpunordss cmpness cmpnltss cmpnless "
                    "cmpordss comisd comiss cvtdq2pd cvtdq2ps cvtpd2dq "
                    "cvtpd2pi cvtpd2ps cvtpi2pd cvtpi2ps cvtps2dq cvtps2pd "
                    "cvtps2pi cvtss2sd cvtss2si cvtsd2si cvtsd2ss cvtsi2sd "
                    "cvtsi2ss cvttpd2dq cvttpd2pi cvttps2dq cvttps2pi "
                    "cvttsd2si cvttss2si divpd divps divsd divss fxrstor "
                    "fxsave ldmxscr lfence mfence maskmovdqu maskmovdq maxpd "
                    "maxps paxsd maxss minpd minps minsd minss movapd movaps "
                    "movdq2q movdqa movdqu movhlps movhpd movhps movd movq "
                    "movlhps movlpd movlps movmskpd movmskps movntdq movnti "
                    "movntpd movntps movntq movq2dq movsd movss movupd movups "
                    "mulpd mulps mulsd mulss orpd orps packssdw packsswb "
                    "packuswb paddb paddsb paddw paddsw paddd paddsiw paddq "
                    "paddusb paddusw pand pandn pause paveb pavgb pavgw "
                    "pavgusb pdistib pextrw pcmpeqb pcmpeqw pcmpeqd pcmpgtb "
                    "pcmpgtw pcmpgtd pf2id pf2iw pfacc pfadd pfcmpeq pfcmpge "
                    "pfcmpgt pfmax pfmin pfmul pmachriw pmaddwd pmagw pmaxsw "
                    "pmaxub pminsw pminub pmovmskb pmulhrwc pmulhriw "
                    "pmulhrwa pmulhuw pmulhw pmullw pmuludq pmvzb pmvnzb "
                    "pmvlzb pmvgezb pfnacc pfpnacc por prefetch prefetchw "
                    "prefetchnta prefetcht0 prefetcht1 prefetcht2 pfrcp "
                    "pfrcpit1 pfrcpit2 pfrsqit1 pfrsqrt pfsub pfsubr pi2fd "
                    "pf2iw pinsrw psadbw pshufd pshufhw pshuflw pshufw psllw "
                    "pslld psllq pslldq psraw psrad psrlw psrld psrlq psrldq "
                    "psubb psubw psubd psubq psubsb psubsw psubusb psubusw "
                    "psubsiw pswapd punpckhbw punpckhwd punpckhdq punpckhqdq "
                    "punpcklbw punpcklwd punpckldq punpcklqdq pxor rcpps "
                    "rcpss rsqrtps rsqrtss sfence shufpd shufps sqrtpd sqrtps "
                    "sqrtsd sqrtss stmxcsr subpd subps subsd subss ucomisd "
                    "ucomiss unpckhpd unpckhps unpcklpd unpcklps xorpd xorps")

#---- Language Styling Specs ----#
# Mapping of Scintilla ASM lexer style ids to Editra style tag names.
SYNTAX_ITEMS = [ (stc.STC_ASM_DEFAULT, 'default_style'),
                 (stc.STC_ASM_CHARACTER, 'char_style'),
                 (stc.STC_ASM_COMMENT, 'comment_style'),
                 (stc.STC_ASM_COMMENTBLOCK, 'comment_style'),
                 (stc.STC_ASM_CPUINSTRUCTION, 'keyword_style'),
                 (stc.STC_ASM_DIRECTIVE, 'keyword3_style'),
                 (stc.STC_ASM_DIRECTIVEOPERAND, 'keyword4_style'),
                 (stc.STC_ASM_EXTINSTRUCTION, 'funct_style'),
                 (stc.STC_ASM_IDENTIFIER, 'default_style'),
                 (stc.STC_ASM_MATHINSTRUCTION, 'keyword_style'),
                 (stc.STC_ASM_NUMBER, 'number_style'),
                 (stc.STC_ASM_OPERATOR, 'operator_style'),
                 (stc.STC_ASM_REGISTER, 'keyword2_style'),
                 (stc.STC_ASM_STRING, 'string_style'),
                 (stc.STC_ASM_STRINGEOL, 'stringeol_style') ]
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
    """Editor syntax configuration for MASM assembly source files."""

    def __init__(self, langid):
        super(SyntaxData, self).__init__(langid)

        # MASM is highlighted with Scintilla's generic assembly lexer.
        self.SetLexer(stc.STC_LEX_ASM)

    def GetKeywords(self):
        """Return the ordered keyword specifications for the lexer."""
        keyword_sets = [MASM_CPU_INST, MASM_FPU_INST, MASM_REGISTERS,
                        MASM_DIRECTIVES, MASM_DIREC_OP, MASM_EXT_INST]
        return keyword_sets

    def GetSyntaxSpec(self):
        """Return the lexer-style to editor-style-tag mapping."""
        return SYNTAX_ITEMS

    def GetCommentPattern(self):
        """Return the character list used to comment out a line."""
        return [u';']
/diffpy.pdfgui-2.0.0.tar.gz/diffpy.pdfgui-2.0.0/src/diffpy/pdfgui/doc/manual/extractEquations.py | # constants
rc = {
'directory' : 'images', # output directory
'resolution' : 72, # equation images resolution
'eqns' : [], # list of raw equation codes
'tmpdir' : None, # temporary directory
}
eqlatex = r"""
\documentclass{article}
\usepackage{exscale}
\pagestyle{empty}
\setlength{\oddsidemargin}{0in}
\setlength{\textwidth}{7in}
\begin{document}
\huge
%s
\end{document}
""".lstrip()
eqmark = "@EquationMark"
##############################################################################
# business
import sys
import os
import shutil
def loadEquations():
    """Search for equation codes preceded by @EquationMark macro.

    Reads every file named on the command line and appends each
    ``@tex`` ... ``@end tex`` body that follows the marker to
    ``rc['eqns']``.
    """
    lines = []
    for filename in sys.argv[1:]:
        # Context manager closes each input file (the original leaked
        # one open handle per file).
        with open(filename) as fhandle:
            lines.extend(fhandle.readlines())
    # Three-state scan: find the mark, then the @tex opener, then
    # collect lines until @end tex.
    atmark = False
    attex = False
    eqlines = []
    for line in lines:
        bareline = line.strip().rstrip('{}')
        if bareline == eqmark:
            atmark = True
            continue
        elif atmark and bareline == "@tex":
            attex = True
            continue
        elif attex and bareline == "@end tex":
            atmark = False
            attex = False
            eq = ''.join(eqlines) + '\n'
            rc['eqns'].append(eq)
            eqlines = []
        elif attex:
            eqlines.append(line)
    return
def writePNGFiles():
    """Render every equation in rc['eqns'] to a PNG under rc['directory'].

    Each equation is written as a LaTeX document into a fresh temporary
    directory, compiled and converted by convertToPNG(), and the resulting
    image is copied into the output directory.
    """
    from tempfile import mkdtemp
    rc['tmpdir'] = mkdtemp()
    rc['directory'] = os.path.abspath(rc['directory'])
    for index, eqn in enumerate(rc['eqns']):
        fname = "eq-%02i.tex" % (index + 1)
        fpath = os.path.join(rc['tmpdir'], fname)
        # "with" guarantees the .tex file is flushed and closed before
        # latex runs on it, even if write() fails.
        with open(fpath, 'w') as fhandle:
            fhandle.write(eqlatex % eqn)
        convertToPNG(fpath)
        pngsrc = fpath[:-4] + ".png"
        pngdst = os.path.join(rc['directory'], fname[:-4] + ".png")
        shutil.copyfile(pngsrc, pngdst)
    return
def convertToPNG(texfile):
    """Compile texfile to PostScript and crop-convert it to a PNG.

    Runs latex, dvips and ImageMagick's convert in the directory of
    *texfile*; the process exits with status 1 when any tool fails.
    """
    os.chdir(os.path.dirname(texfile))
    texbasename = os.path.splitext(os.path.basename(texfile))[0]
    # Run the latex/dvips tool chain, aborting on the first failure.
    # (Replaces the original "os.system(cmd) != 0 and sys.exit(1)"
    # expression-as-statement idiom with explicit checks.)
    for cmd in ("latex --interaction nonstopmode %r" % texbasename,
                "dvips %r" % texbasename):
        if os.system(cmd) != 0:
            sys.exit(1)
    psfilename = texbasename + ".ps"
    bb = getBoundingBox(psfilename)
    pgbb = getPageBoundingBox(psfilename)
    # Convert PostScript points to pixels at the requested resolution.
    pt2px = rc['resolution'] / 72.0
    xpx = pt2px * bb[0]
    ypx = pt2px * (pgbb[3] - bb[3])
    wpx = pt2px * (bb[2] - bb[0])
    hpx = pt2px * (bb[3] - bb[1])
    geometry = "%ix%i+%i+%i" % (wpx, hpx, xpx, ypx)
    pngfilename = texbasename + ".png"
    cmd = "convert -strip -density %i %r -crop %s +repage %r" % \
          (rc['resolution'], psfilename, geometry, pngfilename)
    if os.system(cmd) != 0:
        sys.exit(1)
    return
def getBoundingBox(psfilename):
    """Run ghostscript to obtain effective bounding box of psfilename.

    Return a list of the four integer bounding box coordinates.
    """
    # os.popen3 was removed in Python 3; use subprocess instead.
    import subprocess
    cmd = ["gs", "-dNOPAUSE", "-dBATCH", "-q", "-sDEVICE=bbox", psfilename]
    # gs sends its bbox output to stderr; capture both streams and
    # concatenate them, as the original popen3-based code did.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    out, err = proc.communicate()
    out += err
    bb = [int(w) for w in out.split()[1:5]]
    return bb
def getPageBoundingBox(psfilename):
    """Obtain the bounding box value declared inside psfilename.

    Return a list of the four integer bounding box coordinates.
    """
    import re
    pattern = re.compile('^%%BoundingBox: *(.*)$', re.M)
    with open(psfilename) as fp:
        contents = fp.read()
    bbline = pattern.search(contents)
    words = bbline.group(1).split()[:4]
    return [int(word) for word in words]
def main():
    """Extract all marked equations from the input files and render PNGs."""
    loadEquations()
    writePNGFiles()
    # Remove the scratch directory created by writePNGFiles().
    shutil.rmtree(rc['tmpdir'])


if __name__ == "__main__":
    main()
/django-flower-1.0.0.tar.gz/django-flower-1.0.0/flower/views/tasks.py | from __future__ import absolute_import
import copy
import logging
from functools import total_ordering
from flower.utils import login_required_admin
from django.utils.decorators import method_decorator
from flower.exceptions import HTTPError
try:
from itertools import imap
except ImportError:
imap = map
from ..views import BaseHandler
from ..utils.tasks import iter_tasks, get_task_by_id, as_dict
logger = logging.getLogger(__name__)
class TaskView(BaseHandler):
    """Detail page for a single Celery task."""

    @method_decorator(login_required_admin)
    def get(self, request, task_id):
        # Any lookup failure (unknown id or a backend error) is reported
        # to the client as a plain 404 rather than a server error.
        try:
            task = get_task_by_id(self.settings.state, task_id)
        except Exception:
            raise HTTPError(404, "Unknown task '%s'" % task_id)
        return self.render("flower/task.html", context={'task': task})
@total_ordering
class Comparable(object):
    """
    Compare two objects, one or more of which may be None. If one of the
    values is None, the other will be deemed greater.
    """
    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return self.value == other.value

    def __lt__(self, other):
        try:
            return self.value < other.value
        except TypeError:
            # Unorderable pair (e.g. None vs int on Python 3): only a None
            # value sorts before a non-None one.  The original returned
            # True when BOTH values were None, contradicting __eq__; two
            # None values are now equal, not less-than.
            return self.value is None and other.value is not None
class TasksDataTable(BaseHandler):
    """JSON endpoint backing the jQuery DataTables task list.

    Implements the DataTables server-side processing protocol: the client
    sends paging (start/length), search and ordering parameters and
    receives the matching slice of tasks plus record counts.
    """

    @method_decorator(login_required_admin)
    def get(self, request):
        state = self.settings.state
        # Standard DataTables server-side protocol parameters.
        draw = self.get_argument('draw', type=int)
        start = self.get_argument('start', type=int)
        length = self.get_argument('length', type=int)
        search = self.get_argument('search[value]', type=str)

        # Resolve the column index the client orders by into the
        # corresponding task attribute name.
        column = self.get_argument('order[0][column]', type=int)
        sort_by = self.get_argument('columns[%s][data]' % column, type=str)
        sort_order = self.get_argument('order[0][dir]', type=str) == 'desc'

        def key(item):
            # Wrap in Comparable so rows whose attribute is None still
            # sort deterministically instead of raising TypeError.
            return Comparable(getattr(item[1], sort_by))

        sorted_tasks = sorted(
            iter_tasks(state, search=search),
            key=key,
            reverse=sort_order
        )

        filtered_tasks = []

        # Serialize only the page of tasks the client requested.
        for task in sorted_tasks[start:start + length]:
            task_dict = as_dict(self.format_task(task)[1])
            if task_dict.get('worker'):
                # Flatten the worker object to its hostname string.
                task_dict['worker'] = task_dict['worker'].hostname

            filtered_tasks.append(task_dict)

        return self.write(dict(draw=draw, data=filtered_tasks,
                               recordsTotal=len(sorted_tasks),
                               recordsFiltered=len(sorted_tasks)))

    @method_decorator(login_required_admin)
    def post(self, request):
        # DataTables may POST the same parameters; delegate to get().
        return self.get(request)

    def format_task(self, args):
        """Apply the user-supplied format_task hook to a (uuid, task) pair."""
        uuid, task = args
        custom_format_task = self.settings.format_task

        if custom_format_task:
            # A faulty user hook must not break the listing; log and fall
            # back to the unformatted task.
            try:
                task = custom_format_task(copy.copy(task))
            except:
                logger.exception("Failed to format '%s' task", uuid)
        return uuid, task
class TasksView(BaseHandler):
    """HTML page listing all known tasks."""

    @method_decorator(login_required_admin)
    def get(self, request, *args, **kwargs):
        settings = self.settings
        app = settings.app

        # Choose the client-side time renderer, optionally suffixed with
        # the configured Celery timezone.
        time = 'natural-time' if settings.natural_time else 'time'
        timezone = app.conf.CELERY_TIMEZONE
        if timezone:
            time = time + '-' + str(timezone)

        context = dict(
            tasks=[],
            columns=settings.tasks_columns,
            time=time,
        )
        return self.render("flower/tasks.html", context=context)
/MetaGram-2.0.2.tar.gz/MetaGram-2.0.2/pyrogram/types/bots_and_keyboards/login_url.py |
from pyrogram import raw
from ..object import Object
class LoginUrl(Object):
    """Represents a parameter of the inline keyboard button used to automatically authorize a user.

    Serves as a great replacement for the Telegram Login Widget when the user is coming from Telegram.
    All the user needs to do is tap/click a button and confirm that they want to log in.

    Parameters:
        url (``str``):
            An HTTP URL to be opened with user authorization data added to the query string when the button is pressed.
            If the user refuses to provide authorization data, the original URL without information about the user will
            be opened. The data added is the same as described in
            `Receiving authorization data <https://core.telegram.org/widgets/login#receiving-authorization-data>`.

            **NOTE**: You **must** always check the hash of the received data to verify the authentication and the
            integrity of the data as described in
            `Checking authorization <https://core.telegram.org/widgets/login#checking-authorization>`_.

        forward_text (``str``, *optional*):
            New text of the button in forwarded messages.

        bot_username (``str``, *optional*):
            Username of a bot, which will be used for user authorization.
            See `Setting up a bot <https://core.telegram.org/widgets/login#setting-up-a-bot>`_ for more details.
            If not specified, the current bot's username will be assumed. The url's domain must be the same as the
            domain linked with the bot.
            See `Linking your domain to the bot <https://core.telegram.org/widgets/login#linking-your-domain-to-the-bot>`_
            for more details.

        request_write_access (``bool``, *optional*):
            Pass True to request the permission for your bot to send messages to the user.

        button_id (``int``, *optional*):
            Button identifier.
    """

    def __init__(
        self, *,
        url: str,
        forward_text: str = None,
        bot_username: str = None,
        request_write_access: str = None,
        button_id: int = None
    ):
        super().__init__()

        self.url = url
        self.forward_text = forward_text
        self.bot_username = bot_username
        self.request_write_access = request_write_access
        self.button_id = button_id

    @staticmethod
    def read(b: "raw.types.KeyboardButtonUrlAuth") -> "LoginUrl":
        # Build a LoginUrl from the incoming raw Telegram object.
        # Note: only url, fwd_text and button_id are carried by the raw type.
        return LoginUrl(
            url=b.url,
            forward_text=b.fwd_text,
            button_id=b.button_id
        )

    def write(self, text: str, bot: "raw.types.InputUser"):
        # Convert back into the raw outgoing type used when sending a
        # keyboard; button_id is not part of the outgoing raw object.
        return raw.types.InputKeyboardButtonUrlAuth(
            text=text,
            url=self.url,
            bot=bot,
            fwd_text=self.forward_text,
            request_write_access=self.request_write_access
        )
/ka-lite-0.17.6b4.tar.gz/ka-lite-0.17.6b4/kalite/distributed/static/js/distributed/perseus/ke/local-only/localeplanet/icu.zh-MO.js | (function() {
var dfs = {"am_pm":["上午","下午"],"day_name":["星期日","星期一","星期二","星期三","星期四","星期五","星期六"],"day_short":["週日","週一","週二","週三","週四","週五","週六"],"era":["西元前","西元"],"era_name":["西元前","西元"],"month_name":["1月","2月","3月","4月","5月","6月","7月","8月","9月","10月","11月","12月"],"month_short":["1月","2月","3月","4月","5月","6月","7月","8月","9月","10月","11月","12月"],"order_full":"YMD","order_long":"YMD","order_medium":"YMD","order_short":"YMD"};
var nfs = {"decimal_separator":".","grouping_separator":",","minus":"-"};
var df = {SHORT_PADDED_CENTURY:function(d){if(d){return(d.getFullYear()+'-'+((d.getMonth()+101)+'').substring(1)+'-'+((d.getDate()+101)+'').substring(1));}},SHORT:function(d){if(d){return((d.getFullYear()+'').substring(2)+'-'+(d.getMonth()+1)+'-'+d.getDate());}},SHORT_NOYEAR:function(d){if(d){return((d.getMonth()+1)+'-'+d.getDate());}},SHORT_NODAY:function(d){if(d){return((d.getFullYear()+'').substring(2)+'-'+(d.getMonth()+1));}},MEDIUM:function(d){if(d){return(d.getFullYear()+'-'+(d.getMonth()+1)+'-'+d.getDate());}},MEDIUM_NOYEAR:function(d){if(d){return((d.getMonth()+1)+'-'+d.getDate());}},MEDIUM_WEEKDAY_NOYEAR:function(d){if(d){return(dfs.day_short[d.getDay()]+' '+(d.getMonth()+1)+'-'+d.getDate());}},LONG_NODAY:function(d){if(d){return(d.getFullYear()+'年'+(d.getMonth()+1));}},LONG:function(d){if(d){return(d.getFullYear()+'年'+(d.getMonth()+1)+'月'+d.getDate()+'日');}},FULL:function(d){if(d){return(d.getFullYear()+'年'+(d.getMonth()+1)+'月'+d.getDate()+'日'+' '+dfs.day_name[d.getDay()]);}}};
window.icu = window.icu || new Object();
var icu = window.icu;
icu.getCountry = function() { return "MO" };
icu.getCountryName = function() { return "中華人民共和國澳門特別行政區" };
icu.getDateFormat = function(formatCode) { var retVal = {}; retVal.format = df[formatCode]; return retVal; };
icu.getDateFormats = function() { return df; };
icu.getDateFormatSymbols = function() { return dfs; };
icu.getDecimalFormat = function(places) { var retVal = {}; retVal.format = function(n) { var ns = n < 0 ? Math.abs(n).toFixed(places) : n.toFixed(places); var ns2 = ns.split('.'); s = ns2[0]; var d = ns2[1]; var rgx = /(\d+)(\d{3})/;while(rgx.test(s)){s = s.replace(rgx, '$1' + nfs["grouping_separator"] + '$2');} return (n < 0 ? nfs["minus"] : "") + s + nfs["decimal_separator"] + d;}; return retVal; };
icu.getDecimalFormatSymbols = function() { return nfs; };
icu.getIntegerFormat = function() { var retVal = {}; retVal.format = function(i) { var s = i < 0 ? Math.abs(i).toString() : i.toString(); var rgx = /(\d+)(\d{3})/;while(rgx.test(s)){s = s.replace(rgx, '$1' + nfs["grouping_separator"] + '$2');} return i < 0 ? nfs["minus"] + s : s;}; return retVal; };
icu.getLanguage = function() { return "zh" };
icu.getLanguageName = function() { return "中文" };
icu.getLocale = function() { return "null" };
icu.getLocaleName = function() { return "中文(中華人民共和國澳門特別行政區)" };
})(); | PypiClean |
/fds.sdk.ExchangeDataFeedSnapshotAPIEntireExchange-0.21.7-py3-none-any.whl/fds/sdk/ExchangeDataFeedSnapshotAPIEntireExchange/model_utils.py | from datetime import date, datetime # noqa: F401
from copy import deepcopy
import inspect
import io
import os
import pprint
import re
import tempfile
from dateutil.parser import parse
from fds.sdk.ExchangeDataFeedSnapshotAPIEntireExchange.exceptions import (
ApiKeyError,
ApiAttributeError,
ApiTypeError,
ApiValueError,
)
# Module-level aliases used throughout the type-validation helpers below.
none_type = type(None)  # the class of None (NoneType)
file_type = io.IOBase  # base class used to recognize file-handle values
def convert_js_args_to_python_args(fn):
    """Decorator that remaps spec (JS) keyword names to python names.

    When the wrapped callable is invoked with ``_spec_property_naming=True``
    the keyword arguments are translated from the serialized names used in
    the OpenAPI document to snake_case python names before ``fn`` runs.
    """
    from functools import wraps

    @wraps(fn)
    def wrapped_init(_self, *args, **kwargs):
        """
        An attribute named `self` received from the api would conflict with
        the reserved `self` parameter of a method, so generated models map it
        to `_self`; we mirror that here by naming the first parameter `_self`.
        """
        if kwargs.get('_spec_property_naming', False):
            target_cls = _self if isinstance(_self, type) else _self.__class__
            kwargs = change_keys_js_to_python(kwargs, target_cls)
        return fn(_self, *args, **kwargs)
    return wrapped_init
class cached_property(object):
    """Descriptor caching the result of a zero-argument callable.

    Use as a decorator on a callable taking no inputs: the first attribute
    access invokes it and stores the result on the descriptor object itself
    (under ``result_key``), so every later access — from any instance of the
    owning class — returns the same cached value without calling again.
    """
    result_key = '_results'

    def __init__(self, fn):
        self._fn = fn

    def __get__(self, instance, cls=None):
        try:
            # Cached on the descriptor, not the instance: one shared result.
            return vars(self)[self.result_key]
        except KeyError:
            computed = self._fn()
            setattr(self, self.result_key, computed)
            return computed
PRIMITIVE_TYPES = (list, float, int, bool, datetime, date, str, file_type)
def allows_single_value_input(cls):
    """Return True if *cls* (or any oneOf descendant) accepts a bare value.

    True for ModelSimple subclasses and primitive types, and for composed
    schemas whose oneOf list contains at least one such candidate, e.g.::

        oneOf:
          - float
          - NumberWithValidation
          - StringEnum
          - ArrayModel
          - null

    TODO: lru_cache this
    """
    if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
        return True
    if not issubclass(cls, ModelComposed):
        return False
    oneof_schemas = cls._composed_schemas['oneOf']
    if not oneof_schemas:
        return False
    # Recurse: a single value is acceptable if any oneOf branch accepts one.
    return any(allows_single_value_input(candidate) for candidate in oneof_schemas)
def composed_model_input_classes(cls):
    """Return the list of classes acceptable as inputs for *cls*.

    Simple models and primitives stand for themselves; models with a
    discriminator expand to their discriminated classes; composed models
    without one expand recursively through their oneOf branches.

    TODO: lru_cache this
    """
    if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
        return [cls]
    if issubclass(cls, ModelNormal):
        if cls.discriminator is not None:
            return get_discriminated_classes(cls)
        return [cls]
    if issubclass(cls, ModelComposed):
        oneof_schemas = cls._composed_schemas['oneOf']
        if not oneof_schemas:
            return []
        if cls.discriminator is not None:
            return get_discriminated_classes(cls)
        collected = []
        for candidate in oneof_schemas:
            collected.extend(composed_model_input_classes(candidate))
        return collected
    return []
class OpenApiModel(object):
"""The base class for all OpenAPIModels"""
def set_attribute(self, name, value):
# this is only used to set properties on self
path_to_item = []
if self._path_to_item:
path_to_item.extend(self._path_to_item)
path_to_item.append(name)
if name in self.openapi_types:
required_types_mixed = self.openapi_types[name]
elif self.additional_properties_type is None:
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
path_to_item
)
elif self.additional_properties_type is not None:
required_types_mixed = self.additional_properties_type
if get_simple_class(name) != str:
error_msg = type_error_message(
var_name=name,
var_value=name,
valid_classes=(str,),
key_type=True
)
raise ApiTypeError(
error_msg,
path_to_item=path_to_item,
valid_classes=(str,),
key_type=True
)
if self._check_type:
value = validate_and_convert_types(
value, required_types_mixed, path_to_item, self._spec_property_naming,
self._check_type, configuration=self._configuration)
if (name,) in self.allowed_values:
check_allowed_values(
self.allowed_values,
(name,),
value
)
if (name,) in self.validations:
check_validations(
self.validations,
(name,),
value,
self._configuration
)
self.__dict__['_data_store'][name] = value
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
def __setattr__(self, attr, value):
"""set the value of an attribute using dot notation: `instance.attr = val`"""
self[attr] = value
def __getattr__(self, attr):
"""get the value of an attribute using dot notation: `instance.attr`"""
return self.__getitem__(attr)
def __copy__(self):
cls = self.__class__
if self.get("_spec_property_naming", False):
return cls._new_from_openapi_data(**self.__dict__)
else:
return new_cls.__new__(cls, **self.__dict__)
def __deepcopy__(self, memo):
cls = self.__class__
if self.get("_spec_property_naming", False):
new_inst = cls._new_from_openapi_data()
else:
new_inst = cls.__new__(cls)
for k, v in self.__dict__.items():
setattr(new_inst, k, deepcopy(v, memo))
return new_inst
def __new__(cls, *args, **kwargs):
# this function uses the discriminator to
# pick a new schema/class to instantiate because a discriminator
# propertyName value was passed in
if len(args) == 1:
arg = args[0]
if arg is None and is_type_nullable(cls):
# The input data is the 'null' value and the type is nullable.
return None
if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
model_kwargs = {}
oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
return oneof_instance
visited_composed_classes = kwargs.get('_visited_composed_classes', ())
if (
cls.discriminator is None or
cls in visited_composed_classes
):
# Use case 1: this openapi schema (cls) does not have a discriminator
# Use case 2: we have already visited this class before and are sure that we
# want to instantiate it this time. We have visited this class deserializing
# a payload with a discriminator. During that process we traveled through
# this class but did not make an instance of it. Now we are making an
# instance of a composed class which contains cls in it, so this time make an instance of cls.
#
# Here's an example of use case 2: If Animal has a discriminator
# petType and we pass in "Dog", and the class Dog
# allOf includes Animal, we move through Animal
# once using the discriminator, and pick Dog.
# Then in the composed schema dog Dog, we will make an instance of the
# Animal class (because Dal has allOf: Animal) but this time we won't travel
# through Animal's discriminator because we passed in
# _visited_composed_classes = (Animal,)
return super(OpenApiModel, cls).__new__(cls)
# Get the name and value of the discriminator property.
# The discriminator name is obtained from the discriminator meta-data
# and the discriminator value is obtained from the input data.
discr_propertyname_py = list(cls.discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if discr_propertyname_js in kwargs:
discr_value = kwargs[discr_propertyname_js]
elif discr_propertyname_py in kwargs:
discr_value = kwargs[discr_propertyname_py]
else:
# The input data does not contain the discriminator property.
path_to_item = kwargs.get('_path_to_item', ())
raise ApiValueError(
"Cannot deserialize input data due to missing discriminator. "
"The discriminator property '%s' is missing at path: %s" %
(discr_propertyname_js, path_to_item)
)
# Implementation note: the last argument to get_discriminator_class
# is a list of visited classes. get_discriminator_class may recursively
# call itself and update the list of visited classes, and the initial
# value must be an empty list. Hence not using 'visited_composed_classes'
new_cls = get_discriminator_class(
cls, discr_propertyname_py, discr_value, [])
if new_cls is None:
path_to_item = kwargs.get('_path_to_item', ())
disc_prop_value = kwargs.get(
discr_propertyname_js, kwargs.get(discr_propertyname_py))
raise ApiValueError(
"Cannot deserialize input data due to invalid discriminator "
"value. The OpenAPI document has no mapping for discriminator "
"property '%s'='%s' at path: %s" %
(discr_propertyname_js, disc_prop_value, path_to_item)
)
if new_cls in visited_composed_classes:
# if we are making an instance of a composed schema Descendent
# which allOf includes Ancestor, then Ancestor contains
# a discriminator that includes Descendent.
# So if we make an instance of Descendent, we have to make an
# instance of Ancestor to hold the allOf properties.
# This code detects that use case and makes the instance of Ancestor
# For example:
# When making an instance of Dog, _visited_composed_classes = (Dog,)
# then we make an instance of Animal to include in dog._composed_instances
# so when we are here, cls is Animal
# cls.discriminator != None
# cls not in _visited_composed_classes
# new_cls = Dog
# but we know we know that we already have Dog
# because it is in visited_composed_classes
# so make Animal here
return super(OpenApiModel, cls).__new__(cls)
# Build a list containing all oneOf and anyOf descendants.
oneof_anyof_classes = None
if cls._composed_schemas is not None:
oneof_anyof_classes = (
cls._composed_schemas.get('oneOf', ()) +
cls._composed_schemas.get('anyOf', ()))
oneof_anyof_child = new_cls in oneof_anyof_classes
kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
if cls._composed_schemas.get('allOf') and oneof_anyof_child:
# Validate that we can make self because when we make the
# new_cls it will not include the allOf validations in self
self_inst = super(OpenApiModel, cls).__new__(cls)
self_inst.__init__(*args, **kwargs)
if kwargs.get("_spec_property_naming", False):
# when true, implies new is from deserialization
new_inst = new_cls._new_from_openapi_data(*args, **kwargs)
else:
new_inst = new_cls.__new__(new_cls, *args, **kwargs)
new_inst.__init__(*args, **kwargs)
return new_inst
@classmethod
@convert_js_args_to_python_args
def _new_from_openapi_data(cls, *args, **kwargs):
# this function uses the discriminator to
# pick a new schema/class to instantiate because a discriminator
# propertyName value was passed in
if len(args) == 1:
arg = args[0]
if arg is None and is_type_nullable(cls):
# The input data is the 'null' value and the type is nullable.
return None
if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
model_kwargs = {}
oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
return oneof_instance
visited_composed_classes = kwargs.get('_visited_composed_classes', ())
if (
cls.discriminator is None or
cls in visited_composed_classes
):
# Use case 1: this openapi schema (cls) does not have a discriminator
# Use case 2: we have already visited this class before and are sure that we
# want to instantiate it this time. We have visited this class deserializing
# a payload with a discriminator. During that process we traveled through
# this class but did not make an instance of it. Now we are making an
# instance of a composed class which contains cls in it, so this time make an instance of cls.
#
# Here's an example of use case 2: If Animal has a discriminator
# petType and we pass in "Dog", and the class Dog
# allOf includes Animal, we move through Animal
# once using the discriminator, and pick Dog.
# Then in the composed schema dog Dog, we will make an instance of the
# Animal class (because Dal has allOf: Animal) but this time we won't travel
# through Animal's discriminator because we passed in
# _visited_composed_classes = (Animal,)
return cls._from_openapi_data(*args, **kwargs)
# Get the name and value of the discriminator property.
# The discriminator name is obtained from the discriminator meta-data
# and the discriminator value is obtained from the input data.
discr_propertyname_py = list(cls.discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if discr_propertyname_js in kwargs:
discr_value = kwargs[discr_propertyname_js]
elif discr_propertyname_py in kwargs:
discr_value = kwargs[discr_propertyname_py]
else:
# The input data does not contain the discriminator property.
path_to_item = kwargs.get('_path_to_item', ())
raise ApiValueError(
"Cannot deserialize input data due to missing discriminator. "
"The discriminator property '%s' is missing at path: %s" %
(discr_propertyname_js, path_to_item)
)
# Implementation note: the last argument to get_discriminator_class
# is a list of visited classes. get_discriminator_class may recursively
# call itself and update the list of visited classes, and the initial
# value must be an empty list. Hence not using 'visited_composed_classes'
new_cls = get_discriminator_class(
cls, discr_propertyname_py, discr_value, [])
if new_cls is None:
path_to_item = kwargs.get('_path_to_item', ())
disc_prop_value = kwargs.get(
discr_propertyname_js, kwargs.get(discr_propertyname_py))
raise ApiValueError(
"Cannot deserialize input data due to invalid discriminator "
"value. The OpenAPI document has no mapping for discriminator "
"property '%s'='%s' at path: %s" %
(discr_propertyname_js, disc_prop_value, path_to_item)
)
if new_cls in visited_composed_classes:
# if we are making an instance of a composed schema Descendent
# which allOf includes Ancestor, then Ancestor contains
# a discriminator that includes Descendent.
# So if we make an instance of Descendent, we have to make an
# instance of Ancestor to hold the allOf properties.
# This code detects that use case and makes the instance of Ancestor
# For example:
# When making an instance of Dog, _visited_composed_classes = (Dog,)
# then we make an instance of Animal to include in dog._composed_instances
# so when we are here, cls is Animal
# cls.discriminator != None
# cls not in _visited_composed_classes
# new_cls = Dog
# but we know we know that we already have Dog
# because it is in visited_composed_classes
# so make Animal here
return cls._from_openapi_data(*args, **kwargs)
# Build a list containing all oneOf and anyOf descendants.
oneof_anyof_classes = None
if cls._composed_schemas is not None:
oneof_anyof_classes = (
cls._composed_schemas.get('oneOf', ()) +
cls._composed_schemas.get('anyOf', ()))
oneof_anyof_child = new_cls in oneof_anyof_classes
kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
if cls._composed_schemas.get('allOf') and oneof_anyof_child:
# Validate that we can make self because when we make the
# new_cls it will not include the allOf validations in self
self_inst = cls._from_openapi_data(*args, **kwargs)
new_inst = new_cls._new_from_openapi_data(*args, **kwargs)
return new_inst
class ModelSimple(OpenApiModel):
    """The parent class of models whose type != object in their
    swagger/openapi.

    The wrapped scalar/array lives under the 'value' key of _data_store.
    """

    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
        if name in self.required_properties:
            self.__dict__[name] = value
            return

        self.set_attribute(name, value)

    def get(self, name, default=None):
        """returns the value of an attribute or some default value if the attribute was not set"""
        if name in self.required_properties:
            return self.__dict__[name]

        return self.__dict__['_data_store'].get(name, default)

    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation: `instance[attr]`"""
        if name in self:
            return self.get(name)

        raise ApiAttributeError(
            "{0} has no attribute '{1}'".format(
                type(self).__name__, name),
            [e for e in [self._path_to_item, name] if e]
        )

    def __contains__(self, name):
        """used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
        if name in self.required_properties:
            return name in self.__dict__

        return name in self.__dict__['_data_store']

    def to_str(self):
        """Returns the string representation of the model"""
        return str(self.value)

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, self.__class__):
            return False

        # FIX: the generated template built an unused `types` set here;
        # only the wrapped values need comparing.
        return self._data_store['value'] == other._data_store['value']
class ModelNormal(OpenApiModel):
    """The parent class of models whose type == object in their
    swagger/openapi.

    Properties are stored in _data_store keyed by python attribute name.
    """

    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
        if name in self.required_properties:
            self.__dict__[name] = value
            return

        self.set_attribute(name, value)

    def get(self, name, default=None):
        """returns the value of an attribute or some default value if the attribute was not set"""
        if name in self.required_properties:
            return self.__dict__[name]

        return self.__dict__['_data_store'].get(name, default)

    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation: `instance[attr]`"""
        if name in self:
            return self.get(name)

        raise ApiAttributeError(
            "{0} has no attribute '{1}'".format(
                type(self).__name__, name),
            [e for e in [self._path_to_item, name] if e]
        )

    def __contains__(self, name):
        """used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
        if name in self.required_properties:
            return name in self.__dict__

        return name in self.__dict__['_data_store']

    def to_dict(self):
        """Returns the model properties as a dict"""
        return model_to_dict(self, serialize=False)

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, self.__class__):
            return False

        if not set(self._data_store.keys()) == set(other._data_store.keys()):
            return False
        # FIX: the generated template built an unused `types` set per
        # property; a direct value comparison is all that is needed.
        for _var_name, this_val in self._data_store.items():
            if this_val != other._data_store[_var_name]:
                return False
        return True
class ModelComposed(OpenApiModel):
    """The parent class of models whose type == object in their
    swagger/openapi and have oneOf/allOf/anyOf

    When one sets a property we use var_name_to_model_instances to store the value in
    the correct class instances + run any type checking + validation code.
    When one gets a property we use var_name_to_model_instances to get the value
    from the correct class instances.
    This allows multiple composed schemas to contain the same property with additive
    constraints on the value.

    _composed_schemas (dict) stores the anyOf/allOf/oneOf classes
    key (str): allOf/oneOf/anyOf
    value (list): the classes in the XOf definition.
        Note: none_type can be included when the openapi document version >= 3.1.0
    _composed_instances (list): stores a list of instances of the composed schemas
    defined in _composed_schemas. When properties are accessed in the self instance,
    they are returned from the self._data_store or the data stores in the instances
    in self._composed_schemas
    _var_name_to_model_instances (dict): maps between a variable name on self and
    the composed instances (self included) which contain that data
    key (str): property name
    value (list): list of class instances, self or instances in _composed_instances
    which contain the value that the key is referring to.
    """

    def __setitem__(self, name, value):
        """set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
        if name in self.required_properties:
            self.__dict__[name] = value
            return

        # Use cases:
        # 1. additional_properties_type is None (additionalProperties == False in spec)
        #    Check for property presence in self.openapi_types;
        #    if not present then throw an error;
        #    if present set in self, set attribute;
        #    always set on composed schemas.
        # 2. additional_properties_type exists
        #    set attribute on self; always set on composed schemas.
        if self.additional_properties_type is None:
            # For an attribute to exist on a composed schema it must:
            # - fulfill schema_requirements in the self composed schema not
            #   considering oneOf/anyOf/allOf schemas AND
            # - fulfill schema_requirements in each oneOf/anyOf/allOf schema.
            # schema_requirements: the attribute is present in properties,
            # or additionalProperties is unset (defaults to any type), or
            # additionalProperties is set.
            if name not in self.openapi_types:
                raise ApiAttributeError(
                    "{0} has no attribute '{1}'".format(
                        type(self).__name__, name),
                    [e for e in [self._path_to_item, name] if e]
                )
        # attribute must be set on self and composed instances
        self.set_attribute(name, value)
        for model_instance in self._composed_instances:
            setattr(model_instance, name, value)
        if name not in self._var_name_to_model_instances:
            # we assigned an additional property
            self.__dict__['_var_name_to_model_instances'][name] = self._composed_instances + [self]
        return None

    # Sentinel distinguishing "attribute unset" from a stored None value.
    __unset_attribute_value__ = object()

    def get(self, name, default=None):
        """returns the value of an attribute or some default value if the attribute was not set"""
        if name in self.required_properties:
            return self.__dict__[name]

        # get the attribute from the correct instance
        model_instances = self._var_name_to_model_instances.get(name)
        values = []
        # A composed model stores self and child (oneof/anyOf/allOf) models under
        # self._var_name_to_model_instances.
        # Any property must exist in self and all model instances
        # The value stored in all model instances must be the same
        if model_instances:
            for model_instance in model_instances:
                if name in model_instance._data_store:
                    v = model_instance._data_store[name]
                    if v not in values:
                        values.append(v)
        len_values = len(values)
        if len_values == 0:
            return default
        elif len_values == 1:
            return values[0]
        elif len_values > 1:
            raise ApiValueError(
                "Values stored for property {0} in {1} differ when looking "
                "at self and self's composed instances. All values must be "
                "the same".format(name, type(self).__name__),
                [e for e in [self._path_to_item, name] if e]
            )

    def __getitem__(self, name):
        """get the value of an attribute using square-bracket notation: `instance[attr]`"""
        value = self.get(name, self.__unset_attribute_value__)
        if value is self.__unset_attribute_value__:
            raise ApiAttributeError(
                "{0} has no attribute '{1}'".format(
                    type(self).__name__, name),
                [e for e in [self._path_to_item, name] if e]
            )
        return value

    def __contains__(self, name):
        """used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
        if name in self.required_properties:
            return name in self.__dict__

        model_instances = self._var_name_to_model_instances.get(
            name, self._additional_properties_model_instances)

        if model_instances:
            for model_instance in model_instances:
                if name in model_instance._data_store:
                    return True

        return False

    def to_dict(self):
        """Returns the model properties as a dict"""
        return model_to_dict(self, serialize=False)

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, self.__class__):
            return False

        if not set(self._data_store.keys()) == set(other._data_store.keys()):
            return False
        # FIX: the generated template built an unused `types` set per
        # property; a direct value comparison is all that is needed.
        for _var_name, this_val in self._data_store.items():
            if this_val != other._data_store[_var_name]:
                return False
        return True
# Preference order used when coercing input data: lower index = tried first.
# Model classes come before builtins so schema-aware conversion wins.
COERCION_INDEX_BY_TYPE = {
    ModelComposed: 0,
    ModelNormal: 1,
    ModelSimple: 2,
    none_type: 3,  # The type of 'None'.
    list: 4,
    dict: 5,
    float: 6,
    int: 7,
    bool: 8,
    datetime: 9,
    date: 10,
    str: 11,
    file_type: 12,  # 'file_type' is an alias for the built-in 'file' or 'io.IOBase' type.
}
# these are used to limit what type conversions we try to do
# when we have a valid type already and we want to try converting
# to another type
# FIX: removed a duplicate (list, ModelComposed) entry; the tuple is only
# used for membership tests, so the duplicate was dead weight.
UPCONVERSION_TYPE_PAIRS = (
    (str, datetime),
    (str, date),
    (int, float),  # A float may be serialized as an integer, e.g. '3' is a valid serialized float.
    (list, ModelComposed),
    (dict, ModelComposed),
    (str, ModelComposed),
    (int, ModelComposed),
    (float, ModelComposed),
    (list, ModelNormal),
    (dict, ModelNormal),
    (str, ModelSimple),
    (int, ModelSimple),
    (float, ModelSimple),
    (list, ModelSimple),
)
# (from_type, to_type) conversions permitted per direction; keyed by
# spec_property_naming: False = client-built data, True = server response.
COERCIBLE_TYPE_PAIRS = {
    False: (  # client instantiation of a model with client data
        # (dict, ModelComposed),
        # (list, ModelComposed),
        # (dict, ModelNormal),
        # (list, ModelNormal),
        # (str, ModelSimple),
        # (int, ModelSimple),
        # (float, ModelSimple),
        # (list, ModelSimple),
        # (str, int),
        # (str, float),
        # (str, datetime),
        # (str, date),
        # (int, str),
        # (float, str),
    ),
    True: (  # server -> client data
        (dict, ModelComposed),
        (list, ModelComposed),
        (dict, ModelNormal),
        (list, ModelNormal),
        (str, ModelSimple),
        (int, ModelSimple),
        (float, ModelSimple),
        (list, ModelSimple),
        # (str, int),
        # (str, float),
        (str, datetime),
        (str, date),
        # (int, str),
        # (float, str),
        (str, file_type)
    ),
}
def get_simple_class(input_value):
    """Return the class used for type-checking *input_value*.

    Classes pass through unchanged; instances map to their container or
    scalar class.  Branch order is significant and must be preserved:
    bool before int (isinstance(True, int) is True) and datetime before
    date (datetime instances are also date instances).

    Args:
        input_value (class/class_instance): the item for which we will return
            the simple class
    """
    if isinstance(input_value, type):
        return input_value  # already a class
    if isinstance(input_value, tuple):
        return tuple
    if isinstance(input_value, list):
        return list
    if isinstance(input_value, dict):
        return dict
    if isinstance(input_value, none_type):
        return none_type
    if isinstance(input_value, file_type):
        return file_type
    if isinstance(input_value, bool):
        # checked before int: isinstance(True, int) == True
        return bool
    if isinstance(input_value, int):
        return int
    if isinstance(input_value, datetime):
        # checked before date: isinstance(datetime_instance, date) == True
        return datetime
    if isinstance(input_value, date):
        return date
    if isinstance(input_value, str):
        return str
    return type(input_value)
def check_allowed_values(allowed_values, input_variable_path, input_values):
    """Raises an exception if the input_values are not allowed

    Args:
        allowed_values (dict): the allowed_values dict
        input_variable_path (tuple): the path to the input variable
        input_values (list/str/int/float/date/datetime): the values that we
            are checking to see if they are in allowed_values

    Raises:
        ApiValueError: if any value (list), key (dict), or the scalar value
            itself is not among the allowed values.
    """
    these_allowed_values = list(allowed_values[input_variable_path].values())
    if (isinstance(input_values, list)
            and not set(input_values).issubset(
                set(these_allowed_values))):
        # FIX: a stray trailing comma previously made invalid_values a
        # 1-tuple, so the error message rendered a tuple repr.
        invalid_values = ", ".join(
            map(str, set(input_values) - set(these_allowed_values)))
        raise ApiValueError(
            "Invalid values for `%s` [%s], must be a subset of [%s]" %
            (
                input_variable_path[0],
                invalid_values,
                ", ".join(map(str, these_allowed_values))
            )
        )
    elif (isinstance(input_values, dict)
            and not set(
                input_values.keys()).issubset(set(these_allowed_values))):
        invalid_values = ", ".join(
            map(str, set(input_values.keys()) - set(these_allowed_values)))
        raise ApiValueError(
            "Invalid keys in `%s` [%s], must be a subset of [%s]" %
            (
                input_variable_path[0],
                invalid_values,
                ", ".join(map(str, these_allowed_values))
            )
        )
    elif (not isinstance(input_values, (list, dict))
            and input_values not in these_allowed_values):
        raise ApiValueError(
            "Invalid value for `%s` (%s), must be one of %s" %
            (
                input_variable_path[0],
                input_values,
                these_allowed_values
            )
        )
def is_json_validation_enabled(schema_keyword, configuration=None):
    """Return True if JSON schema validation is enabled for *schema_keyword*.

    Allows skipping JSON schema structural validation as requested in the
    configuration's `_disabled_client_side_validations` list.

    Args:
        schema_keyword (string): the name of a JSON schema validation keyword.
        configuration (Configuration): the configuration class.
    """
    if configuration is None:
        return True
    if not hasattr(configuration, '_disabled_client_side_validations'):
        return True
    return schema_keyword not in configuration._disabled_client_side_validations
def check_validations(
        validations, input_variable_path, input_values,
        configuration=None):
    """Raises an exception if the input_values are invalid

    Args:
        validations (dict): the validation dictionary.
        input_variable_path (tuple): the path to the input variable.
        input_values (list/str/int/float/date/datetime): the values that we
            are checking.
        configuration (Configuration): the configuration class.

    Raises:
        ApiValueError: when any enabled constraint is violated.
    """

    if input_values is None:
        return

    current_validations = validations[input_variable_path]
    if (is_json_validation_enabled('multipleOf', configuration) and
            'multiple_of' in current_validations and
            isinstance(input_values, (int, float)) and
            not (float(input_values) / current_validations['multiple_of']).is_integer()):
        # Note 'multipleOf' will be as good as the floating point arithmetic.
        raise ApiValueError(
            "Invalid value for `%s`, value must be a multiple of "
            "`%s`" % (
                input_variable_path[0],
                current_validations['multiple_of']
            )
        )

    if (is_json_validation_enabled('maxLength', configuration) and
            'max_length' in current_validations and
            len(input_values) > current_validations['max_length']):
        raise ApiValueError(
            "Invalid value for `%s`, length must be less than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['max_length']
            )
        )

    if (is_json_validation_enabled('minLength', configuration) and
            'min_length' in current_validations and
            len(input_values) < current_validations['min_length']):
        raise ApiValueError(
            "Invalid value for `%s`, length must be greater than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['min_length']
            )
        )

    if (is_json_validation_enabled('maxItems', configuration) and
            'max_items' in current_validations and
            len(input_values) > current_validations['max_items']):
        raise ApiValueError(
            "Invalid value for `%s`, number of items must be less than or "
            "equal to `%s`" % (
                input_variable_path[0],
                current_validations['max_items']
            )
        )

    if (is_json_validation_enabled('minItems', configuration) and
            'min_items' in current_validations and
            len(input_values) < current_validations['min_items']):
        # FIX: previously raised bare ValueError, inconsistent with every
        # other constraint failure in this function.
        raise ApiValueError(
            "Invalid value for `%s`, number of items must be greater than or "
            "equal to `%s`" % (
                input_variable_path[0],
                current_validations['min_items']
            )
        )

    items = ('exclusive_maximum', 'inclusive_maximum', 'exclusive_minimum',
             'inclusive_minimum')
    if (any(item in current_validations for item in items)):
        if isinstance(input_values, list):
            max_val = max(input_values)
            min_val = min(input_values)
        elif isinstance(input_values, dict):
            max_val = max(input_values.values())
            min_val = min(input_values.values())
        else:
            max_val = input_values
            min_val = input_values

    if (is_json_validation_enabled('exclusiveMaximum', configuration) and
            'exclusive_maximum' in current_validations and
            max_val >= current_validations['exclusive_maximum']):
        raise ApiValueError(
            "Invalid value for `%s`, must be a value less than `%s`" % (
                input_variable_path[0],
                current_validations['exclusive_maximum']
            )
        )

    if (is_json_validation_enabled('maximum', configuration) and
            'inclusive_maximum' in current_validations and
            max_val > current_validations['inclusive_maximum']):
        raise ApiValueError(
            "Invalid value for `%s`, must be a value less than or equal to "
            "`%s`" % (
                input_variable_path[0],
                current_validations['inclusive_maximum']
            )
        )

    if (is_json_validation_enabled('exclusiveMinimum', configuration) and
            'exclusive_minimum' in current_validations and
            min_val <= current_validations['exclusive_minimum']):
        # FIX: the error message previously read
        # current_validations['exclusive_maximum'], which both reported the
        # wrong bound and raised KeyError when only a minimum was configured.
        raise ApiValueError(
            "Invalid value for `%s`, must be a value greater than `%s`" %
            (
                input_variable_path[0],
                current_validations['exclusive_minimum']
            )
        )

    if (is_json_validation_enabled('minimum', configuration) and
            'inclusive_minimum' in current_validations and
            min_val < current_validations['inclusive_minimum']):
        raise ApiValueError(
            "Invalid value for `%s`, must be a value greater than or equal "
            "to `%s`" % (
                input_variable_path[0],
                current_validations['inclusive_minimum']
            )
        )
    flags = current_validations.get('regex', {}).get('flags', 0)
    if (is_json_validation_enabled('pattern', configuration) and
            'regex' in current_validations and
            not re.search(current_validations['regex']['pattern'],
                          input_values, flags=flags)):
        err_msg = r"Invalid value for `%s`, must match regular expression `%s`" % (
            input_variable_path[0],
            current_validations['regex']['pattern']
        )
        if flags != 0:
            # Don't print the regex flags if the flags are not
            # specified in the OAS document.
            err_msg = r"%s with flags=`%s`" % (err_msg, flags)
        raise ApiValueError(err_msg)
def order_response_types(required_types):
    """Return *required_types* sorted into coercion order.

    Args:
        required_types (list/tuple): collection of classes, or instances of
            list or dict carrying child-type information.

    Returns:
        (list): the same members, ordered by COERCION_INDEX_BY_TYPE.

    Raises:
        ApiValueError: when a member has no known coercion rank.
    """
    def coercion_rank(candidate):
        # Container instances (list/dict) rank as their container type.
        if isinstance(candidate, list):
            return COERCION_INDEX_BY_TYPE[list]
        if isinstance(candidate, dict):
            return COERCION_INDEX_BY_TYPE[dict]
        # Model classes rank as their model base class; check the most
        # specific base first (Composed before Normal before Simple).
        if inspect.isclass(candidate):
            for model_base in (ModelComposed, ModelNormal, ModelSimple):
                if issubclass(candidate, model_base):
                    return COERCION_INDEX_BY_TYPE[model_base]
        # Primitive classes are ranked directly by the lookup table.
        if candidate in COERCION_INDEX_BY_TYPE:
            return COERCION_INDEX_BY_TYPE[candidate]
        raise ApiValueError("Unsupported type: %s" % candidate)

    return sorted(required_types, key=coercion_rank)
def remove_uncoercible(required_types_classes, current_item, spec_property_naming,
                       must_convert=True):
    """Only keeps the type conversions that are possible

    Args:
        required_types_classes (tuple): tuple of classes that are required
            these should be ordered by COERCION_INDEX_BY_TYPE
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.
        current_item (any): the current item (input data) to be converted

    Keyword Args:
        must_convert (bool): if True the item to convert is of the wrong
            type and we want a big list of coercibles
            if False, we want a limited list of coercibles

    Returns:
        (list): the remaining coercible required types, classes only
    """
    current_type_simple = get_simple_class(current_item)

    results_classes = []
    for required_type_class in required_types_classes:
        # convert our models to OpenApiModel
        required_type_class_simplified = required_type_class
        if isinstance(required_type_class_simplified, type):
            if issubclass(required_type_class_simplified, ModelComposed):
                required_type_class_simplified = ModelComposed
            elif issubclass(required_type_class_simplified, ModelNormal):
                required_type_class_simplified = ModelNormal
            elif issubclass(required_type_class_simplified, ModelSimple):
                required_type_class_simplified = ModelSimple

        if required_type_class_simplified == current_type_simple:
            # don't consider converting to one's own class
            continue

        class_pair = (current_type_simple, required_type_class_simplified)
        if must_convert and class_pair in COERCIBLE_TYPE_PAIRS[spec_property_naming]:
            # conversion for this (from, to) pair is allowed in
            # "must convert" mode per the coercible-pairs table
            results_classes.append(required_type_class)
        elif class_pair in UPCONVERSION_TYPE_PAIRS:
            # pairs in UPCONVERSION_TYPE_PAIRS are kept regardless of
            # must_convert
            results_classes.append(required_type_class)
    return results_classes
def get_discriminated_classes(cls):
    """Return every class that *cls*'s discriminator can map to.

    Recurses through mapped classes that themselves declare a
    discriminator.  ``cls`` itself is included only when it is nullable.
    TODO: lru_cache this
    """
    discr_property = next(iter(cls.discriminator))
    collected = [cls] if is_type_nullable(cls) else []
    for mapped_cls in cls.discriminator[discr_property].values():
        nested_discriminator = getattr(mapped_cls, 'discriminator', None)
        if nested_discriminator is not None:
            # the mapped class fans out further; flatten its mapping too
            collected.extend(get_discriminated_classes(mapped_cls))
        else:
            collected.append(mapped_cls)
    return collected
def get_possible_classes(cls, from_server_context):
    """Return the classes that data typed as *cls* may deserialize into.

    Server-side (from_server_context=True) only *cls* itself is allowed.
    Client-side, a discriminator mapping replaces *cls* entirely, and a
    ModelComposed class additionally contributes its input classes.
    TODO: lru_cache this
    """
    if from_server_context:
        return [cls]
    if getattr(cls, 'discriminator', None) is not None:
        # discriminated schema: the mapped child classes stand in for cls
        return list(get_discriminated_classes(cls))
    candidates = [cls]
    if issubclass(cls, ModelComposed):
        candidates.extend(composed_model_input_classes(cls))
    return candidates
def get_required_type_classes(required_types_mixed, spec_property_naming):
    """Normalize a mixed collection of required types.

    Args:
        required_types_mixed (tuple/list): contains classes, or list/dict
            instances describing container child types
        spec_property_naming (bool): if True these values came from the
            server, and we use the data types in our endpoints.
            If False, we are client side and we need to include
            oneOf and discriminator classes inside the data types in our
            endpoints

    Returns:
        (valid_classes, dict_valid_class_to_child_types_mixed):
            valid_classes (tuple): the valid classes that the current item
                should be
            dict_valid_class_to_child_types_mixed (dict): maps a container
                class (list/tuple/dict) to its allowed child types
    """
    valid_classes = []
    child_types_by_container = {}
    for candidate in required_types_mixed:
        if isinstance(candidate, list):
            valid_classes.append(list)
            child_types_by_container[list] = candidate
        elif isinstance(candidate, tuple):
            valid_classes.append(tuple)
            child_types_by_container[tuple] = candidate
        elif isinstance(candidate, dict):
            valid_classes.append(dict)
            # dict child types are declared under the str key in the spec
            child_types_by_container[dict] = candidate[str]
        else:
            valid_classes.extend(
                get_possible_classes(candidate, spec_property_naming))
    return tuple(valid_classes), child_types_by_container
def change_keys_js_to_python(input_dict, model_class):
    """Map serialized (javascript) keys in *input_dict* to python keys.

    Uses ``model_class.attribute_map`` (python name -> json name) in
    reverse.  Keys not declared in the map are copied through unchanged,
    because the model may carry undeclared (additionalProperties) keys.
    Returns *input_dict* itself when the class has no attribute_map.
    """
    attribute_map = getattr(model_class, 'attribute_map', None)
    if attribute_map is None:
        return input_dict
    json_to_python = {json_key: py_key
                      for py_key, json_key in attribute_map.items()}
    # unknown keys fall back to themselves (additionalProperties)
    return {json_to_python.get(js_key, js_key): value
            for js_key, value in input_dict.items()}
def get_type_error(var_value, path_to_item, valid_classes, key_type=False):
    """Build (not raise) an ApiTypeError for *var_value*.

    The last element of *path_to_item* is used as the variable name in
    the rendered message.
    """
    return ApiTypeError(
        type_error_message(var_name=path_to_item[-1],
                           var_value=var_value,
                           valid_classes=valid_classes,
                           key_type=key_type),
        path_to_item=path_to_item,
        valid_classes=valid_classes,
        key_type=key_type,
    )
def deserialize_primitive(data, klass, path_to_item):
    """Deserializes string to primitive type.

    :param data: str/int/float
    :param klass: str/class the class to convert to
    :param path_to_item: (list) path to the data in the payload, used in
        error reporting
    :return: int, float, str, bool, date, datetime
    :raises ApiValueError: when the data cannot be parsed as klass
    """
    additional_message = ""
    try:
        if klass in {datetime, date}:
            additional_message = (
                "If you need your parameter to have a fallback "
                "string value, please set its type as `type: {}` in your "
                "spec. That allows the value to be any type. "
            )
            if klass == datetime:
                if len(data) < 8:
                    # too short to be an iso8601 datetime
                    raise ValueError("This is not a datetime")
                # The string should be in iso8601 datetime format.
                parsed_datetime = parse(data)
                date_only = (
                    parsed_datetime.hour == 0 and
                    parsed_datetime.minute == 0 and
                    parsed_datetime.second == 0 and
                    parsed_datetime.tzinfo is None and
                    8 <= len(data) <= 10
                )
                if date_only:
                    # a bare date string must not satisfy a datetime field
                    raise ValueError("This is a date, not a datetime")
                return parsed_datetime
            elif klass == date:
                if len(data) < 8:
                    raise ValueError("This is not a date")
                return parse(data).date()
        else:
            converted_value = klass(data)
            if isinstance(data, str) and klass == float:
                # reject strings that round-trip lossily through float
                if str(converted_value) != data:
                    # '7' -> 7.0 -> '7.0' != '7'
                    raise ValueError('This is not a float')
            return converted_value
    except (OverflowError, ValueError) as ex:
        # parse can raise OverflowError
        raise ApiValueError(
            "{0}Failed to parse {1} as {2}".format(
                additional_message, repr(data), klass.__name__
            ),
            path_to_item=path_to_item
        ) from ex
def get_discriminator_class(model_class,
                            discr_name,
                            discr_value, cls_visited):
    """Returns the child class specified by the discriminator.

    Args:
        model_class (OpenApiModel): the model class.
        discr_name (string): the name of the discriminator property.
        discr_value (any): the discriminator value.
        cls_visited (list): list of model classes that have been visited.
            Used to determine the discriminator class without
            visiting circular references indefinitely.

    Returns:
        used_model_class (class/None): the chosen child class that will be used
            to deserialize the data, for example dog.Dog.
            If a class is not found, None is returned.
    """
    if model_class in cls_visited:
        # The class has already been visited and no suitable class was found.
        return None
    # mutating the caller's list marks the class visited for the entire
    # recursive search, preventing infinite loops on circular schemas
    cls_visited.append(model_class)
    used_model_class = None
    if discr_name in model_class.discriminator:
        class_name_to_discr_class = model_class.discriminator[discr_name]
        used_model_class = class_name_to_discr_class.get(discr_value)
    if used_model_class is None:
        # We didn't find a discriminated class in class_name_to_discr_class.
        # So look in the ancestor or descendant discriminators
        # The discriminator mapping may exist in a descendant (anyOf, oneOf)
        # or ancestor (allOf).
        # Ancestor example: in the GrandparentAnimal -> ParentPet -> ChildCat
        # hierarchy, the discriminator mappings may be defined at any level
        # in the hierarchy.
        # Descendant example: mammal -> whale/zebra/Pig -> BasquePig/DanishPig
        # if we try to make BasquePig from mammal, we need to travel through
        # the oneOf descendant discriminators to find BasquePig
        descendant_classes = model_class._composed_schemas.get('oneOf', ()) + \
            model_class._composed_schemas.get('anyOf', ())
        ancestor_classes = model_class._composed_schemas.get('allOf', ())
        possible_classes = descendant_classes + ancestor_classes
        for cls in possible_classes:
            # Check if the schema has inherited discriminators.
            if hasattr(cls, 'discriminator') and cls.discriminator is not None:
                used_model_class = get_discriminator_class(
                    cls, discr_name, discr_value, cls_visited)
                if used_model_class is not None:
                    return used_model_class
    return used_model_class
def deserialize_model(model_data, model_class, path_to_item, check_type,
                      configuration, spec_property_naming):
    """Deserializes model_data to model instance.

    Args:
        model_data (int/str/float/bool/none_type/list/dict): data to instantiate the model
        model_class (OpenApiModel): the model class
        path_to_item (list): path to the model in the received data
        check_type (bool): whether to check the data type for the values in
            the model
        configuration (Configuration): the instance to use to convert files
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.

    Returns:
        model instance; implicitly None when model_data is neither a dict
        nor a PRIMITIVE_TYPES value (and model_class is not a ModelSimple)

    Raise:
        ApiTypeError
        ApiValueError
        ApiKeyError
    """
    # metadata kwargs forwarded to every model constructor
    kw_args = dict(_check_type=check_type,
                   _path_to_item=path_to_item,
                   _configuration=configuration,
                   _spec_property_naming=spec_property_naming)

    if issubclass(model_class, ModelSimple):
        # simple models wrap a single value
        return model_class._new_from_openapi_data(model_data, **kw_args)
    if isinstance(model_data, dict):
        # dict payloads are splatted into keyword arguments
        kw_args.update(model_data)
        return model_class._new_from_openapi_data(**kw_args)
    elif isinstance(model_data, PRIMITIVE_TYPES):
        return model_class._new_from_openapi_data(model_data, **kw_args)
def deserialize_file(response_data, configuration, content_disposition=None):
    """Deserializes body to file

    Saves response body into a file in a temporary folder,
    using the filename from the `Content-Disposition` header if provided.

    Args:
        response_data (str/bytes): the file data to write
        configuration (Configuration): the instance to use to convert files;
            only its ``temp_folder_path`` attribute is read here

    Keyword Args:
        content_disposition (str): the value of the Content-Disposition
            header

    Returns:
        (file_type): the deserialized file which is open
            The user is responsible for closing and reading the file
    """
    # reserve a unique path in the configured temp folder, then release the
    # OS handle so the file can be re-created under the final name below
    fd, path = tempfile.mkstemp(dir=configuration.temp_folder_path)
    os.close(fd)
    os.remove(path)

    if content_disposition:
        match = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
                          content_disposition)
        # fall back to the generated temp name when the header is present
        # but carries no parseable filename (previously this raised
        # AttributeError on `.group()` of None)
        if match:
            path = os.path.join(os.path.dirname(path), match.group(1))

    with open(path, "wb") as f:
        if isinstance(response_data, str):
            # change str to bytes so we can write it
            response_data = response_data.encode('utf-8')
        f.write(response_data)

    return open(path, "rb")
def attempt_convert_item(input_value, valid_classes, path_to_item,
                         configuration, spec_property_naming, key_type=False,
                         must_convert=False, check_type=True):
    """Try to coerce *input_value* into one of *valid_classes*.

    Args:
        input_value (any): the data to convert
        valid_classes (any): the classes that are valid
        path_to_item (list): the path to the item to convert
        configuration (Configuration): the instance to use to convert files
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.
        key_type (bool): if True we need to convert a key type (not supported)
        must_convert (bool): if True we must convert
        check_type (bool): if True we check the type or the returned data in
            ModelComposed/ModelNormal/ModelSimple instances

    Returns:
        instance (any) the fixed item; the unmodified input_value when no
        conversion succeeded and must_convert is False

    Raises:
        ApiTypeError
        ApiValueError
        ApiKeyError
    """
    valid_classes_ordered = order_response_types(valid_classes)
    valid_classes_coercible = remove_uncoercible(
        valid_classes_ordered, input_value, spec_property_naming)
    if not valid_classes_coercible or key_type:
        # we do not handle keytype errors, json will take care
        # of this for us
        if must_convert or configuration is None or not configuration.discard_unknown_keys:
            raise get_type_error(input_value, path_to_item, valid_classes,
                                 key_type=key_type)
        # otherwise fall through: the loop below is empty (or key_type is
        # set) and input_value is returned unchanged
    for valid_class in valid_classes_coercible:
        try:
            if issubclass(valid_class, OpenApiModel):
                return deserialize_model(input_value, valid_class,
                                         path_to_item, check_type,
                                         configuration, spec_property_naming)
            elif valid_class == file_type:
                return deserialize_file(input_value, configuration)
            return deserialize_primitive(input_value, valid_class,
                                         path_to_item)
        except (ApiTypeError, ApiValueError, ApiKeyError) as conversion_exc:
            if must_convert:
                raise conversion_exc
            # if we have conversion errors when must_convert == False
            # we ignore the exception and move on to the next class
            continue
    # we were unable to convert, must_convert == False
    return input_value
def is_type_nullable(input_type):
    """Return True when None is an allowed value for *input_type*.

    A type is nullable when any of the following holds:
    1. it is the 'null' type itself,
    2. it is an OpenApiModel whose OAS 'nullable' attribute is set,
    3. it is a oneOf/anyOf composed schema with a nullable child schema.

    Args:
        input_type (type): the class of the input_value being checked

    Returns:
        bool
    """
    if input_type is none_type:
        return True
    if issubclass(input_type, OpenApiModel) and input_type._nullable:
        return True
    if issubclass(input_type, ModelComposed):
        # nullable if the 'null' type appears among the oneOf/anyOf children
        composed = input_type._composed_schemas
        children = list(composed.get('oneOf', ())) + list(composed.get('anyOf', ()))
        return any(is_type_nullable(child) for child in children)
    return False
def is_valid_type(input_class_simple, valid_classes):
    """Return whether *input_class_simple* satisfies *valid_classes*.

    Args:
        input_class_simple (class): the class of the input_value that we are
            checking
        valid_classes (tuple): the valid classes that the current item
            should be

    Returns:
        bool
    """
    if issubclass(input_class_simple, OpenApiModel) and \
            valid_classes == (bool, date, datetime, dict, float, int, list, str, none_type,):
        # the exact catch-all "any type" tuple accepts any model instance
        return True
    valid_type = input_class_simple in valid_classes
    if not valid_type and (
            issubclass(input_class_simple, OpenApiModel) or
            input_class_simple is none_type):
        for valid_class in valid_classes:
            if input_class_simple is none_type and is_type_nullable(valid_class):
                # Schema is oneOf/anyOf and the 'null' type is one of the allowed types.
                return True
            if not (issubclass(valid_class, OpenApiModel) and valid_class.discriminator):
                continue
            # check whether input_class_simple is reachable through the
            # discriminator mapping of valid_class (recursively)
            discr_propertyname_py = list(valid_class.discriminator.keys())[0]
            discriminator_classes = (
                valid_class.discriminator[discr_propertyname_py].values()
            )
            valid_type = is_valid_type(input_class_simple, discriminator_classes)
            if valid_type:
                return True
    return valid_type
def validate_and_convert_types(input_value, required_types_mixed, path_to_item,
                               spec_property_naming, _check_type, configuration=None):
    """Raises a TypeError if there is a problem, otherwise returns value

    Args:
        input_value (any): the data to validate/convert
        required_types_mixed (list/dict/tuple): A list of
            valid classes, or a list tuples of valid classes, or a dict where
            the value is a tuple of value classes
        path_to_item: (list) the path to the data being validated
            this stores a list of keys or indices to get to the data being
            validated
        spec_property_naming (bool): True if the variable names in the input
            data are serialized names as specified in the OpenAPI document.
            False if the variables names in the input data are python
            variable names in PEP-8 snake case.
        _check_type: (boolean) if true, type will be checked and conversion
            will be attempted.
        configuration: (Configuration): the configuration class to use
            when converting file_type items.
            If passed, conversion will be attempted when possible
            If not passed, no conversions will be attempted and
            exceptions will be raised

    Returns:
        the correctly typed value

    Raises:
        ApiTypeError
    """
    results = get_required_type_classes(required_types_mixed, spec_property_naming)
    valid_classes, child_req_types_by_current_type = results

    input_class_simple = get_simple_class(input_value)
    valid_type = is_valid_type(input_class_simple, valid_classes)
    if not valid_type:
        if configuration:
            # if input_value is not valid_type try to convert it
            converted_instance = attempt_convert_item(
                input_value,
                valid_classes,
                path_to_item,
                configuration,
                spec_property_naming,
                key_type=False,
                must_convert=True,
                check_type=_check_type
            )
            return converted_instance
        else:
            # without a configuration no conversion is attempted
            raise get_type_error(input_value, path_to_item, valid_classes,
                                 key_type=False)

    # input_value's type is in valid_classes
    if len(valid_classes) > 1 and configuration:
        # there are valid classes which are not the current class
        valid_classes_coercible = remove_uncoercible(
            valid_classes, input_value, spec_property_naming, must_convert=False)
        if valid_classes_coercible:
            converted_instance = attempt_convert_item(
                input_value,
                valid_classes_coercible,
                path_to_item,
                configuration,
                spec_property_naming,
                key_type=False,
                must_convert=False,
                check_type=_check_type
            )
            return converted_instance

    if child_req_types_by_current_type == {}:
        # all types are of the required types and there are no more inner
        # variables left to look at
        return input_value
    inner_required_types = child_req_types_by_current_type.get(
        type(input_value)
    )
    if inner_required_types is None:
        # for this type, there are not more inner variables left to look at
        return input_value
    if isinstance(input_value, list):
        if input_value == []:
            # allow an empty list
            return input_value
        for index, inner_value in enumerate(input_value):
            inner_path = list(path_to_item)
            inner_path.append(index)
            # recurse; the converted element replaces the original in place
            input_value[index] = validate_and_convert_types(
                inner_value,
                inner_required_types,
                inner_path,
                spec_property_naming,
                _check_type,
                configuration=configuration
            )
    elif isinstance(input_value, dict):
        if input_value == {}:
            # allow an empty dict
            return input_value
        for inner_key, inner_val in input_value.items():
            inner_path = list(path_to_item)
            inner_path.append(inner_key)
            if get_simple_class(inner_key) != str:
                # JSON object keys must be strings
                raise get_type_error(inner_key, inner_path, valid_classes,
                                     key_type=True)
            # recurse; the converted value replaces the original in place
            input_value[inner_key] = validate_and_convert_types(
                inner_val,
                inner_required_types,
                inner_path,
                spec_property_naming,
                _check_type,
                configuration=configuration
            )
    return input_value
def model_to_dict(model_instance, serialize=True):
    """Returns the model properties as a dict

    Args:
        model_instance (one of your model instances): the model instance that
            will be converted to a dict.

    Keyword Args:
        serialize (bool): if True, the keys in the dict will be values from
            attribute_map
    """
    result = {}
    # maps one (key, value) pair, recursing into values that are themselves
    # models (detected via their _data_store attribute)
    extract_item = lambda item: (item[0], model_to_dict(item[1], serialize=serialize)) if hasattr(item[1], '_data_store') else item

    model_instances = [model_instance]
    if model_instance._composed_schemas:
        # composed models also expose the data stored on their component
        # (allOf/oneOf/anyOf) instances
        model_instances.extend(model_instance._composed_instances)
    seen_json_attribute_names = set()
    used_fallback_python_attribute_names = set()
    py_to_json_map = {}
    # NOTE: the loop variable deliberately rebinds model_instance
    for model_instance in model_instances:
        for attr, value in model_instance._data_store.items():
            if serialize:
                # we use get here because additional property key names do not
                # exist in attribute_map
                try:
                    attr = model_instance.attribute_map[attr]
                    py_to_json_map.update(model_instance.attribute_map)
                    seen_json_attribute_names.add(attr)
                except KeyError:
                    used_fallback_python_attribute_names.add(attr)
            if isinstance(value, list):
                if not value:
                    # empty list or None
                    result[attr] = value
                else:
                    res = []
                    for v in value:
                        if isinstance(v, PRIMITIVE_TYPES) or v is None:
                            res.append(v)
                        elif isinstance(v, ModelSimple):
                            res.append(v.value)
                        elif isinstance(v, dict):
                            res.append(dict(map(
                                extract_item,
                                v.items()
                            )))
                        else:
                            res.append(model_to_dict(v, serialize=serialize))
                    result[attr] = res
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    extract_item,
                    value.items()
                ))
            elif isinstance(value, ModelSimple):
                result[attr] = value.value
            elif hasattr(value, '_data_store'):
                result[attr] = model_to_dict(value, serialize=serialize)
            else:
                result[attr] = value
    if serialize:
        # drop python-named fallback keys whose serialized (json) name was
        # also emitted, so each attribute appears only once
        for python_key in used_fallback_python_attribute_names:
            json_key = py_to_json_map.get(python_key)
            if json_key is None:
                continue
            if python_key == json_key:
                continue
            json_key_assigned_no_need_for_python_key = json_key in seen_json_attribute_names
            if json_key_assigned_no_need_for_python_key:
                del result[python_key]

    return result
def type_error_message(var_value=None, var_name=None, valid_classes=None,
                       key_type=None):
    """Render the standard "invalid type" message.

    Keyword Args:
        var_value (any): the offending value
        var_name (str): the name of the variable holding it
        valid_classes (tuple): the accepted classes for current_item's value
        key_type (bool): True when the value is a key in a dict;
            False for a dict value or a list item
    """
    key_or_value = 'key' if key_type else 'value'
    return (
        "Invalid type for variable '{0}'. Required {1} type {2} and "
        "passed type was {3}".format(
            var_name,
            key_or_value,
            get_valid_classes_phrase(valid_classes),
            type(var_value).__name__,
        )
    )
def get_valid_classes_phrase(input_classes):
    """Return an English phrase naming the allowed types, sorted by name."""
    names = sorted(cls.__name__ for cls in input_classes)
    if len(names) == 1:
        return 'is {0}'.format(names[0])
    return "is one of [{0}]".format(", ".join(names))
def get_allof_instances(self, model_args, constant_args):
    """Instantiate every allOf component schema of *self*.

    Args:
        self: the class we are handling
        model_args (dict): var_name to var_value
            used to make instances
        constant_args (dict):
            metadata arguments:
            _check_type
            _path_to_item
            _spec_property_naming
            _configuration
            _visited_composed_classes

    Returns
        composed_instances (list)

    Raises:
        ApiValueError: when any allOf schema rejects the input data (allOf
            requires the data to match every component schema)
    """
    composed_instances = []
    for allof_class in self._composed_schemas['allOf']:
        try:
            if constant_args.get('_spec_property_naming'):
                # server-side data uses the openapi deserialization path
                allof_instance = allof_class._from_openapi_data(**model_args, **constant_args)
            else:
                allof_instance = allof_class(**model_args, **constant_args)
            composed_instances.append(allof_instance)
        except Exception as ex:
            raise ApiValueError(
                "Invalid inputs given to generate an instance of '%s'. The "
                "input data was invalid for the allOf schema '%s' in the composed "
                "schema '%s'. Error=%s" % (
                    allof_class.__name__,
                    allof_class.__name__,
                    self.__class__.__name__,
                    str(ex)
                )
            ) from ex
    return composed_instances
def get_oneof_instance(cls, model_kwargs, constant_kwargs, model_arg=None):
    """
    Find the oneOf schema that matches the input data (e.g. payload).
    If exactly one schema matches the input data, an instance of that schema
    is returned.
    If zero or more than one schema match the input data, an exception is raised.
    In OAS 3.x, the payload MUST, by validation, match exactly one of the
    schemas described by oneOf.

    Args:
        cls: the class we are handling
        model_kwargs (dict): var_name to var_value
            The input data, e.g. the payload that must match a oneOf schema
            in the OpenAPI document.
        constant_kwargs (dict): var_name to var_value
            args that every model requires, including configuration, server
            and path to item.

    Kwargs:
        model_arg: (int, float, bool, str, date, datetime, ModelSimple, None):
            the value to assign to a primitive class or ModelSimple class
            Notes:
            - this is only passed in when oneOf includes types which are not object
            - None is used to suppress handling of model_arg, nullable models
              are handled in __new__

    Returns
        oneof_instance (instance)

    Raises:
        ApiValueError: when zero or more than one oneOf schema matched
    """
    if len(cls._composed_schemas['oneOf']) == 0:
        return None

    oneof_instances = []
    # Iterate over each oneOf schema and determine if the input data
    # matches the oneOf schemas.
    for oneof_class in cls._composed_schemas['oneOf']:
        # The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is a OAS >= 3.1 feature.
        if oneof_class is none_type:
            # skip none_types because we are deserializing dict data.
            # none_type deserialization is handled in the __new__ method
            continue

        single_value_input = allows_single_value_input(oneof_class)

        try:
            if not single_value_input:
                # an object schema cannot match a single-value payload
                if model_arg is not None:
                    continue;
                if constant_kwargs.get('_spec_property_naming'):
                    oneof_instance = oneof_class._new_from_openapi_data(**model_kwargs, **constant_kwargs)
                else:
                    oneof_instance = oneof_class(**model_kwargs, **constant_kwargs)
            else:
                if issubclass(oneof_class, ModelSimple):
                    if constant_kwargs.get('_spec_property_naming'):
                        oneof_instance = oneof_class._new_from_openapi_data(model_arg, **constant_kwargs)
                    else:
                        oneof_instance = oneof_class(model_arg, **constant_kwargs)
                elif oneof_class in PRIMITIVE_TYPES:
                    oneof_instance = validate_and_convert_types(
                        model_arg,
                        (oneof_class,),
                        constant_kwargs['_path_to_item'],
                        constant_kwargs['_spec_property_naming'],
                        constant_kwargs['_check_type'],
                        configuration=constant_kwargs['_configuration']
                    )
            # NOTE(review): if single_value_input is True but oneof_class is
            # neither ModelSimple nor primitive, oneof_instance may carry a
            # stale value from a prior iteration — confirm this is unreachable
            oneof_instances.append((oneof_class, oneof_instance))
        except Exception:
            # a schema that fails to accept the data simply does not match;
            # only the final match count is validated below
            pass
    if len(oneof_instances) == 0:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. None "
            "of the oneOf schemas matched the input data." %
            cls.__name__
        )
    elif len(oneof_instances) > 1:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. Multiple "
            "oneOf schemas matched the inputs, but a max of one is allowed. "
            "Candidates: %s" %
            (cls.__name__, oneof_instances)
        )
    return oneof_instances[0][1]
def get_anyof_instances(self, model_args, constant_args):
    """Instantiate every anyOf component schema that accepts the input.

    Args:
        self: the class we are handling
        model_args (dict): var_name to var_value
            The input data, e.g. the payload that must match at least one
            anyOf child schema in the OpenAPI document.
        constant_args (dict): var_name to var_value
            args that every model requires, including configuration, server
            and path to item.

    Returns
        anyof_instances (list)

    Raises:
        ApiValueError: when no anyOf schema matched the input data
    """
    anyof_instances = []
    if len(self._composed_schemas['anyOf']) == 0:
        return anyof_instances

    for anyof_class in self._composed_schemas['anyOf']:
        # The composed oneOf schema allows the 'null' type and the input data
        # is the null value. This is a OAS >= 3.1 feature.
        if anyof_class is none_type:
            # skip none_types because we are deserializing dict data.
            # none_type deserialization is handled in the __new__ method
            continue

        try:
            if constant_args.get('_spec_property_naming'):
                anyof_instance = anyof_class._new_from_openapi_data(**model_args, **constant_args)
            else:
                anyof_instance = anyof_class(**model_args, **constant_args)
            anyof_instances.append(anyof_instance)
        except Exception:
            # a schema that rejects the data simply does not match; anyOf
            # only requires at least one match, validated below
            pass
    if len(anyof_instances) == 0:
        raise ApiValueError(
            "Invalid inputs given to generate an instance of %s. None of the "
            "anyOf schemas matched the inputs." %
            self.__class__.__name__
        )
    return anyof_instances
def get_discarded_args(self, composed_instances, model_args):
    """
    Gathers the args that were discarded by configuration.discard_unknown_keys

    Args:
        self: the composed model instance being built
        composed_instances (list): instances built for the allOf/oneOf/anyOf
            schemas of ``self``
        model_args (dict): var_name to var_value passed in to build the model

    Returns:
        (set): names present in model_args but absent from a composed
            instance's own keys
    """
    model_arg_keys = model_args.keys()
    discarded_args = set()
    # arguments passed to self were already converted to python names
    # before __init__ was called
    for instance in composed_instances:
        if instance.__class__ in self._composed_schemas['allOf']:
            try:
                keys = instance.to_dict().keys()
                # subtract key views, not the dict itself: `model_args - keys`
                # raised TypeError, which the blanket except silently
                # swallowed, so allOf discarded keys were never collected
                discarded_keys = model_arg_keys - keys
                discarded_args.update(discarded_keys)
            except Exception:
                # allOf integer schema will throw exception
                pass
        else:
            try:
                all_keys = set(model_to_dict(instance, serialize=False).keys())
                js_keys = model_to_dict(instance, serialize=True).keys()
                all_keys.update(js_keys)
                discarded_keys = model_arg_keys - all_keys
                discarded_args.update(discarded_keys)
            except Exception:
                # allOf integer schema will throw exception
                pass
    return discarded_args
def validate_get_composed_info(constant_args, model_args, self):
    """
    For composed schemas, generate schema instances for
    all schemas in the oneOf/anyOf/allOf definition. If additional
    properties are allowed, also assign those properties on
    all matched schemas that contain additionalProperties.
    Openapi schemas are python classes.

    Exceptions are raised if:
    - 0 or > 1 oneOf schema matches the model_args input data
    - no anyOf schema matches the model_args input data
    - any of the allOf schemas do not match the model_args input data

    Args:
        constant_args (dict): these are the args that every model requires
        model_args (dict): these are the required and optional spec args that
            were passed in to make this model
        self (class): the class that we are instantiating
            This class contains self._composed_schemas

    Returns:
        composed_info (list): length four
            composed_instances (list): the composed instances which are not
                self
            var_name_to_model_instances (dict): a dict going from var_name
                to the model_instance which holds that var_name
                the model_instance may be self or an instance of one of the
                classes in self.composed_instances()
            additional_properties_model_instances (list): a list of the
                model instances which have the property
                additional_properties_type. This list can include self
            discarded_args (set): arg names discarded by
                configuration.discard_unknown_keys
    """
    # create composed_instances
    composed_instances = []
    allof_instances = get_allof_instances(self, model_args, constant_args)
    composed_instances.extend(allof_instances)
    oneof_instance = get_oneof_instance(self.__class__, model_args, constant_args)
    if oneof_instance is not None:
        composed_instances.append(oneof_instance)
    anyof_instances = get_anyof_instances(self, model_args, constant_args)
    composed_instances.extend(anyof_instances)
    """
    set additional_properties_model_instances
    additional properties must be evaluated at the schema level
    so self's additional properties are most important
    If self is a composed schema with:
    - no properties defined in self
    - additionalProperties: False
    Then for object payloads every property is an additional property
    and they are not allowed, so only empty dict is allowed

    Properties must be set on all matching schemas
    so when a property is assigned toa composed instance, it must be set on all
    composed instances regardless of additionalProperties presence
    keeping it to prevent breaking changes in v5.0.1
    TODO remove cls._additional_properties_model_instances in 6.0.0
    """
    additional_properties_model_instances = []
    if self.additional_properties_type is not None:
        additional_properties_model_instances = [self]
    """
    no need to set properties on self in here, they will be set in __init__
    By here all composed schema oneOf/anyOf/allOf instances have their properties set using
    model_args
    """
    discarded_args = get_discarded_args(self, composed_instances, model_args)

    # map variable names to composed_instances
    var_name_to_model_instances = {}
    for prop_name in model_args:
        if prop_name not in discarded_args:
            # every kept property is visible on self and on every composed
            # component instance
            var_name_to_model_instances[prop_name] = [self] + composed_instances

    return [
        composed_instances,
        var_name_to_model_instances,
        additional_properties_model_instances,
        discarded_args
    ]
/ucam-identitylib-2.0.2.tar.gz/ucam-identitylib-2.0.2/identitylib/photo_client/model/photo_identifier.py | import re # noqa: F401
import sys # noqa: F401
from identitylib.photo_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from identitylib.photo_client.exceptions import ApiAttributeError
def lazy_import():
    """Import the referenced model classes on first use.

    Deferring these imports (instead of importing at module load) avoids
    circular-import problems between generated model modules. The imported
    classes are published into this module's global namespace so the type
    accessors below can reference them by name.
    """
    from identitylib.photo_client.model.scheme_enum import SchemeEnum
    from identitylib.photo_client.model.v1_beta1_photo_identifier_summary import V1Beta1PhotoIdentifierSummary
    globals().update(
        SchemeEnum=SchemeEnum,
        V1Beta1PhotoIdentifierSummary=V1Beta1PhotoIdentifierSummary,
    )
class PhotoIdentifier(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes are declared for this model.
    allowed_values = {
    }

    # No length/range/regex validations are declared for this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'id': (str,),  # noqa: E501
            'photos': ([str],),  # noqa: E501
            'related_identifiers': ([V1Beta1PhotoIdentifierSummary],),  # noqa: E501
            'scheme': (SchemeEnum,),  # noqa: E501
            'value': (str,),  # noqa: E501
            'deleted_at': (datetime, none_type,),  # noqa: E501
            'is_deleted': (bool,),  # noqa: E501
            'is_highest_primary_identifier': (bool,),  # noqa: E501
            'retain_until': (datetime, none_type,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model is not polymorphic; no discriminator field.
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'id': 'id',  # noqa: E501
        'photos': 'photos',  # noqa: E501
        'related_identifiers': 'relatedIdentifiers',  # noqa: E501
        'scheme': 'scheme',  # noqa: E501
        'value': 'value',  # noqa: E501
        'deleted_at': 'deletedAt',  # noqa: E501
        'is_deleted': 'isDeleted',  # noqa: E501
        'is_highest_primary_identifier': 'isHighestPrimaryIdentifier',  # noqa: E501
        'retain_until': 'retainUntil',  # noqa: E501
    }

    # Attributes that callers may not set via __init__ (server-populated);
    # __init__ raises ApiAttributeError for them, _from_openapi_data sets them.
    read_only_vars = {
        'id',  # noqa: E501
    }

    # Not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, id, photos, related_identifiers, scheme, value, *args, **kwargs):  # noqa: E501
        """PhotoIdentifier - a model defined in OpenAPI

        Args:
            id (str):
            photos ([str]): Photos identified by this identifier
            related_identifiers ([V1Beta1PhotoIdentifierSummary]):
            scheme (SchemeEnum):
            value (str): The identifier's value

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            deleted_at (datetime, none_type): [optional]  # noqa: E501
            is_deleted (bool): [optional]  # noqa: E501
            is_highest_primary_identifier (bool): Photo identifier is highest primary identifier. [optional]  # noqa: E501
            retain_until (datetime, none_type): If non-NULL, the minimum period the record should be retained. [optional]  # noqa: E501
        """

        # Pop framework-internal keyword arguments before treating the rest
        # as model properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is merged into the keyword arguments.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        # Required properties (read-only ones included, since this constructor
        # is used for data received from the server).
        self.id = id
        self.photos = photos
        self.related_identifiers = related_identifiers
        self.scheme = scheme
        self.value = value
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attributes handled directly by __setattr__/__getattr__ in the
    # base model rather than stored in _data_store.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, photos, related_identifiers, scheme, value, *args, **kwargs):  # noqa: E501
        """PhotoIdentifier - a model defined in OpenAPI

        photos ([str]): Photos identified by this identifier
        related_identifiers ([V1Beta1PhotoIdentifierSummary]):
        scheme (SchemeEnum):
        value (str): The identifier's value

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            deleted_at (datetime, none_type): [optional]  # noqa: E501
            is_deleted (bool): [optional]  # noqa: E501
            is_highest_primary_identifier (bool): Photo identifier is highest primary identifier. [optional]  # noqa: E501
            retain_until (datetime, none_type): If non-NULL, the minimum period the record should be retained. [optional]  # noqa: E501
        """

        # Pop framework-internal keyword arguments before treating the rest
        # as model properties. Note _spec_property_naming defaults to False
        # here (pythonic names), unlike _from_openapi_data.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is merged into the keyword arguments.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.photos = photos
        self.related_identifiers = related_identifiers
        self.scheme = scheme
        self.value = value
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
/onium-0.9.2.tar.gz/onium-0.9.2/README.md | Onium - Inject hebrew support into slack desktop app
=====================================================
Onium is a small utility to inject Hebrew support (automatic RTL) into Slack's desktop app.
Onium does this by modifying Slack's app.asar (Slack application code) and changes the Slack app itself.
What is Onium?
--------------
Onium fixes one of Slack's most glaring issues for users in countries whose script is written
right-to-left. It does so by injecting code into Slack that modifies the displayed text to correctly
show right-to-left words.
The Slack app is built using Electron, which essentially wraps a Chromium browser and a Node.js
server together. Onium modifies the HTML displayed by the Chromium browser to properly support
Hebrew (and RTL) languages.
Onium does this by patching Slack's internal code, producing a fixed version of the app. The fix persists until Slack installs an update, after which Onium must be run again.
Requirements
------------
You need Python 2.7 or 3.5 or later to run Onium.
Onium should work on Windows, Mac and Linux.
Quick start
-----------
Onium can be installed using pip:
$ python -m pip install onium
As long as python's scripts folder is in your path, simply run
$ onium
Usage
-----
Onium supports various command line parameters
```
usage: onium [-h] [-l LOCATION] [--no-kill] [--no-start] [-b BACKUP]
[-f] [-d]
Inject hebrew support plugin into Slack's tab inside an electron app. This
program injects the Chrome's hebrew_slack plugin into any electron (desktop)
version of the slack app
optional arguments:
-h, --help show this help message and exit
-l LOCATION, --location LOCATION
Location of application to run, or auto, local
(Windows only), store (Windows only) [default: auto]
--no-kill Do not attempt to kill original application before
starting
--no-start Do not attempt to start application (assume already
running)
-b BACKUP, --backup BACKUP
Name to use save original slack app backup. This will
never overwrite an existing backup file. Fails if file
already exists and not used with -f [default:
app.asar.orig]
-f, --force Proceed even if backup file already exists [default:
False]
-d, --debug Pass --remote-debugging-port=9222 to enable rendered
debugger with chrome
```
Contribute / Join the conversation
----------------------------------
Onium is an open-source project distributed under the MIT license. Basically means go wild.
Development is taking place at [https://github.com/yonatan-mitmit/onium](https://github.com/yonatan-mitmit/onium)
Please report issues [here](https://github.com/yonatan-mitmit/onium/issues)
License
-------
Onium is licensed under the terms of the MIT License (see the file LICENSE.txt).
Acknowledgement
---------------
Shlomi Matichin for his [slack_hebrew](https://github.com/shlomimatichin/slack-hebrew) plugin
Yuval Raz and Lital Lechtman for Mac port and testing
Ami Chayun for Linux port
| PypiClean |
/kundajelab_shap-1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_28_x86_64.whl/shap/benchmark/plots.py | import numpy as np
from .experiments import run_experiments
from ..plots import colors
from .. import __version__
from . import models
from . import methods
from . import metrics
import sklearn
import io
import base64
import os
try:
import matplotlib.pyplot as pl
import matplotlib
except ImportError:
pass
# Display metadata (titles, axis labels, sort order) for metrics and methods.
# Every entry had been commented out: the same display attributes are now
# declared inside the docstrings of the `metrics` / `methods` functions and
# read back via get_metric_attr(), get_method_color() and
# get_method_linestyle() below. The ~200 lines of dead commented-out entries
# have been removed; the name is kept (as an empty dict) for backward
# compatibility with any code that imports it.
metadata = {}
# Default mapping from attribution-method name to plot color, used as the
# default `cmap` argument of the plotting functions below. Methods not listed
# here get their color from their docstring via get_method_color().
benchmark_color_map = {
    "tree_shap": "#1E88E5",
    "deep_shap": "#1E88E5",
    "linear_shap_corr": "#1E88E5",
    "linear_shap_ind": "#ff0d57",
    "coef": "#13B755",
    "random": "#999999",
    "const_random": "#666666",
    "kernel_shap_1000_meanref": "#7C52FF"
}
# negated_metrics = [
# "runtime",
# "remove_positive_retrain",
# "remove_positive_mask",
# "remove_positive_resample",
# "keep_negative_retrain",
# "keep_negative_mask",
# "keep_negative_resample"
# ]
# one_minus_metrics = [
# "remove_absolute_mask__r2",
# "remove_absolute_mask__roc_auc",
# "remove_absolute_resample__r2",
# "remove_absolute_resample__roc_auc"
# ]
def get_method_color(method):
    """Return the plot color declared in a method's docstring.

    Scans the docstring of ``methods.<method>`` for a line of the form
    ``color = <value>``. The value is either a literal color string (e.g. a
    hex code) or ``red_blue_circle(<fraction>)``, which is resolved through
    the shap palette. Defaults to black when no color line is present.
    """
    stripped_lines = (raw.strip() for raw in getattr(methods, method).__doc__.split("\n"))
    for line in stripped_lines:
        if not line.startswith("color = "):
            continue
        value = line.split("=")[1].strip()
        if value.startswith("red_blue_circle("):
            # pull the numeric argument out from between the parentheses
            return colors.red_blue_circle(float(value[16:-1]))
        return value
    return "#000000"
def get_method_linestyle(method):
    """Return the plot line style declared in a method's docstring.

    Looks for a ``linestyle = <value>`` line in the docstring of
    ``methods.<method>``; falls back to "solid" when none is declared.
    """
    doc = getattr(methods, method).__doc__
    for raw_line in doc.split("\n"):
        stripped = raw_line.strip()
        if stripped.startswith("linestyle = "):
            return stripped.split("=")[1].strip()
    return "solid"
def get_metric_attr(metric, attr):
    """Read a named display attribute out of a metric function's docstring.

    Docstrings in the ``metrics`` module embed attributes as lines of the
    form ``attr = "text"`` (returned as ``str``) or ``attr = 1.5`` (returned
    as ``float``). Returns "" when the attribute is not declared.
    """
    string_prefix = attr + ' = "'
    number_prefix = attr + " = "
    for raw_line in getattr(metrics, metric).__doc__.split("\n"):
        line = raw_line.strip()
        # quoted value -> string attribute (checked first, since a quoted
        # line also matches the bare number prefix)
        if line.startswith(string_prefix) and line.endswith('"'):
            return line[len(string_prefix):-1]
        # any other "attr = " line is treated as a number
        if line.startswith(number_prefix):
            return float(line[len(number_prefix):])
    return ""
def plot_curve(dataset, model, metric, cmap=benchmark_color_map):
    """Plot one benchmark-metric curve per attribution method.

    Runs (or loads cached) experiments for the given dataset/model/metric,
    summarizes each method's curve by its normalized area under the curve,
    and draws every curve on a single figure with the legend sorted by AUC.
    Returns the matplotlib figure.

    NOTE(review): `cmap` is accepted but never used in this body — colors
    come from each method's docstring via get_method_color().
    """
    experiments = run_experiments(dataset=dataset, model=model, metric=metric)
    pl.figure()
    method_arr = []
    for (name,(fcounts,scores)) in experiments:
        _,_,method,_ = name
        # metrics may declare a transform so that "higher is always better"
        transform = get_metric_attr(metric, "transform")
        if transform == "negate":
            scores = -scores
        elif transform == "one_minus":
            scores = 1 - scores
        # normalize the AUC by the x-range so scores are comparable
        auc = sklearn.metrics.auc(fcounts, scores) / fcounts[-1]
        method_arr.append((auc, method, scores))
    # sorted by AUC ascending; re-reversed below so best appears on top
    for (auc,method,scores) in sorted(method_arr):
        method_title = getattr(methods, method).__doc__.split("\n")[0].strip()
        l = "{:6.3f} - ".format(auc) + method_title
        pl.plot(
            fcounts / fcounts[-1], scores, label=l,
            color=get_method_color(method), linewidth=2,
            linestyle=get_method_linestyle(method)
        )
    metric_title = getattr(metrics, metric).__doc__.split("\n")[0].strip()
    pl.xlabel(get_metric_attr(metric, "xlabel"))
    pl.ylabel(get_metric_attr(metric, "ylabel"))
    model_title = getattr(models, dataset+"__"+model).__doc__.split("\n")[0].strip()
    pl.title(metric_title + " - " + model_title)
    pl.gca().xaxis.set_ticks_position('bottom')
    pl.gca().yaxis.set_ticks_position('left')
    pl.gca().spines['right'].set_visible(False)
    pl.gca().spines['top'].set_visible(False)
    ahandles, alabels = pl.gca().get_legend_handles_labels()
    pl.legend(reversed(ahandles), reversed(alabels))
    return pl.gcf()
def plot_human(dataset, model, metric, cmap=benchmark_color_map):
    """Bar-plot per-feature attributions against the human consensus.

    Each experiment's score pair is (human consensus attributions, method
    attributions) over three features (labeled Fever/Cough/Headache below);
    methods are ordered by total absolute deviation from the consensus.
    Returns the matplotlib figure.

    NOTE(review): `cmap` is accepted but never used in this body.
    """
    experiments = run_experiments(dataset=dataset, model=model, metric=metric)
    pl.figure()
    method_arr = []
    for (name,(fcounts,scores)) in experiments:
        _,_,method,_ = name
        # total absolute deviation from the human consensus (scores[0])
        diff_sum = np.sum(np.abs(scores[1] - scores[0]))
        method_arr.append((diff_sum, method, scores[0], scores[1]))
    inds = np.arange(3) # the x locations for the groups
    # bar widths chosen so all methods plus the consensus fit in each group
    inc_width = (1.0 / len(method_arr)) * 0.8
    width = inc_width * 0.9
    pl.bar(inds, method_arr[0][2], width, label="Human Consensus", color="black", edgecolor="white")
    i = 1
    # methods drawn with dashed/dotted curves elsewhere get hatched bars here
    line_style_to_hatch = {
        "dashed": "///",
        "dotted": "..."
    }
    for (diff_sum, method, _, methods_attrs) in sorted(method_arr):
        method_title = getattr(methods, method).__doc__.split("\n")[0].strip()
        l = "{:.2f} - ".format(diff_sum) + method_title
        pl.bar(
            inds + inc_width * i, methods_attrs.flatten(), width, label=l, edgecolor="white",
            color=get_method_color(method), hatch=line_style_to_hatch.get(get_method_linestyle(method), None)
        )
        i += 1
    metric_title = getattr(metrics, metric).__doc__.split("\n")[0].strip()
    pl.xlabel("Features in the model")
    pl.ylabel("Feature attribution value")
    model_title = getattr(models, dataset+"__"+model).__doc__.split("\n")[0].strip()
    pl.title(metric_title + " - " + model_title)
    pl.gca().xaxis.set_ticks_position('bottom')
    pl.gca().yaxis.set_ticks_position('left')
    pl.gca().spines['right'].set_visible(False)
    pl.gca().spines['top'].set_visible(False)
    ahandles, alabels = pl.gca().get_legend_handles_labels()
    #pl.legend(ahandles, alabels)
    # hide major tick labels; feature names are drawn as minor ticks between
    # the bar groups
    pl.xticks(np.array([0, 1, 2, 3]) - (inc_width + width)/2, ["", "", "", ""])
    pl.gca().xaxis.set_minor_locator(matplotlib.ticker.FixedLocator([0.4, 1.4, 2.4]))
    pl.gca().xaxis.set_minor_formatter(matplotlib.ticker.FixedFormatter(["Fever", "Cough", "Headache"]))
    pl.gca().tick_params(which='minor', length=0)
    pl.axhline(0, color="#aaaaaa", linewidth=0.5)
    # shrink the axes upward to make room for the legend underneath
    box = pl.gca().get_position()
    pl.gca().set_position([
        box.x0, box.y0 + box.height * 0.3,
        box.width, box.height * 0.7
    ])
    # Put a legend below current axis
    pl.gca().legend(ahandles, alabels, loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2)
    return pl.gcf()
def _human_score_map(human_consensus, methods_attrs):
""" Converts human agreement differences to numerical scores for coloring.
"""
v = 1 - min(np.sum(np.abs(methods_attrs - human_consensus)) / (np.abs(human_consensus).sum() + 1), 1.0)
return v
def make_grid(scores, dataset, model):
    """Build a (methods x metrics) grid of normalized benchmark scores.

    Parameters
    ----------
    scores : list
        Entries of the form ``((dataset, model, method, metric),
        (fcounts, score))`` as produced by ``run_experiments``.
    dataset, model : str
        Names used to filter ``scores``.

    Returns
    -------
    row_keys : list of str
        Method names, ordered best to worst by mean normalized score.
    col_keys : list of str
        Metric names, in first-seen order.
    data : np.ndarray
        Scores min-max normalized per column, rows aligned with ``row_keys``
        and columns with ``col_keys``.
    """
    color_vals = {}
    metric_sort_order = {}
    for (_, _, method, metric), (fcounts, score) in filter(lambda x: x[0][0] == dataset and x[0][1] == model, scores):
        # remember the order in which each metric first appears
        metric_sort_order.setdefault(metric, len(metric_sort_order))
        if metric not in color_vals:
            color_vals[metric] = {}

        # metrics may declare a transform so that "higher is always better"
        transform = get_metric_attr(metric, "transform")
        if transform == "negate":
            score = -score
        elif transform == "one_minus":
            score = 1 - score

        if fcounts is None:
            color_vals[metric][method] = score
        elif fcounts == "human":
            color_vals[metric][method] = _human_score_map(*score)
        else:
            # summarize a curve by its normalized area under the curve
            color_vals[metric][method] = sklearn.metrics.auc(fcounts, score) / fcounts[-1]

    # BUGFIX: the original keyed every entry on the leaked loop variable
    # `metric` (key=lambda v: metric_sort_order[metric]), which gave all
    # entries the same key and made the sort a no-op. Sort each metric by its
    # own first-seen position instead.
    col_keys = sorted(color_vals.keys(), key=lambda m: metric_sort_order[m])
    # sorted() instead of list(set(...)) makes the pre-ranking row order
    # deterministic across runs (set iteration order of strings is not).
    row_keys = sorted(set(v for k in col_keys for v in color_vals[k].keys()))
    data = -28567 * np.ones((len(row_keys), len(col_keys)))  # sentinel fill value
    for i in range(len(row_keys)):
        for j in range(len(col_keys)):
            data[i, j] = color_vals[col_keys[j]][row_keys[i]]
    assert np.sum(data == -28567) == 0, "There are missing data values!"

    # min-max normalize each column so different metrics are comparable
    data = (data - data.min(0)) / (data.max(0) - data.min(0) + 1e-8)

    # sort rows (methods) by mean performance, best first
    inds = np.argsort(-data.mean(1))
    row_keys = [row_keys[i] for i in inds]
    data = data[inds, :]
    return row_keys, col_keys, data
from matplotlib.colors import LinearSegmentedColormap
# Solid red-to-blue colormap matching the SHAP color scheme. It is not
# referenced by the functions visible in this module (plot_grids uses
# colors.red_blue_no_bounds instead); presumably kept as a public constant.
red_blue_solid = LinearSegmentedColormap('red_blue_solid', {
    'red': ((0.0, 198./255, 198./255),
            (1.0, 5./255, 5./255)),
    'green': ((0.0, 34./255, 34./255),
              (1.0, 198./255, 198./255)),
    'blue': ((0.0, 5./255, 5./255),
             (1.0, 24./255, 24./255)),
    'alpha': ((0.0, 1, 1),
              (1.0, 1, 1))
})
from IPython.core.display import HTML
def plot_grids(dataset, model_names, out_dir=None):
    """Render the HTML benchmark-grid page for all models on a dataset.

    For each model a normalized methods-x-metrics score grid is built with
    make_grid(); each cell gets a click-to-open popup plot embedded as a
    base64 PNG. When `out_dir` is given the page (and PDF copies of the
    plots) are written there — note os.mkdir is used, so the directory must
    not already exist. Otherwise an IPython HTML object is returned for
    inline notebook display.
    """
    if out_dir is not None:
        os.mkdir(out_dir)

    # collect (cached) experiment results for every model
    scores = []
    for model in model_names:
        scores.extend(run_experiments(dataset=dataset, model=model))

    # `prefix` collects <style> rules and hidden popup <div>s; `out` collects
    # the visible page body.
    prefix = "<style type='text/css'> .shap_benchmark__select:focus { outline-width: 0 }</style>"
    out = "" # background: rgb(30, 136, 229)
    # out += "<div style='font-weight: regular; font-size: 24px; text-align: center; background: #f8f8f8; color: #000; padding: 20px;'>SHAP Benchmark</div>\n"
    # out += "<div style='height: 1px; background: #ddd;'></div>\n"
    #out += "<div style='height: 7px; background-image: linear-gradient(to right, rgb(30, 136, 229), rgb(255, 13, 87));'></div>"
    out += "<div style='position: fixed; left: 0px; top: 0px; right: 0px; height: 230px; background: #fff;'>\n" # box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19);
    out += "<div style='position: absolute; bottom: 0px; left: 0px; right: 0px;' align='center'><table style='border-width: 1px; margin-right: 100px'>\n"
    for ind,model in enumerate(model_names):
        row_keys, col_keys, data = make_grid(scores, dataset, model)
        # print(data)
        # print(colors.red_blue_solid(0.))
        # print(colors.red_blue_solid(1.))
        # return
        # render one popup plot per metric (curve plots for curve metrics,
        # bar plots for the human-agreement metrics)
        for metric in col_keys:
            save_plot = False
            if metric.startswith("human_"):
                plot_human(dataset, model, metric)
                save_plot = True
            elif metric not in ["local_accuracy", "runtime", "consistency_guarantees"]:
                plot_curve(dataset, model, metric)
                save_plot = True
            if save_plot:
                # embed the figure as a base64 data URI inside a hidden,
                # click-to-dismiss overlay div
                buf = io.BytesIO()
                pl.gcf().set_size_inches(1200.0/175,1000.0/175)
                pl.savefig(buf, format='png', dpi=175)
                if out_dir is not None:
                    pl.savefig("%s/plot_%s_%s_%s.pdf" % (out_dir, dataset, model, metric), format='pdf')
                pl.close()
                buf.seek(0)
                data_uri = base64.b64encode(buf.read()).decode('utf-8').replace('\n', '')
                plot_id = "plot__"+dataset+"__"+model+"__"+metric
                prefix += "<div onclick='document.getElementById(\"%s\").style.display = \"none\"' style='display: none; position: fixed; z-index: 10000; left: 0px; right: 0px; top: 0px; bottom: 0px; background: rgba(255,255,255,0.9);' id='%s'>" % (plot_id, plot_id)
                prefix += "<img width='600' height='500' style='margin-left: auto; margin-right: auto; margin-top: 230px; box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2), 0 6px 20px 0 rgba(0, 0, 0, 0.19);' src='data:image/png;base64,%s'>" % data_uri
                prefix += "</div>"
        model_title = getattr(models, dataset+"__"+model).__doc__.split("\n")[0].strip()
        # the rotated metric header row is emitted once, with the first model
        if ind == 0:
            # NOTE(review): "</td></td>" below looks like it should be a single
            # closing tag; browsers tolerate it — confirm before changing.
            out += "<tr><td style='background: #fff; width: 250px'></td></td>"
            for j in range(data.shape[1]):
                metric_title = getattr(metrics, col_keys[j]).__doc__.split("\n")[0].strip()
                out += "<td style='width: 40px; min-width: 40px; background: #fff; text-align: right;'><div style='margin-left: 10px; margin-bottom: -5px; white-space: nowrap; transform: rotate(-45deg); transform-origin: left top 0; width: 1.5em; margin-top: 8em'>" + metric_title + "</div></td>"
            out += "</tr>\n"
            out += "</table></div></div>\n"
            out += "<table style='border-width: 1px; margin-right: 100px; margin-top: 230px;'>\n"
        out += "<tr><td style='background: #fff'></td><td colspan='%d' style='background: #fff; font-weight: bold; text-align: center; margin-top: 10px;'>%s</td></tr>\n" % (data.shape[1], model_title)
        for i in range(data.shape[0]):
            out += "<tr>"
            # if i == 0:
            #     out += "<td rowspan='%d' style='background: #fff; text-align: center; white-space: nowrap; vertical-align: middle; '><div style='font-weight: bold; transform: rotate(-90deg); transform-origin: left top 0; width: 1.5em; margin-top: 8em'>%s</div></td>" % (data.shape[0], model_name)
            method_title = getattr(methods, row_keys[i]).__doc__.split("\n")[0].strip()
            # NOTE(review): the title attribute is hard-coded to
            # 'shap.LinearExplainer(model)' for every row — looks like a
            # leftover; confirm before changing.
            out += "<td style='background: #ffffff; text-align: right; width: 250px' title='shap.LinearExplainer(model)'>" + method_title + "</td>\n"
            for j in range(data.shape[1]):
                plot_id = "plot__"+dataset+"__"+model+"__"+col_keys[j]
                out += "<td onclick='document.getElementById(\"%s\").style.display = \"block\"' style='padding: 0px; padding-left: 0px; padding-right: 0px; border-left: 0px solid #999; width: 42px; min-width: 42px; height: 34px; background-color: #fff'>" % plot_id
                #out += "<div style='opacity: "+str(2*(max(1-data[i,j], data[i,j])-0.5))+"; background-color: rgb" + str(tuple(v*255 for v in colors.red_blue_solid(0. if data[i,j] < 0.5 else 1.)[:-1])) + "; height: "+str((30*max(1-data[i,j], data[i,j])))+"px; margin-left: auto; margin-right: auto; width:"+str((30*max(1-data[i,j], data[i,j])))+"px'></div>"
                # cell square: size and color both scale with the normalized score
                out += "<div style='opacity: "+str(1)+"; background-color: rgb" + str(tuple(int(v*255) for v in colors.red_blue_no_bounds(5*(data[i,j]-0.8))[:-1])) + "; height: "+str((30*data[i,j]))+"px; margin-left: auto; margin-right: auto; width:"+str((30*data[i,j]))+"px'></div>"
                #out += "<div style='float: left; background-color: #eee; height: 10px; width: "+str((40*(1-data[i,j])))+"px'></div>"
                out += "</td>\n"
            out += "</tr>\n" #
        out += "<tr><td colspan='%d' style='background: #fff'></td></tr>" % (data.shape[1] + 1)
    out += "</table>"
    # fixed page header: title plus the dataset selector dropdown
    out += "<div style='position: fixed; left: 0px; top: 0px; right: 0px; text-align: left; padding: 20px; text-align: right'>\n"
    out += "<div style='float: left; font-weight: regular; font-size: 24px; color: #000;'>SHAP Benchmark <span style='font-size: 14px; color: #777777;'>v"+__version__+"</span></div>\n"
    # select {
    #     margin: 50px;
    #     width: 150px;
    #     padding: 5px 35px 5px 5px;
    #     font-size: 16px;
    #     border: 1px solid #ccc;
    #     height: 34px;
    #     -webkit-appearance: none;
    #     -moz-appearance: none;
    #     appearance: none;
    #     background: url(http://www.stackoverflow.com/favicon.ico) 96% / 15% no-repeat #eee;
    # }
    #out += "<div style='display: inline-block; margin-right: 20px; font-weight: normal; text-decoration: none; font-size: 18px; color: #000;'>Dataset:</div>\n"
    # NOTE(review): there is no space between the onchange attribute and
    # dir='rtl' in the string below; browsers tolerate it — confirm before
    # "fixing", since this string is emitted verbatim.
    out += "<select id='shap_benchmark__select' onchange=\"document.location = '../' + this.value + '/index.html'\"dir='rtl' class='shap_benchmark__select' style='font-weight: normal; font-size: 20px; color: #000; padding: 10px; background: #fff; border: 1px solid #fff; -webkit-appearance: none; appearance: none;'>\n"
    out += "<option value='human' "+("selected" if dataset == "human" else "")+">Agreement with Human Intuition</option>\n"
    out += "<option value='corrgroups60' "+("selected" if dataset == "corrgroups60" else "")+">Correlated Groups 60 Dataset</option>\n"
    out += "<option value='independentlinear60' "+("selected" if dataset == "independentlinear60" else "")+">Independent Linear 60 Dataset</option>\n"
    #out += "<option>CRIC</option>\n"
    out += "</select>\n"
    #out += "<script> document.onload = function() { document.getElementById('shap_benchmark__select').value = '"+dataset+"'; }</script>"
    #out += "<div style='display: inline-block; margin-left: 20px; font-weight: normal; text-decoration: none; font-size: 18px; color: #000;'>CRIC</div>\n"
    out += "</div>\n"

    # output the legend
    out += "<table style='border-width: 0px; width: 100px; position: fixed; right: 50px; top: 200px; background: rgba(255, 255, 255, 0.9)'>\n"
    out += "<tr><td style='background: #fff; font-weight: normal; text-align: center'>Higher score</td></tr>\n"
    legend_size = 21
    for i in range(legend_size-9):
        out += "<tr>"
        out += "<td style='padding: 0px; padding-left: 0px; padding-right: 0px; border-left: 0px solid #999; height: 34px'>"
        val = (legend_size-i-1) / (legend_size-1)
        out += "<div style='opacity: 1; background-color: rgb" + str(tuple(int(v*255) for v in colors.red_blue_no_bounds(5*(val-0.8)))[:-1]) + "; height: "+str(30*val)+"px; margin-left: auto; margin-right: auto; width:"+str(30*val)+"px'></div>"
        out += "</td>"
        out += "</tr>\n" #
    out += "<tr><td style='background: #fff; font-weight: normal; text-align: center'>Lower score</td></tr>\n"
    out += "</table>\n"

    if out_dir is not None:
        with open(out_dir + "/index.html", "w") as f:
            f.write("<html><body style='margin: 0px; font-size: 16px; font-family: \"Myriad Pro\", Arial, sans-serif;'><center>")
            f.write(prefix)
            f.write(out)
            f.write("</center></body></html>")
    else:
        return HTML(prefix + out)
/TurboMail-3.0.3.tar.gz/TurboMail-3.0.3/turbomail/api.py | import logging
import warnings
from turbomail.control import interface
from turbomail.exceptions import ManagerException
# Public names exported by ``from turbomail.api import *``.
__all__ = ['Extension', 'TransportFactory', 'Transport', 'Manager']
class Extension(object):
    """Base class for extensions providing startup and shutdown hooks.

    An extension tracks a single ``ready`` flag: ``start`` raises it and
    ``stop`` lowers it, each reporting whether the transition took place.
    """

    def __init__(self):
        super(Extension, self).__init__()
        # Extensions begin life stopped; start() must be called explicitly.
        self.ready = False

    def start(self):
        """Mark the extension as ready.  Always reports success."""
        self.ready = True
        return True

    def stop(self):
        """Mark the extension as stopped.

        Returns False when the extension was not running, True otherwise.
        """
        if self.ready:
            self.ready = False
            return True
        return False
class TransportFactory(Extension):
    """Extension that manufactures new Transport instances.

    Subclasses set the ``transport`` class attribute; configuration or
    startup work can then be performed here, outside the Transport's
    initializer.
    """

    # Transport class to instantiate; subclasses are expected to override this.
    transport = None

    def __init__(self):
        super(TransportFactory, self).__init__()

    def new(self):
        """Return a fresh transport instance, or None when not started."""
        if self.ready:
            return self.transport()
        return None
class Transport(object):
    """Message delivery subsystem API.

    A Transport can deliver messages towards their recipients with a specific
    method, e.g. SMTP.  Transports do not care about delivery strategies like
    queuing or batch submission."""

    def __init__(self):
        super(Transport, self).__init__()

    def deliver(self, message):
        """Deliver a single message.  Concrete transports must override this."""
        # Python 3 call syntax (the old ``raise E, msg`` form is a SyntaxError
        # on Python 3 and is equivalent to this on Python 2).
        raise NotImplementedError("Transport plugin must override this method without inheritance.")

    def config_get(self, key, default=None, tm2_key=None):
        """Return the value for the given key from the configuration.

        If the value was not found, this method checks whether an old
        (TurboMail 2) configuration option, specified in ``tm2_key``, is used.
        If ``tm2_key`` was omitted, it derives the old key from the new one by
        cutting out the 'smtp.' in the middle.  If an old configuration key is
        in use, a DeprecationWarning is issued.

        As a final fallback, the given default value (None by default) is
        returned."""
        # We can not use 'key in interface.config' because TurboGears'
        # configuration (ConfigObj) does not provide any possibility to detect
        # whether a value is present or not.
        value = interface.config.get(key, None)
        if value is None:
            # Fall back to the legacy key, normalizing it into the 'mail.'
            # namespace when an explicit legacy key was supplied.
            if tm2_key is not None and not tm2_key.startswith('mail.'):
                tm2_key = 'mail.' + tm2_key
            elif tm2_key is None:
                tm2_key = key.replace('.smtp.', '.')
            value = interface.config.get(tm2_key, None)
            if value is not None:
                basemsg = 'Configuration key "%s" is deprecated, please use "%s" instead'
                warn_text = basemsg % (tm2_key, key)
                warnings.warn(warn_text, category=DeprecationWarning)
        if value is None:
            value = default
        return value

    def stop(self):
        """Called by the manager before the transport instance is destroyed.

        The transport can do final cleanups (like releasing external
        resources) here."""
        pass
class Manager(Extension):
    """Manager instances orchestrate the delivery of messages.

    Concrete managers implement a delivery strategy (immediate, queued,
    batched, ...) on top of Transports obtained from the configured
    transport factory.
    """

    def __init__(self):
        super(Manager, self).__init__()
        # Resolved lazily on first use; see get_new_transport().
        self.transport_factory = None

    def get_new_transport(self):
        """Allocate and return a new transport from the factory.

        The factory is looked up lazily from the interface so configuration
        has a chance to complete before first use.  Raises ManagerException
        when the factory refuses to produce a transport (e.g. because it has
        been stopped).
        """
        if self.transport_factory is None:
            self.transport_factory = interface.transport
        transport = self.transport_factory.new()
        if transport is None:
            raise ManagerException('Unable to allocate new transport.')
        return transport

    def deliver(self, message):
        # NOTE(review): this base implementation only reports readiness and
        # does not deliver anything; subclasses appear expected to override
        # it -- confirm against concrete managers.
        return self.ready
/pyDHL-0.3.tar.gz/pyDHL-0.3/README.md | pyDHL
===
Python module to work with DHL REST Webservice integration.
## pyDHL as module
```
import pyDHL
```
By running it as a module pyDHL will expose the following resources.
### Package
Inputs:
* `weight` (Mandatory): Package weight
* `length` (Mandatory): Package length
* `width` (Mandatory): Package width
* `height` (Mandatory): Package height
* `price` (Optional): Package price
* `description` (Optional): Package description
* `reference` (Optional): Package reference
```
from pyDHL import Package
package = Package(
weight=<package_weight>,
length=<package_length>,
width=<package_width>,
height=<package_height>
)
```
### Person
A person is a combination of Contact and Address information. This class is used for both the sender and the recipient of the package.
> Rate request just needs shipment's Address.
#### Address
Inputs:
* `street_lines` (Mandatory): Person's address first line
* `city` (Mandatory): Person's city
* `postal_code` (Mandatory): Person's postal code
* `country` (Mandatory): Person's country, Must oblige to the DHL's country codes.
* `street_lines2` (Optional): Person's address second line. `'N/A'` by default.
* `street_lines3` (Optional): Person's address third line.
#### Contact
Inputs:
* `name` (Mandatory): Person's name
* `phone` (Mandatory): Person's phone
* `email` (Optional): Person's email. `'null'` by default
* `company` (Optional): Person's company. `name` by default
### Shipment
Mandatory inputs:
* `packages`: A list of Package
* `sender` and `recipient`: Persons (or Address in rate request)
In order to build a correct Shipment, please refer to the documentation. Some parameters are set by default and others such as `SERVICE_TYPE` are set following
a set of conditions and properties of the Shipment itself.
### Requests
All requests will have as input a valid Shipment object or a dict or dict-like structure.
In order to send requests to DHL Webservices you must first set credentials:
```
from pyDHL import requests
credentials = {
'account': # your account number
'username': # your username
'password': # your password
}
requests.set_credentials(credentials)
```
Optionally it is possible to set the 'sandbox' environment for testing purposes.
```
requests.set_sandbox([True|False]) # use DHL's sandbox endpoints
```
The result of every request is either the JSON response of the DHL endpoint or, if the request was wrong, a JSON-like object with `error` and `message` keys describing the error.
Every requests will update the shipment object given by input if the request was successful.
#### Rate Request
Rate Request will return DHL’s product capabilities (products, services and estimated delivery time) and prices (where applicable) for a certain set of input data.
* Input: Shipment
* Output: JSON response. DHL Rate Request
```
from pyDHL.requests import rate
# create a valid shipment
response = rate(shipment)
```
#### Shipment Request
The key elements in the response of the Shipment Request will be a base64 encoded PDF label and the Shipment and Piece identification numbers, which you can use for tracking on the DHL web site.
* Input: Shipment
* Output: JSON response. DHL Shipment Request
```
from pyDHL.requests import shipment
# create a valid shipment
response = shipment(shipment)
```
#### Update Request
The updateShipment request allows for additional pieces to be added to a previously created shipment that has not been picked up by DHL Express/had a scan against it.
* Input: Shipment
* Output: JSON response. DHL Update Request
```
from pyDHL.requests import update
# create a valid shipment
response = update(shipment)
```
#### Tracking Request
The resulting response will provide tracking events at both the Shipment and/or Piece events corresponding to the DHL Waybill(s) submitted.
* Input: Shipment
* Output: JSON response. DHL Tracking Request
```
from pyDHL.requests import tracking
# create a valid shipment
response = tracking(shipment.id, level=[TRACKING_LAST|TRACKING_ALL])
```
## pyDHL's Command Line interface
Use pyDHL as a command line program to set up a quick shipment
```
pyDHL <option> <shipment_file> [mode]
```
### Options
Options flag gets mapped with each of the available requests:
* **-r RATE, --rate RATE**: Rate Request will return DHL’s product capabilities (products, services and estimated delivery time) and prices (where applicable) for a certain set of input data.
* **-s SHIPMENT, --shipment SHIPMENT**: The key elements in the response of the Shipment Request will be a base64 encoded PDF label and the Shipment and Piece identification numbers, which you can use for tracking on the DHL web site.
* **-u UPDATE, --update UPDATE**: The updateShipment request allows for additional pieces to be added to a previously created shipment that has not been picked up by DHL Express/had a scan against it.
* **-t TRACK, --track TRACK**: The resulting response will provide tracking events at both the Shipment and/or Piece events corresponding to the submitted DHL Waybill (Shipment id).
* **-p PICKUP, --pickup PICKUP**: The requestPickup request allows users to request standalone pickup requests for local and remote/import pickups.
* **-o OUTPUT, --output OUTPUT**: Output File
* **--sandbox**: Set sandbox mode
### Shipment File
Following every option there is a shipment file. This is a file that contains all necessary data to build and send requests to DHL. There is an example in `shipment.json`.
### Mode
By default `pyDHL` will use DHL endpoints that will execute real shipments
and other actions. For testing purposes and development there is a "sandbox" mode
that can be activated by adding `--sandbox` at the end of the command
## Testing
In order to a successful testing, please edit `tests/config.py` file with valid
DHL credentials for accessing HTTPS endpoints.
pyDHL uses pytest and coverage to execute tests and to check if everything was correctly executed.
```
coverage run -m pytest
```
Tests are separated between different kinds of shipments available:
* National: Shipments with origin and source in the same country.
* EU: Shipments coming from an country in Europe and with destination to a country in Europe.
* International: None of the cases above.
| PypiClean |
/aspose-words-cloud-23.7.0.tar.gz/aspose-words-cloud-23.7.0/asposewordscloud/models/requests/update_comment_online_request.py | import json
from six.moves.urllib.parse import quote
from asposewordscloud import *
from asposewordscloud.models import *
from asposewordscloud.models.requests import *
from asposewordscloud.models.responses import *
class UpdateCommentOnlineRequest(BaseRequestObject):
    """
    Request model for update_comment_online operation.
    Initializes a new instance.
    :param document The document.
    :param comment_index The index of the comment.
    :param comment Comment data.
    :param load_encoding Encoding that will be used to load an HTML (or TXT) document if the encoding is not specified in HTML.
    :param password Password of protected Word document. Use the parameter to pass a password via SDK. SDK encrypts it automatically. We don't recommend to use the parameter to pass a plain password for direct call of API.
    :param encrypted_password Password of protected Word document. Use the parameter to pass an encrypted password for direct calls of API. See SDK code for encryption details.
    :param dest_file_name Result path of the document after the operation. If this parameter is omitted then result of the operation will be saved as the source document.
    :param revision_author Initials of the author to use for revisions.If you set this parameter and then make some changes to the document programmatically, save the document and later open the document in MS Word you will see these changes as revisions.
    :param revision_date_time The date and time to use for revisions.
    """

    def __init__(self, document, comment_index, comment, load_encoding=None, password=None, encrypted_password=None, dest_file_name=None, revision_author=None, revision_date_time=None):
        # Required multipart payload: the document itself and the comment model.
        self.document = document
        self.comment_index = comment_index
        self.comment = comment
        # Optional query-string parameters (sent only when not None).
        self.load_encoding = load_encoding
        self.password = password
        self.encrypted_password = encrypted_password
        self.dest_file_name = dest_file_name
        self.revision_author = revision_author
        self.revision_date_time = revision_date_time

    def create_http_request(self, api_client):
        """Build the HTTP request dict consumed by the API client.

        Validates required parameters, substitutes path placeholders,
        collects query parameters, and assembles the multipart form body.
        """
        # verify the required parameter 'document' is set
        if self.document is None:
            raise ValueError("Missing the required parameter `document` when calling `update_comment_online`")  # noqa: E501
        # verify the required parameter 'comment_index' is set
        if self.comment_index is None:
            raise ValueError("Missing the required parameter `comment_index` when calling `update_comment_online`")  # noqa: E501
        # verify the required parameter 'comment' is set
        if self.comment is None:
            raise ValueError("Missing the required parameter `comment` when calling `update_comment_online`")  # noqa: E501

        # URL template; {commentIndex} is filled in from path_params below.
        path = '/v4.0/words/online/put/comments/{commentIndex}'
        path_params = {}
        if self.comment_index is not None:
            path_params['commentIndex'] = self.comment_index  # noqa: E501
        else:
            path_params['commentIndex'] = ''  # noqa: E501

        # path parameters
        collection_formats = {}
        if path_params:
            path_params = api_client.sanitize_for_serialization(path_params)
            path_params = api_client.parameters_to_tuples(path_params, collection_formats)
            for k, v in path_params:
                # specified safe chars, encode everything
                path = path.replace(
                    '{%s}' % k,
                    quote(str(v), safe=api_client.configuration.safe_chars_for_path_param)
                )

        # remove optional path parameters
        path = path.replace('//', '/')

        # Optional query-string parameters; None values are simply skipped.
        query_params = []
        if self.load_encoding is not None:
            query_params.append(('loadEncoding', self.load_encoding))  # noqa: E501
        if self.password is not None:
            query_params.append(('password', self.password))  # noqa: E501
        if self.encrypted_password is not None:
            query_params.append(('encryptedPassword', self.encrypted_password))  # noqa: E501
        if self.dest_file_name is not None:
            query_params.append(('destFileName', self.dest_file_name))  # noqa: E501
        if self.revision_author is not None:
            query_params.append(('revisionAuthor', self.revision_author))  # noqa: E501
        if self.revision_date_time is not None:
            query_params.append(('revisionDateTime', self.revision_date_time))  # noqa: E501

        header_params = {}
        # HTTP header `Content-Type`
        header_params['Content-Type'] = api_client.select_header_content_type(  # noqa: E501
            ['multipart/form-data'])  # noqa: E501

        # Multipart body: the document as a file part and the comment as JSON.
        file_content_params = []
        form_params = []
        if self.document is not None:
            form_params.append(['document', self.document, 'file'])  # noqa: E501
        if self.comment is not None:
            form_params.append(['comment', self.comment, 'json'])  # noqa: E501
        for file_content_value in file_content_params:
            form_params.append([file_content_value.reference, file_content_value.content, 'file'])  # noqa: E501

        return {
            "method": "PUT",
            "path": path,
            "body": None,
            "query_params": query_params,
            "header_params": header_params,
            "form_params": form_params,
            "collection_formats": collection_formats,
            "response_type": 'UpdateCommentOnlineResponse'  # noqa: E501
        }

    def get_response_type(self):
        """Return the name of the response model class for this request."""
        return 'UpdateCommentOnlineResponse'  # noqa: E501

    def deserialize_response(self, api_client, response):
        """Parse the multipart response into an UpdateCommentOnlineResponse.

        The server replies with two named parts: "Model" (the comment data,
        deserialized as CommentResponse) and "Document" (the resulting files).
        """
        multipart = self.getparts(response)
        return UpdateCommentOnlineResponse(
            api_client.deserialize(api_client.findMultipartByName(multipart, "Model").content, api_client.findMultipartByName(multipart, "Model").headers, CommentResponse),
            api_client.deserialize_files_collection(api_client.findMultipartByName(multipart, "Document").content, api_client.findMultipartByName(multipart, "Document").headers))
/PDFSegmenter-0.1.tar.gz/PDFSegmenter-0.1/README.rst |
This library builds a graph-representation of the content of PDFs. The graph is then clustered, resulting page segments are classified and returned. Tables are retrieved formatted in a CSV-style.
How-to
========
* Pass the path of the PDF file (as a string) which is wanted to be converted to ``PDFSegmenter``.
* Call the function ``segment_document()``.
* The function ``get_labeled_graphs()`` returns page-wise document graph representations as a list of ``networkx`` graphs. The labels indicate a clustering assignment.
* ``segments2json()`` returns a JSON representation of the segmented document.
* ``segments2text()`` returns a textual representation of the segmented document. This can be either annotated (lists, text and tables are supported) or not and controlled via the boolean parameter ``annotate``.
* Media boxes of a PDF can be accessed using ``get_media_boxes()``, the page count over ``get_page_count()``.
Example call:
segmenter = PDFSegmenter(pdf)
segmenter.segment_document()
result = segmenter.segments2json()
text = segmenter.segments2text()
graphs = get_labeled_graphs()
A file is the only parameter mandatory for the page segmentation.
Beside the graph conversion, media boxes of a document can be accessed using ``get_media_boxes()`` and the page count over ``get_page_count()``.
JSON
=======
tbd
Annotated text
=================
tbd
Settings
==========
Clustering
=============
tbd
Merging
==========
tbd
Classification
=================
tbd
Graph
========
General parameters:
* ``file``: file name
* ``merge_boxes``: indicating if PDF text boxes should be graph nodes, based on visual rectangles present in documents.
* ``regress_parameters``: indicating if graph parameters are regressed or used as a priori optimized default ones.
Edge restrictions:
* ``use_font``: differing font size
* ``use_width``: differing width
* ``use_rect``: nodes contained in differing visual structures
* ``use_horizontal_overlap``: indicating if horizontal edges should be built on overlap. If not, default deltas are used.
* ``use_vertical_overlap``: indicating if vertical edges should be built on overlap. If not, default deltas are used.
Edge thresholds:
* ``page_ratio_x``: maximal relative horizontal distance of two nodes where an edge can be created
* ``page_ratio_y``: maximal relative vertical distance of two nodes where an edge can be created
* ``x_eps``: alignment epsilon for vertical edges in points if ``use_horizontal_overlap`` is not enabled
* ``y_eps``: alignment epsilon for horizontal edges in points if ``use_vertical_overlap`` is not enabled
* ``font_eps_h``: indicates how much font sizes of nodes are allowed to differ as a constraint for building horizontal edges when ``use_font`` is enabled
* ``font_eps_v``: indicates how much font sizes of nodes are allowed to differ as a constraint for building vertical edges when ``use_font`` is enabled
* ``width_pct_eps``: relative width difference of nodes as a condition for vertical edges if ``use_width`` is enabled
* ``width_page_eps``: indicating at which maximal width of a node the width should act as an edge condition if ``use_width`` is enabled
Project Structure
===================
tbd
Output Format
===============
JSON
=======
tbd
Text
=======
tbd
Graph
========
As a result, a list of ``networkx`` graphs is returned.
Each graph encapsulates a structured representation of a single page.
Edges are attributed with the following features:
* ``direction``: shows the direction of an edge.
  * ``v``: Vertical edge
  * ``h``: Horizontal edge
  * ``l``: Rectangular loop. This represents a novel concept encapsulating structural characteristics of document segments by observing if two different paths end up in the same node.
* ``length``: Scaled length of an edge
* ``lengthx_phys``: Horizontal edge length
* ``lengthy_phys``: Vertical edge length
* ``weight``: Scaled total length
All nodes contain the following content attributes:
* ``id``: unique identifier of the PDF element
* ``page``: page number, starting with 0
* ``text``: text of the PDF element
* ``x_0``: left x coordinate
* ``x_1``: right x coordinate
* ``y_0``: top y coordinate
* ``y_1``: bottom y coordinate
* ``pos_x``: center x coordinate
* ``pos_y``: center y coordinate
* ``abs_pos``: tuple containing a page independent representation of ``(pos_x,pos_y)`` coordinates
* ``original_font``: font as extracted by pdfminer
* ``font_name``: name of the font extracted from ``original_font``
* ``code``: font code as provided by pdfminer
* ``bold``: factor 1 indicating that a text is bold and 0 otherwise
* ``italic``: factor 1 indicating that a text is italic and 0 otherwise
* ``font_size``: size of the text in points
* ``masked``: text with numeric content substituted as #
* ``frequency_hist``: histogram of character type frequencies in a text, stored as a tuple containing percentages of textual, numerical, text symbolic and other symbols
* ``len_text``: number of characters
* ``n_tokens``: number of words
* ``tag``: tag for key-value pair extractions, indicating keys or values based on simple heuristics
* ``box``: box extracted by pdfminer Layout Analysis
* ``in_element_ids``: contains IDs of surrounding visual elements such as rectangles or lists. They are stored as a list [left, right, top, bottom]. -1 is indicating that there is no adjacent visual element.
* ``in_element``: indicates based on in_element_ids whether an element is stored in a visual rectangle representation (stored as "rectangle") or not (stored as "none").
* ``is_loop``: indicates whether or not a node is connected via a rectangular loop
The media boxes possess the following entries in a dictionary:
* ``x0``: Left x page crop box coordinate
* ``x1``: Right x page crop box coordinate
* ``y0``: Top y page crop box coordinate
* ``y1``: Bottom y page crop box coordinate
* ``x0page``: Left x page coordinate
* ``x1page``: Right x page coordinate
* ``y0page``: Top y page coordinate
* ``y1page``: Bottom y page coordinate
Acknowledgements
==================
* Example PDFs are obtained from the ICDAR Table Recognition Challenge 2013 https://roundtrippdf.com/en/data-extraction/pdf-table-recognition-dataset/.
Authors
=========
* Michael Benedikt Aigner
* Florian Preis
| PypiClean |
/global-workqueue-2.2.4rc2.tar.gz/global-workqueue-2.2.4rc2/src/python/WMCore/WMFactory.py | from builtins import object
import threading
class WMFactory(object):
    """
    A factory class that is 'not thread safe' but is intended to work in
    threads (no sharing).  The factory dynamically loads objects from modules
    when needed and caches the resulting instances.
    """

    def __init__(self, name, namespace=''):
        """
        Initialize the factory and register it on the current thread.

        Checks whether this thread already has an attribute for storing
        factories; it uses the reserved 'factory' attribute on the thread.

        :param name: key under which this factory registers itself in the
            current thread's ``factory`` dictionary.
        :param namespace: optional package prefix prepended to class names
            when importing (e.g. ``'WMCore.Services'``).
        """
        self.namespace = namespace
        # Cache of classname -> instance, populated by loadObject().
        self.objectList = {}
        # current_thread() replaces the deprecated currentThread() alias,
        # which has been removed from recent Python releases.
        myThread = threading.current_thread()
        if not hasattr(myThread, "factory"):
            myThread.factory = {}
        myThread.factory[name] = self

    def loadObject(self, classname, args=None, storeInCache=True,
                   getFromCache=True, listFlag=False, alteredClassName=None):
        """
        Dynamically load a class from a module and instantiate it.

        For this to work the class name has to be the same as the module
        (file) name, unless ``alteredClassName`` names the class found inside
        the module ``classname``.

        By default instances are served from the cache.  If you want several
        different instances of the same class in one thread, pass
        ``getFromCache=False``.

        :param args: constructor arguments; a dict is expanded as keyword
            arguments, a list is expanded positionally when ``listFlag`` is
            True, anything else is passed as a single positional argument.

        NOTE(review): when ``alteredClassName`` is given, the cache is checked
        under the original ``classname`` but the instance is stored under the
        altered name -- confirm before relying on caching in that case.
        """
        if getFromCache:
            if classname in self.objectList:
                return self.objectList[classname]
        if self.namespace == '':
            module = classname
        else:
            module = "%s.%s" % (self.namespace, classname)
        if alteredClassName:
            classname = alteredClassName
        module = __import__(module, globals(), locals(), [classname])
        obj = getattr(module, classname.split('.')[-1])
        if args is None:
            classinstance = obj()
        else:
            # This handles the passing of list-style arguments instead of dicts
            # Primarily for setting the schema
            # Or anywhere you need arguments of the form (a,b,c,...)
            if isinstance(args, list) and listFlag:
                classinstance = obj(*args)
            elif isinstance(args, dict):
                classinstance = obj(**args)
            else:
                # But if you actually need to pass a list, better do it the old fashioned way
                classinstance = obj(args)
        if storeInCache:
            self.objectList[classname] = classinstance
        return classinstance
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/network/latest/virtual_network_tap.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['VirtualNetworkTap']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:network:VirtualNetworkTap'.""", DeprecationWarning)
class VirtualNetworkTap(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:network:VirtualNetworkTap'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_load_balancer_front_end_ip_configuration: Optional[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]] = None,
destination_network_interface_ip_configuration: Optional[pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']]] = None,
destination_port: Optional[pulumi.Input[int]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tap_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Virtual Network Tap resource.
Latest API Version: 2020-08-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']] destination_load_balancer_front_end_ip_configuration: The reference to the private IP address on the internal Load Balancer that will receive the tap.
:param pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']] destination_network_interface_ip_configuration: The reference to the private IP Address of the collector nic that will receive the tap.
:param pulumi.Input[int] destination_port: The VXLAN destination port that will receive the tapped traffic.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] tap_name: The name of the virtual network tap.
"""
pulumi.log.warn("VirtualNetworkTap is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:network:VirtualNetworkTap'.")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['destination_load_balancer_front_end_ip_configuration'] = destination_load_balancer_front_end_ip_configuration
__props__['destination_network_interface_ip_configuration'] = destination_network_interface_ip_configuration
__props__['destination_port'] = destination_port
__props__['id'] = id
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
if tap_name is None and not opts.urn:
raise TypeError("Missing required property 'tap_name'")
__props__['tap_name'] = tap_name
__props__['etag'] = None
__props__['name'] = None
__props__['network_interface_tap_configurations'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VirtualNetworkTap")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualNetworkTap, __self__).__init__(
'azure-nextgen:network/latest:VirtualNetworkTap',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualNetworkTap':
"""
Get an existing VirtualNetworkTap resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return VirtualNetworkTap(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="destinationLoadBalancerFrontEndIPConfiguration")
def destination_load_balancer_front_end_ip_configuration(self) -> pulumi.Output[Optional['outputs.FrontendIPConfigurationResponse']]:
"""
The reference to the private IP address on the internal Load Balancer that will receive the tap.
"""
return pulumi.get(self, "destination_load_balancer_front_end_ip_configuration")
@property
@pulumi.getter(name="destinationNetworkInterfaceIPConfiguration")
def destination_network_interface_ip_configuration(self) -> pulumi.Output[Optional['outputs.NetworkInterfaceIPConfigurationResponse']]:
"""
The reference to the private IP Address of the collector nic that will receive the tap.
"""
return pulumi.get(self, "destination_network_interface_ip_configuration")
@property
@pulumi.getter(name="destinationPort")
def destination_port(self) -> pulumi.Output[Optional[int]]:
"""
The VXLAN destination port that will receive the tapped traffic.
"""
return pulumi.get(self, "destination_port")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
    """
    A unique read-only string that changes whenever the resource is updated.

    Useful for optimistic-concurrency checks against the Azure API.
    """
    return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
    """
    Resource location (Azure region, e.g. ``westus2``).
    """
    return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Resource name as reported by Azure (read-only).
    """
    return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaceTapConfigurations")
def network_interface_tap_configurations(self) -> pulumi.Output[Sequence['outputs.NetworkInterfaceTapConfigurationResponse']]:
    """
    Specifies the list of resource IDs for the network interface IP configuration that needs to be tapped.

    Read-only resource output resolved by the Pulumi engine.
    """
    return pulumi.get(self, "network_interface_tap_configurations")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
    """
    The provisioning state of the virtual network tap resource.
    """
    return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
    """
    The resource GUID property of the virtual network tap resource.
    """
    return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """
    Resource tags (arbitrary key/value metadata attached in Azure).
    """
    return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
    """
    Resource type (read-only, e.g. ``Microsoft.Network/virtualNetworkTaps``
    — presumably; verify against the Azure API).
    """
    return pulumi.get(self, "type")
def translate_output_property(self, prop):
    """Map a provider (camelCase) output property name to snake_case, if known."""
    snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return snake_name if snake_name else prop
def translate_input_property(self, prop):
    """Map a snake_case input property name to the provider's camelCase, if known."""
    camel_name = _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop)
    return camel_name if camel_name else prop
/howso_engine-1.0.111-py3-none-any.whl/howso/community/client.py | from concurrent.futures import ThreadPoolExecutor, Future
from contextlib import contextmanager
import json
import logging
from typing import Union
from packaging.version import parse as parse_version
import certifi
from howso.direct import HowsoDirectClient
from howso.community import __version__ as community_version
import urllib3
from urllib3.util import Retry, Timeout
logger = logging.getLogger(__name__)

# Endpoint queried for the latest published Howso Engine version.
VERSION_HOST = "https://version-check.diveplane.com"
# If True, the version has already been checked for this process.
_VERSION_CHECKED = False
@contextmanager
def squelch_logs(log_level: int):
    """Temporarily disable all log records at or below *log_level*.

    The previous global disable threshold is restored on exit, even when the
    wrapped block raises.
    """
    previous_level = logging.root.manager.disable
    logging.disable(log_level)
    try:
        yield
    finally:
        logging.disable(previous_level)
class HowsoEngineClient(HowsoDirectClient):
    """
    Creates a distinct HowsoDirectClient for howso-engine.

    On first construction in a process (unless ``check_version=False`` is
    passed), an HTTP request is made on a background thread to see whether a
    newer release is available; the result is only logged, never raised.

    Parameters
    ----------
    verbose : bool, default False
        Set verbose output.
    debug: bool, default False
        Sets logger debug output.
    """

    def __init__(self, verbose=False, debug=False, **kwargs):
        """
        Creates a HowsoClient which executes via a direct interface using dynamic libraries.

        Parameters
        ----------
        verbose : bool, default False
            Set verbose output.
        debug: bool, default False
            Sets logger debug output.
        """
        global _VERSION_CHECKED
        check_version = kwargs.pop("check_version", True)
        if check_version and not _VERSION_CHECKED:
            _VERSION_CHECKED = True
            # Bug fix: the previous ``with ThreadPoolExecutor(...)`` block
            # called shutdown(wait=True) on exit, which blocked __init__ until
            # the HTTP request finished — defeating the background check.
            # Shut down without waiting; the worker thread still completes and
            # the done-callback still fires.
            executor = ThreadPoolExecutor(max_workers=1)
            self.version_check_task = executor.submit(self.check_version)
            self.version_check_task.add_done_callback(self.report_version)
            executor.shutdown(wait=False)
        super().__init__(verbose=verbose, debug=debug, **kwargs)

    def check_version(self) -> Union[str, None]:
        """Check if there is a more recent version.

        Returns
        -------
        str or None
            The latest published version string, or None if absent from the
            service payload.

        Raises
        ------
        AssertionError
            If the version service does not answer with a 2xx status.
        """
        http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                                   ca_certs=certifi.where(),
                                   retries=Retry(total=1),
                                   timeout=Timeout(total=3),
                                   maxsize=10)
        url = f"{VERSION_HOST}/v1/how-reactor-community?version={community_version}"
        # Hide any log noise emitted by urllib3 during this best-effort call.
        with squelch_logs(logging.WARNING + 1):
            response = http.request(method="GET", url=url)
        if 200 <= response.status < 300:
            payload = json.loads(response.data.decode('utf-8'))
            return payload.get('version')
        raise AssertionError("Not OK response.")

    def report_version(self, task: Future):
        """Done-callback for the version check: log when an update exists."""
        try:
            latest_version = task.result()
        except Exception:
            # Version checking is best-effort; never surface its failures.
            pass
        else:
            if latest_version and latest_version != community_version:
                if parse_version(latest_version) > parse_version(community_version):
                    logger.warning(
                        f"Version {latest_version} of Howso® Engine is "
                        f"available. You are using version {community_version}.")
                elif parse_version(latest_version) < parse_version(community_version):
                    logger.debug(
                        f"Version {latest_version} of Howso® Engine is "
                        f"available. You are using version {community_version}. "
                        f"This is a pre-release version.")
/networks/feature_extraction_model.py | import logging
import os
import random
import zipfile
from glob import glob
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from ..utils.im import IM_EXTENSIONS, ResizeDataset, denormalise_tensors
from ..utils.settings import ExtractionSettings, NetworkSettings
from ..utils.stats import histogram_per_channel
def get_min_max_features(acc_feats, feats, layer):
    """Fold a batch's per-position min/max activations into the accumulator.

    The accumulator stores, for each layer, a tensor with two channels in the
    last dimension: ``[..., 0]`` holds the running minimum and ``[..., 1]``
    the running maximum over all samples seen so far.

    Args:
        acc_feats (dict): Accumulated min/max tensors per layer (may be empty).
        feats (dict): Current batch's feature tensors per layer.
        layer (str): Layer key to update.

    Returns:
        dict: ``acc_feats`` with the entry for ``layer`` updated.
    """
    with torch.no_grad():
        # Compute the batch's min/max once (the previous implementation
        # duplicated this computation in both branches below).
        b_min_max = torch.stack(
            (
                torch.min(feats[layer], dim=0, keepdim=True).values,
                torch.max(feats[layer], dim=0, keepdim=True).values,
            ),
            dim=-1,
        )
        if layer in acc_feats:
            # Merge with the running accumulator element-wise.
            acc_feats[layer] = torch.stack(
                (
                    torch.min(acc_feats[layer][..., 0], b_min_max[..., 0]),
                    torch.max(acc_feats[layer][..., 1], b_min_max[..., 1]),
                ),
                dim=-1,
            )
        else:
            # First batch: the batch extrema are the running extrema.
            acc_feats[layer] = b_min_max
    return acc_feats
def get_pre_hist_norm_params_from_min_max(
    min_max_per_neuron: Dict[str, Dict[str, Dict[int, float]]], range_scale: float, device: str = "cpu"
) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]:
    """Get the parameters for normalisation of the histogram of the neuron activations.

    For each layer, the mean is the midpoint of the (scaled) min/max range and
    the std is half the range width plus a small epsilon (avoids division by
    zero for constant neurons). Tensors are shaped ``(1, C, 1, 1)`` so they
    broadcast over NCHW feature maps.

    Args:
        min_max_per_neuron (dict): Dictionary with ``mins_per_neuron`` and
            ``maxs_per_neuron`` entries mapping layer -> neuron -> activation.
        range_scale (float): Scaling to apply to the range boundaries before
            finding normalisation parameters.
        device (str): Device on which the resulting tensors are allocated.

    Returns:
        tuple: Dictionaries with the mean and std tensors for each layer.
    """
    norm_means_per_layer = {}
    norm_stds_per_layer = {}
    for layer, layer_mins in min_max_per_neuron["mins_per_neuron"].items():
        layer_maxs = min_max_per_neuron["maxs_per_neuron"][layer]
        # Iterate the mins-dict keys for both tensors so channel order matches.
        # (torch.tensor with an explicit dtype replaces legacy torch.DoubleTensor.)
        min_activations = torch.tensor(
            [layer_mins[neuron] * range_scale for neuron in layer_mins], dtype=torch.float64
        )
        max_activations = torch.tensor(
            [layer_maxs[neuron] * range_scale for neuron in layer_mins], dtype=torch.float64
        )
        mean = (min_activations + max_activations) / 2
        std = (max_activations - min_activations) / 2 + 1e-8
        norm_means_per_layer[layer] = mean.reshape(1, -1, 1, 1).to(device)
        norm_stds_per_layer[layer] = std.reshape(1, -1, 1, 1).to(device)
    return norm_means_per_layer, norm_stds_per_layer
class FeatureExtractionModel(nn.Module):
    """Base ``nn.Module`` for extracting per-layer features from image data.

    Subclasses implement :meth:`get_features`, returning a dict mapping layer
    names to activation tensors. This base class provides data loading,
    optional per-neuron normalisation, and the accumulation strategies
    (spatial averaging, histograms, or min/max tracking) selected through
    ``extraction_settings``.
    """

    def __init__(
        self,
        network_settings: NetworkSettings = None,
        extraction_settings: ExtractionSettings = None,
        activation_ranges_per_neuron=None,
    ):
        """Build the extraction wrapper.

        Args:
            network_settings: Input size / normalisation description of the
                wrapped network. Defaults to ``NetworkSettings()``.
            extraction_settings: Device, batching and accumulation options.
                Defaults to ``ExtractionSettings()``.
            activation_ranges_per_neuron: Dict with ``mins_per_neuron`` and
                ``maxs_per_neuron`` entries used to derive the pre-histogram
                normalisation parameters. Defaults to empty ranges.
        """
        super().__init__()
        # Bug fix: the previous signature used mutable default arguments
        # (dict and settings instances created once at def-time and shared
        # by every instance); instantiate per call instead.
        self.network_settings = network_settings if network_settings is not None else NetworkSettings()
        self.extraction_settings = (
            extraction_settings if extraction_settings is not None else ExtractionSettings()
        )
        if activation_ranges_per_neuron is None:
            activation_ranges_per_neuron = {"mins_per_neuron": {}, "maxs_per_neuron": {}}
        self.norm_means_per_layer, self.norm_stds_per_layer = get_pre_hist_norm_params_from_min_max(
            activation_ranges_per_neuron,
            self.extraction_settings.range_scale_for_norm_params,
            device=self.extraction_settings.device,
        )
        self.name = "not_set"

    def get_features(self, batch):
        """Compute the features for a batch of images.

        Subclasses must override this; it should return a dict with the
        features at each layer. (The docstring previously sat *after* the
        ``raise`` statement and was therefore dead code.)
        """
        raise NotImplementedError

    def get_batch_features(self, batch, device):
        """Move *batch* to *device* and extract its features."""
        return self.get_features(batch.to(device))

    def get_dataloader(self, data_settings):
        """Build a DataLoader that resizes/normalises the configured source."""
        # The source may be a list of in-memory numpy images instead of paths.
        if isinstance(data_settings.source, list) and isinstance(data_settings.source[0], np.ndarray):
            images = data_settings.source
            l_files = None
        else:
            images = None
            l_files = self.get_files_list(data_settings)
        dataset = ResizeDataset(
            l_files,
            images,
            crop_to_square_pre_resize=data_settings.crop_to_square_pre_resize,
            size=self.network_settings.expected_size,
            resize_mode=data_settings.resize_mode,
            norm_mean=self.network_settings.norm_mean,
            norm_std=self.network_settings.norm_std,
        )
        # Optional user hooks overriding the dataset's default transforms.
        if data_settings.custom_np_image_tranform is not None:
            dataset.custom_np_image_tranform = data_settings.custom_np_image_tranform
        if data_settings.custom_pil_image_tranform is not None:
            dataset.custom_pil_image_tranform = data_settings.custom_pil_image_tranform
        if data_settings.custom_fn_resize is not None:
            dataset.fn_resize = data_settings.custom_fn_resize
        dataloader = torch.utils.data.DataLoader(
            dataset,
            batch_size=self.extraction_settings.batch_size,
            shuffle=False,
            drop_last=False,
            num_workers=self.extraction_settings.num_workers,
        )
        return dataloader

    def get_data_features(self, data_settings):
        """Extract and accumulate features over the whole data source.

        Returns:
            tuple: (accumulated features per layer, dataset size, list of
            denormalised sample image tensors).
        """
        # wrap the images in a dataloader for parallelizing the resize operation
        dataloader = self.get_dataloader(data_settings)
        if (
            not self.extraction_settings.average_feats_spatially
            and not self.extraction_settings.accumulate_spatial_feats_in_hist
            and not self.extraction_settings.keep_only_min_max
        ):
            logging.warning(
                "Will try to accumulate all features from dataset with no spatial reduction, expect huge RAM usage if using many images!"
            )
        device = torch.device(self.extraction_settings.device)
        # collect all features
        acc_feats = {}
        if self.extraction_settings.verbose:
            pbar = tqdm(dataloader, desc=self.extraction_settings.description)
        else:
            pbar = dataloader
        for batch in pbar:
            with torch.no_grad():
                feats = self.get_batch_features(batch, device)
                if self.extraction_settings.average_feats_spatially:
                    # Reduce H and W; keepdim so normalisation still broadcasts.
                    for layer in feats:
                        feats[layer] = torch.mean(feats[layer], dim=(2, 3), keepdim=True)
                if self.extraction_settings.normalise_feats:
                    for layer in feats:
                        feats[layer] = (feats[layer] - self.norm_means_per_layer[layer]) / self.norm_stds_per_layer[layer]
                if (
                    self.extraction_settings.accumulate_spatial_feats_in_hist
                    or self.extraction_settings.accumulate_sample_feats_in_hist
                ):
                    for layer in feats:
                        feats[layer] = histogram_per_channel(
                            feats[layer],
                            hist_nb_bins=self.extraction_settings.hist_nb_bins,
                            hist_range=self.extraction_settings.hist_range,
                        )
                for layer in feats:
                    if (
                        not self.extraction_settings.accumulate_sample_feats_in_hist
                        and not self.extraction_settings.keep_only_min_max
                    ):
                        # Keep all features for each batch in a list.
                        # (Appending in place fixes the quadratic cost of the
                        # previous `acc_feats.get(layer, []) + [...]` rebuild.)
                        acc_feats.setdefault(layer, []).append(feats[layer])
                    elif self.extraction_settings.keep_only_min_max:
                        # Keep only min and max features over all samples.
                        acc_feats = get_min_max_features(acc_feats, feats, layer)
                    else:
                        # Accumulate histograms.
                        if layer in acc_feats:
                            # Sum the histograms over all samples.
                            acc_feats[layer] += feats[layer]
                        else:
                            # If first batch, then just keep the histogram.
                            acc_feats[layer] = feats[layer]
        if (
            not self.extraction_settings.accumulate_sample_feats_in_hist
            and not self.extraction_settings.keep_only_min_max
        ):
            # Concatenate the per-batch tensors along the sample dimension.
            acc_feats = {layer: torch.cat(acc_feats[layer]) for layer in acc_feats}
        dataset = dataloader.dataset
        # Keep a few (denormalised) example images for inspection/reporting.
        n_sample_ims = min(len(dataset), self.extraction_settings.n_sample_images)
        sample_images = [dataset[i] for i in random.sample(range(len(dataset)), n_sample_ims)]
        sample_images = denormalise_tensors(
            sample_images, self.network_settings.norm_mean, self.network_settings.norm_std
        )
        return acc_feats, len(dataset), sample_images

    def get_files_list(self, data_settings):
        """Resolve the data source into a list of image file paths.

        Accepts an explicit list of paths, a single image path, a ``.txt``
        manifest (one path per line; relative paths are resolved against the
        manifest's directory), a ``.zip`` archive, or a directory searched
        recursively. Validation uses ``assert`` for historical compatibility
        (callers may rely on AssertionError), so it is skipped under ``-O``.
        """
        # get all relevant files in the dataset
        if isinstance(data_settings.source, list):
            # Check that all files exist and are images.
            for file in data_settings.source:
                assert Path(file).is_file(), f"File {file} does not exist"
                assert file.split(".")[-1] in IM_EXTENSIONS, f"File {file} is not an image"
            files = data_settings.source
        elif data_settings.source.split(".")[-1] in IM_EXTENSIONS:
            files = [data_settings.source]
        elif data_settings.source.split(".")[-1] == "txt":
            files = []
            # Read the manifest file, one image path per line.
            with open(data_settings.source, "r") as f:
                for line in f:
                    files.append(line.strip())
                    # If path is relative, make it absolute using the directory of the text file.
                    if not os.path.isabs(files[-1]):
                        files[-1] = os.path.join(os.path.abspath(os.path.dirname(data_settings.source)), files[-1])
                    # Check if the file is an image.
                    assert files[-1].split(".")[-1] in IM_EXTENSIONS, f"File {files[-1]} is not an image"
                    # Check if the file exists.
                    assert os.path.exists(files[-1]), f"File {files[-1]} does not exist"
            # Sort files for a deterministic order.
            files = sorted(files)
        elif ".zip" in data_settings.source:
            files = list(set(zipfile.ZipFile(data_settings.source).namelist()))
            # remove the non-image files inside the zip
            files = [x for x in files if os.path.splitext(x)[1].lower()[1:] in IM_EXTENSIONS]
        else:
            # Treat the source as a directory and search it recursively.
            files = sorted(
                [
                    file
                    for ext in IM_EXTENSIONS
                    for file in glob(os.path.join(data_settings.source, f"**/*.{ext}"), recursive=True)
                ]
            )
        if self.extraction_settings.verbose:
            print(f"Found {len(files)} images in the provided source")
        # use a subset number of files if needed
        if data_settings.num_images > 0 and data_settings.num_images < len(files):
            if data_settings.shuffle_files:
                random.seed(self.extraction_settings.seed)
                random.shuffle(files)
            files = files[: data_settings.num_images]
        if self.extraction_settings.verbose:
            print(f"Using {len(files)} images")
        return files

    def forward(self, x):
        """Alias for :meth:`get_features` so the module is directly callable."""
        # Bug fix: the features were computed but never returned.
        return self.get_features(x)
/Django_patch-2.2.19-py3-none-any.whl/django/contrib/gis/forms/widgets.py | import logging
from django.conf import settings
from django.contrib.gis import gdal
from django.contrib.gis.geometry import json_regex
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.forms.widgets import Widget
from django.utils import translation
# Module-level logger shared by all GIS form widgets.
logger = logging.getLogger('django.contrib.gis')
class BaseGeometryWidget(Widget):
    """
    The base class for rich geometry widgets.

    Render a map using the WKT of the geometry.
    """
    geom_type = 'GEOMETRY'  # OGC geometry type accepted by the widget.
    map_srid = 4326         # SRID the map is displayed in.
    map_width = 600
    map_height = 400
    display_raw = False     # Whether to also show the raw textarea.
    supports_3d = False
    template_name = ''  # set on subclasses

    def __init__(self, attrs=None):
        # Seed widget attrs from the class-level defaults, then let any
        # caller-supplied attrs override them.
        self.attrs = {}
        for key in ('geom_type', 'map_srid', 'map_width', 'map_height', 'display_raw'):
            self.attrs[key] = getattr(self, key)
        if attrs:
            self.attrs.update(attrs)

    def serialize(self, value):
        """Serialize the geometry as WKT ('' for empty values)."""
        return value.wkt if value else ''

    def deserialize(self, value):
        """Build a GEOSGeometry from *value*; log and return None on failure."""
        try:
            return GEOSGeometry(value)
        except (GEOSException, ValueError, TypeError) as err:
            logger.error("Error creating geometry from value '%s' (%s)", value, err)
            return None

    def get_context(self, name, value, attrs):
        """Build the template context, transforming the geometry to the map SRID."""
        context = super().get_context(name, value, attrs)
        # If a string reaches here (via a validation error on another
        # field) then just reconstruct the Geometry.
        if value and isinstance(value, str):
            value = self.deserialize(value)
        if value:
            # Check that srid of value and map match
            if value.srid and value.srid != self.map_srid:
                try:
                    ogr = value.ogr
                    ogr.transform(self.map_srid)
                    value = ogr
                except gdal.GDALException as err:
                    # Transformation failed: render the geometry untransformed.
                    logger.error(
                        "Error transforming geometry from srid '%s' to srid '%s' (%s)",
                        value.srid, self.map_srid, err
                    )
        context.update(self.build_attrs(self.attrs, {
            'name': name,
            'module': 'geodjango_%s' % name.replace('-', '_'),  # JS-safe
            'serialized': self.serialize(value),
            'geom_type': gdal.OGRGeomType(self.attrs['geom_type']),
            'STATIC_URL': settings.STATIC_URL,
            'LANGUAGE_BIDI': translation.get_language_bidi(),
            **(attrs or {}),
        }))
        return context
class OpenLayersWidget(BaseGeometryWidget):
    """Geometry widget rendered with the OpenLayers JavaScript library."""
    template_name = 'gis/openlayers.html'
    map_srid = 3857

    class Media:
        css = {
            'all': (
                'https://cdnjs.cloudflare.com/ajax/libs/ol3/4.6.5/ol.css',
                'gis/css/ol3.css',
            )
        }
        js = (
            'https://cdnjs.cloudflare.com/ajax/libs/ol3/4.6.5/ol.js',
            'gis/js/OLMapWidget.js',
        )

    def serialize(self, value):
        """Serialize the geometry as GeoJSON ('' for empty values)."""
        if not value:
            return ''
        return value.json

    def deserialize(self, value):
        """Deserialize GeoJSON, forcing the map's SRID onto the geometry."""
        geom = super().deserialize(value)
        # GeoJSON assumes WGS84 (4326). Use the map's SRID instead.
        if geom and self.map_srid != 4326 and json_regex.match(value):
            geom.srid = self.map_srid
        return geom
class OSMWidget(OpenLayersWidget):
    """
    An OpenLayers/OpenStreetMap-based widget.
    """
    template_name = 'gis/openlayers-osm.html'
    default_lon = 5
    default_lat = 47
    default_zoom = 12

    def __init__(self, attrs=None):
        super().__init__()
        # Expose the default viewport to the template context, then apply
        # any caller-supplied overrides.
        viewport_keys = ('default_lon', 'default_lat', 'default_zoom')
        self.attrs.update({key: getattr(self, key) for key in viewport_keys})
        if attrs:
            self.attrs.update(attrs)
/zhaohuan-yuzhou-nengliangde-xingyun-zhenglishu-2022.10.7.1.tar.gz/zhaohuan-yuzhou-nengliangde-xingyun-zhenglishu-2022.10.7.1/ZhaohuanYuzhouNengliangdeXingyunZhenglishu/README.md | # 召唤宇宙能量的幸运整理术
## 下载
### Docker
```
docker pull apachecn0/zhaohuan-yuzhou-nengliangde-xingyun-zhenglishu
docker run -tid -p <port>:80 apachecn0/zhaohuan-yuzhou-nengliangde-xingyun-zhenglishu
# 访问 http://localhost:<port> 查看文档
```
### PYPI
```
pip install zhaohuan-yuzhou-nengliangde-xingyun-zhenglishu
zhaohuan-yuzhou-nengliangde-xingyun-zhenglishu <port>
# 访问 http://localhost:<port> 查看文档
```
### NPM
```
npm install -g zhaohuan-yuzhou-nengliangde-xingyun-zhenglishu
zhaohuan-yuzhou-nengliangde-xingyun-zhenglishu <port>
# 访问 http://localhost:<port> 查看文档
```
/tensorflow_intel-2.14.0rc0-cp39-cp39-win_amd64.whl/tensorflow/_api/v2/__internal__/distribute/combinations/__init__.py | import sys as _sys
from tensorflow.python.distribute.combinations import env
from tensorflow.python.distribute.combinations import generate
from tensorflow.python.distribute.combinations import in_main_process
from tensorflow.python.distribute.strategy_combinations import central_storage_strategy_with_gpu_and_cpu
from tensorflow.python.distribute.strategy_combinations import central_storage_strategy_with_two_gpus
from tensorflow.python.distribute.strategy_combinations import cloud_tpu_strategy
from tensorflow.python.distribute.strategy_combinations import default_strategy
from tensorflow.python.distribute.strategy_combinations import mirrored_strategy_with_cpu_1_and_2
from tensorflow.python.distribute.strategy_combinations import mirrored_strategy_with_gpu_and_cpu
from tensorflow.python.distribute.strategy_combinations import mirrored_strategy_with_one_cpu
from tensorflow.python.distribute.strategy_combinations import mirrored_strategy_with_one_gpu
from tensorflow.python.distribute.strategy_combinations import mirrored_strategy_with_two_cpus
from tensorflow.python.distribute.strategy_combinations import mirrored_strategy_with_two_gpus
from tensorflow.python.distribute.strategy_combinations import mirrored_strategy_with_two_gpus_no_merge_call
from tensorflow.python.distribute.strategy_combinations import multi_worker_mirrored_2x1_cpu
from tensorflow.python.distribute.strategy_combinations import multi_worker_mirrored_2x1_gpu
from tensorflow.python.distribute.strategy_combinations import multi_worker_mirrored_2x1_gpu_noshare
from tensorflow.python.distribute.strategy_combinations import multi_worker_mirrored_2x2_gpu
from tensorflow.python.distribute.strategy_combinations import multi_worker_mirrored_2x2_gpu_no_merge_call
from tensorflow.python.distribute.strategy_combinations import one_device_strategy
from tensorflow.python.distribute.strategy_combinations import one_device_strategy_gpu
from tensorflow.python.distribute.strategy_combinations import parameter_server_strategy_1worker_2ps_1gpu
from tensorflow.python.distribute.strategy_combinations import parameter_server_strategy_1worker_2ps_cpu
from tensorflow.python.distribute.strategy_combinations import parameter_server_strategy_3worker_2ps_1gpu
from tensorflow.python.distribute.strategy_combinations import parameter_server_strategy_3worker_2ps_cpu
from tensorflow.python.distribute.strategy_combinations import tpu_strategy
from tensorflow.python.distribute.strategy_combinations import tpu_strategy_one_core
from tensorflow.python.distribute.strategy_combinations import tpu_strategy_one_core
/tzcelery-3.1.27.tar.gz/tzcelery-3.1.27/docs/userguide/canvas.rst | .. _guide-canvas:
=============================
Canvas: Designing Workflows
=============================
.. contents::
:local:
:depth: 2
.. _canvas-subtasks:
.. _canvas-signatures:
Signatures
==========
.. versionadded:: 2.0
You just learned how to call a task using the tasks ``delay`` method
in the :ref:`calling <guide-calling>` guide, and this is often all you need,
but sometimes you may want to pass the signature of a task invocation to
another process or as an argument to another function.
A :func:`~celery.signature` wraps the arguments, keyword arguments, and execution options
of a single task invocation in a way such that it can be passed to functions
or even serialized and sent across the wire.
Signatures are often nicknamed "subtasks" because they describe a task to be called
within a task.
- You can create a signature for the ``add`` task using its name like this::
>>> from celery import signature
>>> signature('tasks.add', args=(2, 2), countdown=10)
tasks.add(2, 2)
This task has a signature of arity 2 (two arguments): ``(2, 2)``,
and sets the countdown execution option to 10.
- or you can create one using the task's ``subtask`` method::
>>> add.subtask((2, 2), countdown=10)
tasks.add(2, 2)
- There is also a shortcut using star arguments::
>>> add.s(2, 2)
tasks.add(2, 2)
- Keyword arguments are also supported::
>>> add.s(2, 2, debug=True)
tasks.add(2, 2, debug=True)
- From any signature instance you can inspect the different fields::
>>> s = add.subtask((2, 2), {'debug': True}, countdown=10)
>>> s.args
(2, 2)
>>> s.kwargs
{'debug': True}
>>> s.options
{'countdown': 10}
- It supports the "Calling API" which means it supports ``delay`` and
``apply_async`` or being called directly.
Calling the signature will execute the task inline in the current process::
>>> add(2, 2)
4
>>> add.s(2, 2)()
4
``delay`` is our beloved shortcut to ``apply_async`` taking star-arguments::
>>> result = add.delay(2, 2)
>>> result.get()
4
``apply_async`` takes the same arguments as the :meth:`Task.apply_async <@Task.apply_async>` method::
>>> add.apply_async(args, kwargs, **options)
>>> add.subtask(args, kwargs, **options).apply_async()
>>> add.apply_async((2, 2), countdown=1)
>>> add.subtask((2, 2), countdown=1).apply_async()
- You can't define options with :meth:`[email protected]`, but a chaining
``set`` call takes care of that::
>>> add.s(2, 2).set(countdown=1)
proj.tasks.add(2, 2)
Partials
--------
With a signature, you can execute the task in a worker::
>>> add.s(2, 2).delay()
>>> add.s(2, 2).apply_async(countdown=1)
Or you can call it directly in the current process::
>>> add.s(2, 2)()
4
Specifying additional args, kwargs or options to ``apply_async``/``delay``
creates partials:
- Any arguments added will be prepended to the args in the signature::
>>> partial = add.s(2) # incomplete signature
>>> partial.delay(4) # 2 + 4
>>> partial.apply_async((4,)) # same
- Any keyword arguments added will be merged with the kwargs in the signature,
with the new keyword arguments taking precedence::
>>> s = add.s(2, 2)
>>> s.delay(debug=True) # -> add(2, 2, debug=True)
>>> s.apply_async(kwargs={'debug': True}) # same
- Any options added will be merged with the options in the signature,
with the new options taking precedence::
>>> s = add.subtask((2, 2), countdown=10)
>>> s.apply_async(countdown=1) # countdown is now 1
You can also clone signatures to create derivatives:
>>> s = add.s(2)
proj.tasks.add(2)
>>> s.clone(args=(4,), kwargs={'debug': True})
proj.tasks.add(4, 2, debug=True)
Immutability
------------
.. versionadded:: 3.0
Partials are meant to be used with callbacks, any tasks linked or chord
callbacks will be applied with the result of the parent task.
Sometimes you want to specify a callback that does not take
additional arguments, and in that case you can set the signature
to be immutable::
>>> add.apply_async((2, 2), link=reset_buffers.subtask(immutable=True))
The ``.si()`` shortcut can also be used to create immutable signatures::
>>> add.apply_async((2, 2), link=reset_buffers.si())
Only the execution options can be set when a signature is immutable,
so it's not possible to call the signature with partial args/kwargs.
.. note::
In this tutorial I sometimes use the prefix operator `~` to signatures.
You probably shouldn't use it in your production code, but it's a handy shortcut
when experimenting in the Python shell::
>>> ~sig
>>> # is the same as
>>> sig.delay().get()
.. _canvas-callbacks:
Callbacks
---------
.. versionadded:: 3.0
Callbacks can be added to any task using the ``link`` argument
to ``apply_async``::
add.apply_async((2, 2), link=other_task.s())
The callback will only be applied if the task exited successfully,
and it will be applied with the return value of the parent task as argument.
As I mentioned earlier, any arguments you add to a signature,
will be prepended to the arguments specified by the signature itself!
If you have the signature::
>>> sig = add.s(10)
then `sig.delay(result)` becomes::
>>> add.apply_async(args=(result, 10))
...
Now let's call our ``add`` task with a callback using partial
arguments::
>>> add.apply_async((2, 2), link=add.s(8))
As expected this will first launch one task calculating :math:`2 + 2`, then
another task calculating :math:`4 + 8`.
The Primitives
==============
.. versionadded:: 3.0
.. topic:: Overview
- ``group``
The group primitive is a signature that takes a list of tasks that should
be applied in parallel.
- ``chain``
The chain primitive lets us link together signatures so that one is called
after the other, essentially forming a *chain* of callbacks.
- ``chord``
A chord is just like a group but with a callback. A chord consists
of a header group and a body, where the body is a task that should execute
after all of the tasks in the header are complete.
- ``map``
The map primitive works like the built-in ``map`` function, but creates
a temporary task where a list of arguments is applied to the task.
E.g. ``task.map([1, 2])`` results in a single task
being called, applying the arguments in order to the task function so
that the result is::
res = [task(1), task(2)]
- ``starmap``
Works exactly like map except the arguments are applied as ``*args``.
For example ``add.starmap([(2, 2), (4, 4)])`` results in a single
task calling::
res = [add(2, 2), add(4, 4)]
- ``chunks``
Chunking splits a long list of arguments into parts, e.g the operation::
>>> items = zip(xrange(1000), xrange(1000)) # 1000 items
>>> add.chunks(items, 10)
will split the list of items into chunks of 10, resulting in 100
tasks (each processing 10 items in sequence).
The primitives are also signature objects themselves, so that they can be combined
in any number of ways to compose complex workflows.
Here are some examples:
- Simple chain
Here's a simple chain, the first task executes passing its return value
to the next task in the chain, and so on.
.. code-block:: python
>>> from celery import chain
# 2 + 2 + 4 + 8
>>> res = chain(add.s(2, 2), add.s(4), add.s(8))()
>>> res.get()
16
This can also be written using pipes::
>>> (add.s(2, 2) | add.s(4) | add.s(8))().get()
16
- Immutable signatures
Signatures can be partial so arguments can be
added to the existing arguments, but you may not always want that,
for example if you don't want the result of the previous task in a chain.
In that case you can mark the signature as immutable, so that the arguments
cannot be changed::
>>> add.subtask((2, 2), immutable=True)
There's also an ``.si`` shortcut for this::
>>> add.si(2, 2)
Now you can create a chain of independent tasks instead::
>>> res = (add.si(2, 2) | add.si(4, 4) | add.s(8, 8))()
>>> res.get()
16
>>> res.parent.get()
8
>>> res.parent.parent.get()
4
- Simple group
You can easily create a group of tasks to execute in parallel::
>>> from celery import group
>>> res = group(add.s(i, i) for i in xrange(10))()
>>> res.get(timeout=1)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
- Simple chord
The chord primitive enables us to add callback to be called when
all of the tasks in a group have finished executing, which is often
required for algorithms that aren't embarrassingly parallel::
>>> from celery import chord
>>> res = chord((add.s(i, i) for i in xrange(10)), xsum.s())()
>>> res.get()
90
The above example creates 10 tasks that all start in parallel,
and when all of them are complete the return values are combined
into a list and sent to the ``xsum`` task.
The body of a chord can also be immutable, so that the return value
of the group is not passed on to the callback::
>>> chord((import_contact.s(c) for c in contacts),
... notify_complete.si(import_id)).apply_async()
Note the use of ``.si`` above which creates an immutable signature.
- Blow your mind by combining
Chains can be partial too::
>>> c1 = (add.s(4) | mul.s(8))
# (16 + 4) * 8
>>> res = c1(16)
>>> res.get()
160
Which means that you can combine chains::
# ((4 + 16) * 2 + 4) * 8
>>> c2 = (add.s(4, 16) | mul.s(2) | (add.s(4) | mul.s(8)))
>>> res = c2()
>>> res.get()
352
Chaining a group together with another task will automatically
upgrade it to be a chord::
>>> c3 = (group(add.s(i, i) for i in xrange(10)) | xsum.s())
>>> res = c3()
>>> res.get()
90
Groups and chords accepts partial arguments too, so in a chain
the return value of the previous task is forwarded to all tasks in the group::
>>> new_user_workflow = (create_user.s() | group(
... import_contacts.s(),
... send_welcome_email.s()))
... new_user_workflow.delay(username='artv',
... first='Art',
... last='Vandelay',
... email='[email protected]')
If you don't want to forward arguments to the group then
you can make the signatures in the group immutable::
>>> res = (add.s(4, 4) | group(add.si(i, i) for i in xrange(10)))()
>>> res.get()
<GroupResult: de44df8c-821d-4c84-9a6a-44769c738f98 [
bc01831b-9486-4e51-b046-480d7c9b78de,
2650a1b8-32bf-4771-a645-b0a35dcc791b,
dcbee2a5-e92d-4b03-b6eb-7aec60fd30cf,
59f92e0a-23ea-41ce-9fad-8645a0e7759c,
26e1e707-eccf-4bf4-bbd8-1e1729c3cce3,
2d10a5f4-37f0-41b2-96ac-a973b1df024d,
e13d3bdb-7ae3-4101-81a4-6f17ee21df2d,
104b2be0-7b75-44eb-ac8e-f9220bdfa140,
c5c551a5-0386-4973-aa37-b65cbeb2624b,
83f72d71-4b71-428e-b604-6f16599a9f37]>
>>> res.parent.get()
8
.. _canvas-chain:
Chains
------
.. versionadded:: 3.0
Tasks can be linked together, which in practice means adding
a callback task::
>>> res = add.apply_async((2, 2), link=mul.s(16))
>>> res.get()
4
The linked task will be applied with the result of its parent
task as the first argument, which in the above case will result
in ``mul(4, 16)`` since the result is 4.
You can also add *error callbacks* using the ``link_error`` argument::
>>> add.apply_async((2, 2), link_error=log_error.s())
>>> add.subtask((2, 2), link_error=log_error.s())
Since exceptions can only be serialized when pickle is used
the error callbacks take the id of the parent task as argument instead:
.. code-block:: python
from __future__ import print_function
import os
from proj.celery import app
@app.task
def log_error(task_id):
result = app.AsyncResult(task_id)
result.get(propagate=False) # make sure result written.
with open(os.path.join('/var/errors', task_id), 'a') as fh:
print('--\n\n{0} {1} {2}'.format(
task_id, result.result, result.traceback), file=fh)
To make it even easier to link tasks together there is
a special signature called :class:`~celery.chain` that lets
you chain tasks together:
.. code-block:: python
>>> from celery import chain
>>> from proj.tasks import add, mul
# (4 + 4) * 8 * 10
>>> res = chain(add.s(4, 4), mul.s(8), mul.s(10))
proj.tasks.add(4, 4) | proj.tasks.mul(8) | proj.tasks.mul(10)
Calling the chain will call the tasks in the current process
and return the result of the last task in the chain::
>>> res = chain(add.s(4, 4), mul.s(8), mul.s(10))()
>>> res.get()
640
It also sets ``parent`` attributes so that you can
work your way up the chain to get intermediate results::
>>> res.parent.get()
64
>>> res.parent.parent.get()
8
>>> res.parent.parent
<AsyncResult: eeaad925-6778-4ad1-88c8-b2a63d017933>
Chains can also be made using the ``|`` (pipe) operator::
>>> (add.s(2, 2) | mul.s(8) | mul.s(10)).apply_async()
.. note::
It's not possible to synchronize on groups, so a group chained to another
signature is automatically upgraded to a chord:
.. code-block:: python
# will actually be a chord when finally evaluated
res = (group(add.s(i, i) for i in range(10)) | xsum.s()).delay()
Trails
~~~~~~
Tasks will keep track of what subtasks a task calls in the
result backend (unless disabled using :attr:`Task.trail <[email protected]>`)
and this can be accessed from the result instance::
>>> res.children
[<AsyncResult: 8c350acf-519d-4553-8a53-4ad3a5c5aeb4>]
>>> res.children[0].get()
64
The result instance also has a :meth:`[email protected]` method
that treats the result as a graph, enabling you to iterate over
the results::
>>> list(res.collect())
[(<AsyncResult: 7b720856-dc5f-4415-9134-5c89def5664e>, 4),
(<AsyncResult: 8c350acf-519d-4553-8a53-4ad3a5c5aeb4>, 64)]
By default :meth:`[email protected]` will raise an
:exc:`~@IncompleteStream` exception if the graph is not fully
formed (one of the tasks has not completed yet),
but you can get an intermediate representation of the graph
too::
>>> for result, value in res.collect(intermediate=True):
....
Graphs
~~~~~~
In addition you can work with the result graph as a
:class:`~celery.datastructures.DependencyGraph`:
.. code-block:: python
>>> res = chain(add.s(4, 4), mul.s(8), mul.s(10))()
>>> res.parent.parent.graph
285fa253-fcf8-42ef-8b95-0078897e83e6(1)
463afec2-5ed4-4036-b22d-ba067ec64f52(0)
872c3995-6fa0-46ca-98c2-5a19155afcf0(2)
285fa253-fcf8-42ef-8b95-0078897e83e6(1)
463afec2-5ed4-4036-b22d-ba067ec64f52(0)
You can even convert these graphs to *dot* format::
>>> with open('graph.dot', 'w') as fh:
... res.parent.parent.graph.to_dot(fh)
and create images:
.. code-block:: bash
$ dot -Tpng graph.dot -o graph.png
.. image:: ../images/result_graph.png
.. _canvas-group:
Groups
------
.. versionadded:: 3.0
A group can be used to execute several tasks in parallel.
The :class:`~celery.group` function takes a list of signatures::
>>> from celery import group
>>> from proj.tasks import add
>>> group(add.s(2, 2), add.s(4, 4))
(proj.tasks.add(2, 2), proj.tasks.add(4, 4))
If you **call** the group, the tasks will be applied
one after one in the current process, and a :class:`~celery.result.GroupResult`
instance is returned which can be used to keep track of the results,
or tell how many tasks are ready and so on::
>>> g = group(add.s(2, 2), add.s(4, 4))
>>> res = g()
>>> res.get()
[4, 8]
Group also supports iterators::
>>> group(add.s(i, i) for i in xrange(100))()
A group is a signature object, so it can be used in combination
with other signatures.
Group Results
~~~~~~~~~~~~~
The group task returns a special result too,
this result works just like normal task results, except
that it works on the group as a whole::
>>> from celery import group
>>> from tasks import add
>>> job = group([
... add.s(2, 2),
... add.s(4, 4),
... add.s(8, 8),
... add.s(16, 16),
... add.s(32, 32),
... ])
>>> result = job.apply_async()
>>> result.ready() # have all subtasks completed?
True
>>> result.successful() # were all subtasks successful?
True
>>> result.get()
[4, 8, 16, 32, 64]
The :class:`~celery.result.GroupResult` takes a list of
:class:`~celery.result.AsyncResult` instances and operates on them as
if it was a single task.
It supports the following operations:
* :meth:`~celery.result.GroupResult.successful`
Return :const:`True` if all of the subtasks finished
successfully (e.g. did not raise an exception).
* :meth:`~celery.result.GroupResult.failed`
Return :const:`True` if any of the subtasks failed.
* :meth:`~celery.result.GroupResult.waiting`
Return :const:`True` if any of the subtasks
is not ready yet.
* :meth:`~celery.result.GroupResult.ready`
Return :const:`True` if all of the subtasks
are ready.
* :meth:`~celery.result.GroupResult.completed_count`
Return the number of completed subtasks.
* :meth:`~celery.result.GroupResult.revoke`
Revoke all of the subtasks.
* :meth:`~celery.result.GroupResult.join`
Gather the results for all of the subtasks
and return a list with them ordered by the order of which they
were called.
.. _canvas-chord:
Chords
------
.. versionadded:: 2.3
.. note::
Tasks used within a chord must *not* ignore their results. If the result
backend is disabled for *any* task (header or body) in your chord you
should read ":ref:`chord-important-notes`".
A chord is a task that only executes after all of the tasks in a group have
finished executing.
Let's calculate the sum of the expression
:math:`1 + 1 + 2 + 2 + 3 + 3 ... n + n` up to a hundred digits.
First you need two tasks, :func:`add` and :func:`tsum` (:func:`sum` is
already a standard function):
.. code-block:: python
@app.task
def add(x, y):
return x + y
@app.task
def tsum(numbers):
return sum(numbers)
Now you can use a chord to calculate each addition step in parallel, and then
get the sum of the resulting numbers::
>>> from celery import chord
>>> from tasks import add, tsum
>>> chord(add.s(i, i)
... for i in xrange(100))(tsum.s()).get()
9900
This is obviously a very contrived example, the overhead of messaging and
synchronization makes this a lot slower than its Python counterpart::
sum(i + i for i in xrange(100))
The synchronization step is costly, so you should avoid using chords as much
as possible. Still, the chord is a powerful primitive to have in your toolbox
as synchronization is a required step for many parallel algorithms.
Let's break the chord expression down:
.. code-block:: python
>>> callback = tsum.s()
>>> header = [add.s(i, i) for i in range(100)]
>>> result = chord(header)(callback)
>>> result.get()
9900
Remember, the callback can only be executed after all of the tasks in the
header have returned. Each step in the header is executed as a task, in
parallel, possibly on different nodes. The callback is then applied with
the return value of each task in the header. The task id returned by
:meth:`chord` is the id of the callback, so you can wait for it to complete
and get the final return value (but remember to :ref:`never have a task wait
for other tasks <task-synchronous-subtasks>`)
.. _chord-errors:
Error handling
~~~~~~~~~~~~~~
So what happens if one of the tasks raises an exception?
This was not documented for some time and before version 3.1
the exception value will be forwarded to the chord callback.
From 3.1 errors will propagate to the callback, so the callback will not be
executed; instead the callback changes to failure state, and the error is set
to the :exc:`~@ChordError` exception:
.. code-block:: python
>>> c = chord([add.s(4, 4), raising_task.s(), add.s(8, 8)])
>>> result = c()
>>> result.get()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "*/celery/result.py", line 120, in get
interval=interval)
File "*/celery/backends/amqp.py", line 150, in wait_for
raise meta['result']
celery.exceptions.ChordError: Dependency 97de6f3f-ea67-4517-a21c-d867c61fcb47
raised ValueError('something something',)
If you're running 3.0.14 or later you can enable the new behavior via
the :setting:`CELERY_CHORD_PROPAGATES` setting::
CELERY_CHORD_PROPAGATES = True
While the traceback may be different depending on which result backend is
being used, you can see the error description includes the id of the task that failed
and a string representation of the original exception. You can also
find the original traceback in ``result.traceback``.
Note that the rest of the tasks will still execute, so the third task
(``add.s(8, 8)``) is still executed even though the middle task failed.
Also the :exc:`~@ChordError` only shows the task that failed
first (in time): it does not respect the ordering of the header group.
.. _chord-important-notes:
Important Notes
~~~~~~~~~~~~~~~
Tasks used within a chord must *not* ignore their results. In practice this
means that you must enable a :const:`CELERY_RESULT_BACKEND` in order to use
chords. Additionally, if :const:`CELERY_IGNORE_RESULT` is set to :const:`True`
in your configuration, be sure that the individual tasks to be used within
the chord are defined with :const:`ignore_result=False`. This applies to both
Task subclasses and decorated tasks.
Example Task subclass:
.. code-block:: python
class MyTask(Task):
abstract = True
ignore_result = False
Example decorated task:
.. code-block:: python
@app.task(ignore_result=False)
def another_task(project):
do_something()
By default the synchronization step is implemented by having a recurring task
poll the completion of the group every second, calling the signature when
ready.
Example implementation:
.. code-block:: python
from celery import maybe_signature
@app.task(bind=True)
def unlock_chord(self, group, callback, interval=1, max_retries=None):
if group.ready():
return maybe_signature(callback).delay(group.join())
raise self.retry(countdown=interval, max_retries=max_retries)
This is used by all result backends except Redis and Memcached, which
increment a counter after each task in the header, then applying the callback
when the counter exceeds the number of tasks in the set. *Note:* chords do not
properly work with Redis before version 2.2; you will need to upgrade to at
least 2.2 to use them.
The Redis and Memcached approach is a much better solution, but not easily
implemented in other backends (suggestions welcome!).
.. note::
If you are using chords with the Redis result backend and also overriding
the :meth:`Task.after_return` method, you need to make sure to call the
super method or else the chord callback will not be applied.
.. code-block:: python
def after_return(self, *args, **kwargs):
do_something()
super(MyTask, self).after_return(*args, **kwargs)
.. _canvas-map:
Map & Starmap
-------------
:class:`~celery.map` and :class:`~celery.starmap` are built-in tasks
that calls the task for every element in a sequence.
They differ from group in that
- only one task message is sent
- the operation is sequential.
For example using ``map``:
.. code-block:: python
>>> from proj.tasks import add
>>> ~xsum.map([range(10), range(100)])
[45, 4950]
is the same as having a task doing:
.. code-block:: python
@app.task
def temp():
return [xsum(range(10)), xsum(range(100))]
and using ``starmap``::
>>> ~add.starmap(zip(range(10), range(10)))
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
is the same as having a task doing:
.. code-block:: python
@app.task
def temp():
return [add(i, i) for i in range(10)]
Both ``map`` and ``starmap`` are signature objects, so they can be used as
other signatures and combined in groups etc., for example
to call the starmap after 10 seconds::
>>> add.starmap(zip(range(10), range(10))).apply_async(countdown=10)
.. _canvas-chunks:
Chunks
------
Chunking lets you divide an iterable of work into pieces, so that if
you have one million objects, you can create 10 tasks with hundred
thousand objects each.
Some may worry that chunking your tasks results in a degradation
of parallelism, but this is rarely true for a busy cluster
and in practice since you are avoiding the overhead of messaging
it may considerably increase performance.
To create a chunks signature you can use :meth:`@Task.chunks`:
.. code-block:: python
>>> add.chunks(zip(range(100), range(100)), 10)
As with :class:`~celery.group` the act of sending the messages for
the chunks will happen in the current process when called:
.. code-block:: python
>>> from proj.tasks import add
>>> res = add.chunks(zip(range(100), range(100)), 10)()
>>> res.get()
[[0, 2, 4, 6, 8, 10, 12, 14, 16, 18],
[20, 22, 24, 26, 28, 30, 32, 34, 36, 38],
[40, 42, 44, 46, 48, 50, 52, 54, 56, 58],
[60, 62, 64, 66, 68, 70, 72, 74, 76, 78],
[80, 82, 84, 86, 88, 90, 92, 94, 96, 98],
[100, 102, 104, 106, 108, 110, 112, 114, 116, 118],
[120, 122, 124, 126, 128, 130, 132, 134, 136, 138],
[140, 142, 144, 146, 148, 150, 152, 154, 156, 158],
[160, 162, 164, 166, 168, 170, 172, 174, 176, 178],
[180, 182, 184, 186, 188, 190, 192, 194, 196, 198]]
while calling ``.apply_async`` will create a dedicated
task so that the individual tasks are applied in a worker
instead::
>>> add.chunks(zip(range(100), range(100)), 10).apply_async()
You can also convert chunks to a group::
>>> group = add.chunks(zip(range(100), range(100)), 10).group()
and with the group skew the countdown of each task by increments
of one::
>>> group.skew(start=1, stop=10)()
which means that the first task will have a countdown of 1, the second
a countdown of 2 and so on.
| PypiClean |
/cgse-2023.35.0.tar.gz/cgse-2023.35.0/src/egse/coordinates/pyplot.py | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # needed for 3d projection
import numpy as np
import egse
from egse.coordinates.referenceFrame import ReferenceFrame
from egse.coordinates.point import Point, Points
def plot_reference_frame(frame, master=None, figname=None, **kwargs):
    """Display a ReferenceFrame as three unit vectors in a 3d plot.

    The unit vectors follow the 'RGB' color convention:
    x in red, y in green, z in blue.

    Args:
        frame : egse.coordinates.referenceFrame.ReferenceFrame
        master : master ReferenceFrame (optional)
        figname: string. Name of matplotlib figure (can be pre-existing)
        kwargs : passed to matplotlib.axes._subplots.Axes3DSubplot.quiver

    Returns:
        matplotlib.axes._subplots.Axes3DSubplot displaying the reference frame.

    .. note::
        Use ax.set_xlim3d(min,max) to properly set the ranges of the display
    """
    reference = ReferenceFrame.createMaster() if master is None else master.__copy__()

    # Express the frame origin and the tips of its three axes in the
    # (temporary) master frame; keep only the cartesian coordinates.
    origin = frame.getOrigin().expressIn(reference)[:3]
    tips = [frame.getAxis(axis, name='f' + axis).expressIn(reference)[:3]
            for axis in ('x', 'y', 'z')]
    del reference

    # Common origin of the three quiver arrows.
    x0 = np.array([origin[0]])
    y0 = np.array([origin[1]])
    z0 = np.array([origin[2]])

    kwargs.setdefault('length', 1)
    kwargs.setdefault('normalize', True)

    fig = plt.figure(figname, figsize=plt.figaspect(1.0))
    ax = fig.gca(projection='3d')
    # One quiver call per axis, in the same r/g/b order as before.
    for tip, color in zip(tips, ('r', 'g', 'b')):
        ax.quiver(x0, y0, z0,
                  np.array([tip[0] - origin[0]]),
                  np.array([tip[1] - origin[1]]),
                  np.array([tip[2] - origin[2]]),
                  color=color, **kwargs)
    return ax
def plot_points(points, master=None, figname=None, **kwargs):
    """Plot the Points object as a 3d scatter plot.

    Args:
        points : either a (egse.coordinate.point.)Points object or a list of Point objects
        master : master ReferenceFrame (optional)
        figname: string. Name of matplotlib figure (can be pre-existing)
        kwargs : passed to matplotlib.axes._subplots.Axes3DSubplot.scatter

    Returns:
        matplotlib.axes._subplots.Axes3DSubplot displaying the points.

    Raises:
        ValueError: if ``points`` is neither a ``Points`` object nor a list.

    .. note::
        Use ax.set_xlim3d(min,max) to properly set the ranges of the display.
    """
    if master is None:
        tmpmaster = ReferenceFrame.createMaster()
    else:
        tmpmaster = master.__copy__()
    #
    if isinstance(points, list):
        # A list is assumed to hold Point objects; Points() validates them.
        allpoints = Points(points, ref=tmpmaster)
    elif isinstance(points, (Points, egse.coordinates.point.Points)):
        allpoints = points
    else:
        # BUG FIX: the previous message only described the list case, but this
        # branch fires for any non-list, non-Points input.
        raise ValueError(
            "points must be a Points object or a list of Point objects")
    #
    del tmpmaster
    #
    coordinates = allpoints.coordinates
    xs = coordinates[0, :]
    ys = coordinates[1, :]
    zs = coordinates[2, :]
    #
    kwargs.setdefault('s', 50)
    kwargs.setdefault('marker', 'o')
    kwargs.setdefault('color', 'k')
    #
    fig = plt.figure(figname)
    ax = fig.gca(projection='3d')
    ax.scatter(xs, ys, zs, **kwargs)
    return ax
def plot_vectors(points, master=None, figname=None, fromorigin=True, **kwargs):
    """Plot the Points object as 3d vectors.

    Args:
        points : either a (egse.coordinate.point.)Points object or a list of Point objects
        master : master ReferenceFrame (optional)
        figname: string. Name of matplotlib figure (can be pre-existing)
        fromorigin: bool
               if True, all vectors are displayed starting from the origin
               if False, all vectors go towards the origin
        kwargs : passed to matplotlib.axes._subplots.Axes3DSubplot.quiver

    Returns:
        matplotlib.axes._subplots.Axes3DSubplot displaying the vectors.

    Raises:
        ValueError: if ``points`` is neither a ``Points`` object nor a list.

    .. note::
        Use ax.set_xlim3d(min,max) to properly set the ranges of the display.
    """
    if master is None:
        tmpmaster = ReferenceFrame.createMaster()
    else:
        tmpmaster = master.__copy__()
    #
    if isinstance(points, list):
        allpoints = Points(points, ref=tmpmaster)
    elif isinstance(points, (Points, egse.coordinates.point.Points)):
        allpoints = points
    else:
        raise ValueError(
            "points must be a Points object or a list of Point objects")
    #
    del tmpmaster
    #
    # SET DEFAULTS
    kwargs.setdefault('color', 'k')
    #
    # PREPARE VECTOR COORDINATES
    coordinates = allpoints.coordinates
    xs = coordinates[0, :]
    ys = coordinates[1, :]
    zs = coordinates[2, :]
    # Origin of all vectors = the origin of the reference frame of allpoints.
    # BUG FIX: use allpoints.ref instead of points.ref -- when `points` is a
    # plain list it has no `ref` attribute and this line used to raise
    # AttributeError.
    x, y, z = allpoints.ref.getOrigin().coordinates[:3]
    x = np.ones_like(xs) * x
    y = np.ones_like(xs) * y
    z = np.ones_like(xs) * z
    ### PLOT
    fig = plt.figure(figname)
    ax = fig.gca(projection='3d')
    if fromorigin == True:
        ax.quiver(x, y, z, xs - x, ys - y, zs - z, **kwargs)
    elif fromorigin == False:
        ax.quiver(xs, ys, zs, x - xs, y - ys, z - zs, **kwargs)
    else:
        # Best effort for non-boolean input: warn and fall back to from-origin.
        print("Parameter 'fromorigin' must be True or False")
        print("Setting it to True by default")
        ax.quiver(x, y, z, xs - x, ys - y, zs - z, **kwargs)
    return ax
/gpipe-0.0.13.tar.gz/gpipe-0.0.13/examples/resequencing/workflow/00-reference.workflow.py |
import os
import sys
from gpipe.dsl import *
# ================================================================================
# Helper libraries
# ================================================================================
# Modules loaded relative to this workflow file via the gpipe DSL.
reference = import_relative_file('./libs/reference.py')
common = import_relative_file('./common.py')
# ================================================================================
# Options
# ================================================================================
# Validate the user-supplied reference options against the shared schema,
# then let libs/reference.py post-process them (presumably deriving the extra
# fasta_PAR*/fasta_mt_shifted paths used below -- see libs/reference.py).
options.validate({'reference': common.REFERENCE_SCHEMA})
reference.update_reference_options(options.reference)
# ================================================================================
# Task declaration helpers
# ================================================================================
def verify_par_masked(fasta, pars, non_pars):
    """Declare a task that verifies chrY PAR masking of ``fasta``.

    Runs ``x-verify_par_masked.py`` and touches
    ``<fasta>.par_mask_verified`` on success.

    :param fasta: path of the reference FASTA to check
    :param pars: region records whose ``chrY`` interval is passed as --par
    :param non_pars: region records whose ``chrY`` interval is passed as --non-par
    """
    cpus(1)
    memory('4GB')
    # Template parameters referenced as {{ ... }} in the script body below.
    parameter('python', sys.executable)
    parameter('pars', pars)
    parameter('non_pars', non_pars)
    input('fasta', fasta)
    output('fasta_par_mask_verified', f'{fasta}.par_mask_verified')
    script(r"""
    {{ python }} {{ workflow_directory }}/scripts/x-verify_par_masked.py\{% for p in pars %}
    --par {{ p.chrY }}\{% endfor %}{% for np in non_pars %}
    --non-par {{ np.chrY }}\{% endfor %}
    {{ fasta }}
    touch {{ fasta_par_mask_verified }}
    """)
def mask_pars(fasta_par2, fasta_par3, pars):
    """Declare a task that masks the given chrY regions with ``x-mask_fasta.py``.

    :param fasta_par2: input FASTA (PAR2 version)
    :param fasta_par3: output FASTA with the extra PARs masked (PAR3 version)
    :param pars: region records whose ``chrY`` interval is passed as --mask
    """
    cpus(1)
    memory('4GB')
    parameter('python', sys.executable)
    parameter('pars', pars)
    input('fasta_par2', fasta_par2)
    output('fasta_par3', fasta_par3)
    script(r"""
    {{ python }} {{ workflow_directory }}/scripts/x-mask_fasta.py\{% for p in pars %}
    --mask {{ p.chrY }}\{% endfor %}
    {{ fasta_par2 }}\
    > {{ fasta_par3 }}
    """)
def shift_chrMT(fasta_original, fasta_mt_shifted, shift_size):
    """Declare a task that shifts the chrMT sequence by ``shift_size`` bases.

    The mitochondrial contig name is resolved at run time from the
    ``<fasta>.chrMT.interval_list`` file (see the shell comment below).

    :param fasta_original: input FASTA with the unshifted chrMT
    :param fasta_mt_shifted: output FASTA with chrMT shifted
    :param shift_size: number of bases passed to ``x-shift_fasta.py --shift``
    """
    cpus(1)
    memory('4GB')
    parameter('python', sys.executable)
    parameter('shift_size', shift_size)
    input('fasta_original', fasta_original)
    # NOTE(review): the key is spelled 'interval_ist' (sic); the template
    # below uses the same spelling, so do not "fix" one without the other.
    input('fasta_original_mt_interval_ist', f'{fasta_original}.chrMT.interval_list')
    output('fasta_mt_shifted', fasta_mt_shifted)
    script(r"""
    # options.reference.contigs.chrMT might be null, thus, we have to manually resolve contig name
    chrMT=$(cat {{ fasta_original_mt_interval_ist }} | awk 'END { print $1 }')
    #
    {{ python }} {{ workflow_directory }}/scripts/x-shift_fasta.py\
    --shift $chrMT:{{ shift_size }}\
    {{ fasta_original }}\
    > {{ fasta_mt_shifted }}
    """)
def bwa_index(fasta):
    """Declare a task that builds the BWA index files for ``fasta``.

    :param fasta: path of the FASTA to index; the five BWA index files
        (``.amb``/``.ann``/``.bwt``/``.pac``/``.sa``) are declared as outputs
    """
    module(common.BWA_MODULE)
    cpus(1)
    memory('32GB')
    input('fasta', fasta)
    output('fasta_amb', f'{fasta}.amb')
    output('fasta_ann', f'{fasta}.ann')
    output('fasta_bwt', f'{fasta}.bwt')
    output('fasta_pac', f'{fasta}.pac')
    output('fasta_sa', f'{fasta}.sa')
    script(r"""
    bwa index {{ fasta }}
    """)
def samtools_faidx(fasta):
    """Declare a task that creates the samtools FASTA index (``.fai``).

    :param fasta: path of the FASTA to index
    """
    module(common.SAMTOOLS_MODULE)
    cpus(1)
    memory('4GB')
    input('fasta', fasta)
    output('fasta_fai', f'{fasta}.fai')
    script(r"""
    samtools faidx {{ fasta }}
    """)
def samtools_dict(fasta):
    """Declare a task that creates the sequence dictionary via ``samtools dict``.

    The output path replaces the FASTA extension with ``.dict``.

    :param fasta: path of the FASTA to process
    """
    module(common.SAMTOOLS_MODULE)
    cpus(1)
    memory('4GB')
    input('fasta', fasta)
    output('dict', os.path.splitext(fasta)[0] + '.dict')
    script(r"""
    samtools dict {{ fasta }} -o {{ dict }}
    """)
def generate_interval_lists(fasta):
    """Declare a task that derives per-region interval lists from the dict.

    Produces autosome / chrX / chrY / chrMT ``.interval_list`` files next to
    ``fasta`` using ``x-generate_interval_list_from_dict.py``.

    :param fasta: FASTA path; its sibling ``.dict`` file is the actual input
    """
    cpus(1)
    memory('4GB')
    parameter('python', sys.executable)
    input('dict', os.path.splitext(fasta)[0] + '.dict')
    output('autosome', f'{fasta}.autosome.interval_list')
    output('chrX', f'{fasta}.chrX.interval_list')
    output('chrY', f'{fasta}.chrY.interval_list')
    output('chrMT', f'{fasta}.chrMT.interval_list')
    script(r"""
    python={{ python }}
    script={{ workflow_directory }}/scripts/x-generate_interval_list_from_dict.py
    dict={{ dict }}
    $python $script --autosome $dict > {{ autosome }}
    $python $script --chrX $dict > {{ chrX }}
    $python $script --chrY $dict > {{ chrY }}
    $python $script --chrMT $dict > {{ chrMT }}
    """)
# ================================================================================
# Original
# ================================================================================
# Index and interval-list tasks for the unmodified reference FASTA.
with task('s00-ORIG-verify_PARs_masked'):
    # PARs other than XTR must be masked on chrY; XTR must stay unmasked.
    verify_par_masked(
        options.reference.fasta,
        [p for p in options.reference.chrXY.PARs if p.id != 'XTR'],
        [p for p in options.reference.chrXY.PARs if p.id == 'XTR'])
with task('s01-ORIG-bwa_index'):
    bwa_index(options.reference.fasta)
with task('s02-ORIG-samtools_faidx'):
    samtools_faidx(options.reference.fasta)
with task('s03-ORIG-samtools_dict'):
    samtools_dict(options.reference.fasta)
with task('s04-ORIG-interval_list'):
    generate_interval_lists(options.reference.fasta)
# ================================================================================
# PAR3
# ================================================================================
# Build and index the PAR3 reference (extra chrY PARs masked).
with task('s10-PAR3-mask_PARs'):
    mask_pars(
        options.reference.fasta_PAR2,
        options.reference.fasta_PAR3,
        options.reference.chrXY.PARs3)
with task('s11-PAR3-bwa_index'):
    bwa_index(options.reference.fasta_PAR3)
with task('s12-PAR3-samtools_faidx'):
    samtools_faidx(options.reference.fasta_PAR3)
with task('s13-PAR3-samtools_dict'):
    samtools_dict(options.reference.fasta_PAR3)
with task('s14-PAR3-interval_list'):
    generate_interval_lists(options.reference.fasta_PAR3)
# ================================================================================
# MT
# ================================================================================
# Build and index the chrMT-shifted reference.
with task('s20-MT-shift_chrMT'):
    shift_chrMT(
        options.reference.fasta,
        options.reference.fasta_mt_shifted,
        options.reference.chrMT.shift_size)
with task('s21-MT-bwa_index'):
    bwa_index(options.reference.fasta_mt_shifted)
with task('s22-MT-samtools_faidx'):
    samtools_faidx(options.reference.fasta_mt_shifted)
with task('s23-MT-samtools_dict'):
    samtools_dict(options.reference.fasta_mt_shifted)
with task('s24-MT-interval_list'):
    generate_interval_lists(options.reference.fasta_mt_shifted)
/simiki-1.6.2.3.tar.gz/simiki-1.6.2.3/README.md | # Simiki #
[![Latest Version](http://img.shields.io/pypi/v/simiki.svg)](https://pypi.python.org/pypi/simiki)
[![The MIT License](http://img.shields.io/badge/license-MIT-yellow.svg)](https://github.com/tankywoo/simiki/blob/master/LICENSE)
[![Build Status](https://travis-ci.org/tankywoo/simiki.svg)](https://travis-ci.org/tankywoo/simiki)
[![Coverage Status](https://img.shields.io/coveralls/tankywoo/simiki.svg)](https://coveralls.io/r/tankywoo/simiki)
Simiki is a simple wiki framework, written in [Python](https://www.python.org/).
* Easy to use. Creating a wiki only needs a few steps
* Use [Markdown](http://daringfireball.net/projects/markdown/). Just open your editor and write
* Store source files by category
* Static HTML output
* A CLI tool to manage the wiki
Simiki is short for `Simple Wiki` :)
> New in version 1.6.2.3 (2019-05-11)
>
> - Fix Issue #124
>
>
> New in version 1.6.2.2 (2019-04-21)
>
> - Fix PyYAML CVE-2017-18342
> - Fix Jinja2 CVE-2019-10906
>
>
> New in version 1.6.2.1 (2017-06-04)
>
> - Fix preview not work in py3
## Installation ##
It is available for **Python 2.7, 3.3, 3.4, 3.5, 3.6**, on Linux, Mac OS X and Windows.
Install from [PyPI](https://pypi.python.org/pypi/simiki):
pip install simiki
Update:
pip install -U simiki
## Quick Start ##
### Init Site ###
mkdir mywiki && cd mywiki
simiki init
### Generate ###
simiki g
### Preview ###
simiki p
For more information, `simiki -h` or have a look at [Simiki.org](http://simiki.org)
## Others ##
* [simiki.org](http://simiki.org)
* <https://github.com/tankywoo/simiki>
* Email: <[email protected]>
* [Simiki Users](https://github.com/tankywoo/simiki/wiki/Simiki-Users)
## Contribution ##
Your contributions are always welcome!
Sending pull requests on [Pull Requests Page](https://github.com/tankywoo/simiki/pulls) is the preferred method for receiving contributions.
* Bug fixes can be based on **`master`** branch and I will also merge into `dev` branch.
* Feature can be based on **`dev`** branch.
Following links are the contribution guidelines you may need:
* [Fork A Repo](https://help.github.com/articles/fork-a-repo/)
* [Contributing to Processing with Pull Requests](https://github.com/processing/processing/wiki/Contributing-to-Processing-with-Pull-Requests)
Thanks to every [contributor](https://github.com/tankywoo/simiki/graphs/contributors).
## License ##
The MIT License (MIT)
Copyright (c) 2013 Tanky Woo
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| PypiClean |
/lavue-2.83.4.tar.gz/lavue-2.83.4/lavuelib/takeMotorsDialog.py | from .qtuic import uic
from pyqtgraph import QtCore
import os
try:
from pyqtgraph import QtWidgets
except Exception:
from pyqtgraph import QtGui as QtWidgets
# Import the tango binding under its new (tango) or old (PyTango) package
# name; TANGO records whether any binding is available at all.
try:
    try:
        import tango
    except ImportError:
        import PyTango as tango
    #: (:obj:`bool`) tango imported
    TANGO = True
except ImportError:
    #: (:obj:`bool`) tango imported
    TANGO = False
# Load the Qt designer .ui file shipped next to this module;
# _formclass is the generated form class, _baseclass its Qt base class.
_formclass, _baseclass = uic.loadUiType(
    os.path.join(os.path.dirname(os.path.abspath(__file__)),
                 "ui", "TakeMotorsDialog.ui"))
class TakeMotorsDialog(QtWidgets.QDialog):
    """ take motors dialog: lets the user select x and y motor devices """

    def __init__(self, parent=None):
        """ constructor

        :param parent: parent object
        :type parent: :class:`pyqtgraph.QtCore.QObject`
        """
        QtWidgets.QDialog.__init__(self, parent)

        #: (:class:`Ui_Dialog') ui_dialog object from qtdesigner
        self.__ui = _formclass()
        self.__ui.setupUi(self)

        #: (:obj:`str`) x motor name
        self.xmotorname = ""
        #: (:obj:`str`) y motor name
        self.ymotorname = ""
        #: (:obj:`list`<:obj:`str`>) motor names offered in both combo boxes
        self.motortips = []
        #: (:obj:`str`) group title
        self.title = None
        #: (:class:`tango.DeviceProxy`) x motor device
        self.xmotordevice = None
        #: (:class:`tango.DeviceProxy`) y motor device
        self.ymotordevice = None

    def createGUI(self):
        """ creates GUI: sets the group title and fills both combo boxes
        """
        if self.title is not None:
            self.__ui.groupBox.setTitle(str(self.title))
        self.__updateComboBox(self.__ui.xComboBox, str(self.xmotorname))
        self.__updateComboBox(self.__ui.yComboBox, str(self.ymotorname))

    @QtCore.pyqtSlot()
    def accept(self):
        """ validates the selected motors and updates class variables

        The dialog is accepted only when both selected motor devices can be
        created and expose the ``state`` and ``position`` attributes;
        otherwise the focus moves to the offending combo box and the dialog
        stays open.
        """
        # The x and y motors are validated with identical logic, so run it in
        # a loop instead of two copied try blocks (the original code
        # duplicated this block verbatim for each axis).
        for axis, combobox in (("x", self.__ui.xComboBox),
                               ("y", self.__ui.yComboBox)):
            try:
                # as in the original: name and proxy are stored before the
                # attribute checks, so a failing check leaves them assigned
                name = str(combobox.currentText())
                setattr(self, "%smotorname" % axis, name)
                device = tango.DeviceProxy(name)
                setattr(self, "%smotordevice" % axis, device)
                for attr in ["state", "position"]:
                    if not hasattr(device, attr):
                        raise Exception("Missing %s" % attr)
            except Exception:
                combobox.setFocus()
                return
        QtWidgets.QDialog.accept(self)

    def __updateComboBox(self, combobox, motorname):
        """ repopulates the given combo box and selects the motor name

        :param combobox: combo box to update
        :param motorname: motor name to select (appended if not in motortips)
        """
        combobox.clear()
        for mt in sorted(self.motortips):
            combobox.addItem(mt)
        if motorname not in self.motortips:
            combobox.addItem(motorname)
        ind = combobox.findText(motorname)
        combobox.setCurrentIndex(ind)
/NJS-0.0.5.2-py3-none-any.whl/njsCore/Color/ColorUtility.py | import math
def lab2rgb(L, A, B):
    """Convert a CIE L*a*b* color (D65 white point) to an sRGB triple.

    Args:
        L: lightness, typically 0..100.
        A: green-red opponent component.
        B: blue-yellow opponent component.

    Returns:
        [r, g, b] with each channel clamped to the range 0..255.
    """
    # L*a*b* -> normalized f-values.
    y = (L + 16) / 116
    x = A / 500 + y
    z = y - B / 200

    def _finv(t, white):
        # Invert the CIE cube-root compression, scaled by the reference white.
        if t * t * t > 0.008856:
            return (t * t * t) * white
        return ((t - 16 / 116) / 7.787) * white

    x = _finv(x, 0.95047)
    y = _finv(y, 1.00000)
    z = _finv(z, 1.08883)

    # XYZ -> linear sRGB.
    r = x * 3.2406 + y * -1.5372 + z * -0.4986
    g = x * -0.9689 + y * 1.8758 + z * 0.0415
    b = x * 0.0557 + y * -0.2040 + z * 1.0570

    def _gamma(c):
        # Linear -> gamma-compressed sRGB.
        if c > 0.0031308:
            return 1.055 * math.pow(c, 1 / 2.4) - 0.055
        return 12.92 * c

    # BUG FIX: the blue channel previously gamma-compressed `g` instead of
    # `b` (math.pow(g, ...) in the b branch), corrupting every blue value.
    r = _gamma(r)
    g = _gamma(g)
    b = _gamma(b)

    return [max(0, min(1, r)) * 255,
            max(0, min(1, g)) * 255,
            max(0, min(1, b)) * 255]
def rgb2lab(R: float, G: float, B: float):
    """Convert an sRGB triple (0..255 per channel) to CIE L*a*b* (D65).

    Args:
        R: red channel, 0..255.
        G: green channel, 0..255.
        B: blue channel, 0..255.

    Returns:
        [L, a, b] where L is typically 0..100.
    """
    def _linear(c):
        # Normalize to 0..1 and undo the sRGB gamma compression.
        c = c / 255
        if c > 0.04045:
            return math.pow((c + 0.055) / 1.055, 2.4)
        return c / 12.92

    r = _linear(R)
    g = _linear(G)
    b = _linear(B)

    # Linear sRGB -> XYZ, normalized by the D65 reference white.
    x = (r * 0.4124 + g * 0.3576 + b * 0.1805) / 0.95047
    y = (r * 0.2126 + g * 0.7152 + b * 0.0722) / 1.00000
    z = (r * 0.0193 + g * 0.1192 + b * 0.9505) / 1.08883

    def _f(t):
        # CIE cube-root compression.
        if t > 0.008856:
            return math.pow(t, 1 / 3)
        return (7.787 * t) + 16 / 116

    x = _f(x)
    y = _f(y)
    z = _f(z)

    return [(116 * y) - 16, 500 * (x - y), 200 * (y - z)]
def distance2(x0, y0, z0, x1, y1, z1):
    """Squared Euclidean distance between two 3D points."""
    dx = x1 - x0
    dy = y1 - y0
    dz = z1 - z0
    return dx * dx + dy * dy + dz * dz


def distance(x0, y0, z0, x1, y1, z1):
    """Euclidean distance between two 3D points."""
    return math.sqrt(distance2(x0, y0, z0, x1, y1, z1))
/cioat-0.0.1.tar.gz/cioat-0.0.1/README.md | # cioat
The goal of this project is to visualize COVID-19's impact on airport traffic in Australia, Chile, Canada, and the United States of America.
Using a Geotab dataset, this dataset shows traffic to and from the Airport as a Percentage of the Traffic volume during the baseline period.
The dataset is downloadable from:
https://github.com/jiaoyang-x/covidT/blob/main/covid_impact_on_airport_traffic.csv
# How to install cioat
You may need matplotlib library.
$ pip install matplotlib
$ pip install cioat
# How to run cioat
cioat program allows user to specify up to visualize COVID-19's impact on airport traffic.
$ python cioat.py
<img src='https://github.com/jiaoyang-x/covidT/blob/main/result.png' height=480 width=640>
| PypiClean |
/tensorflow_datasets-4.9.2-py3-none-any.whl/tensorflow_datasets/core/split_builder.py | import collections.abc
import contextlib
import dataclasses
import functools
import itertools
import sys
import typing
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
from absl import logging
import click
import psutil
from tensorflow_datasets.core import example_serializer
from tensorflow_datasets.core import features as features_lib
from tensorflow_datasets.core import file_adapters
from tensorflow_datasets.core import lazy_imports_lib
from tensorflow_datasets.core import naming
from tensorflow_datasets.core import splits as splits_lib
from tensorflow_datasets.core import utils
from tensorflow_datasets.core import writer as writer_lib
from tensorflow_datasets.core.utils import shard_utils
if typing.TYPE_CHECKING:
import apache_beam as beam # pytype: disable=import-error
# Example key used for shuffling
Key = Union[str, int]

# The nested example dict passed to `features.encode_example`
Example = Dict[str, Any]
# A (key, example) pair as yielded by example generators.
KeyExample = Tuple[Key, Example]

# Possible values returned by `GeneratorBasedBuilder._split_generators`
SplitGenerator = Union[
    Iterable[KeyExample],
    # Ideally we should add input/output type annotations
    # `beam.PTransform[[], KeyExample]`, similar to `Callable[[], KeyExample]`
    'beam.PTransform',
    'beam.PCollection[KeyExample]',
]
@utils.docs.deprecated
@dataclasses.dataclass
class SplitGeneratorLegacy:
    """Defines the split information for the generator.

    DEPRECATED: `_split_generators` should return `dict<split_name, generators>`
    instead. See the
    [documentation](https://www.tensorflow.org/datasets/api_docs/python/tfds/core/GeneratorBasedBuilder).

    Attributes:
      name: `str`, name of the Split for which the generator will create the
        examples.
      gen_kwargs: `dict`, kwargs to forward to the _generate_examples() method of
        the builder.
    """

    name: str
    # default_factory gives each instance its own dict (avoids the shared
    # mutable-default pitfall).
    gen_kwargs: Optional[Dict[str, Any]] = dataclasses.field(default_factory=dict)
class _SplitInfoFuture:
    """Future containing the `tfds.core.SplitInfo` result."""

    def __init__(self, callback: Callable[[], splits_lib.SplitInfo]):
        # Deferred computation; invoked each time `result()` is called.
        self._resolve = callback

    def result(self) -> splits_lib.SplitInfo:
        return self._resolve()
@dataclasses.dataclass
class PipelineProxy:
    """Proxy which allows access to beam.Pipeline result after completion.

    This is yielded by the maybe_beam_pipeline() context and can only be used if
    beam is used to generate the dataset.
    """

    # Filled in by `SplitBuilder.maybe_beam_pipeline` once the pipeline has
    # run; stays None when the dataset is generated without Beam.
    _beam_pipeline: Optional['beam.Pipeline']

    @property
    def result(self):
        # NOTE: raises AttributeError if Beam was never used
        # (i.e. `_beam_pipeline` is still None).
        return self._beam_pipeline.result
class SplitBuilder:
    """Util class to build splits.

    Usage is as follow:

    ```py
    split_builder = SplitBuilder(...)
    with split_builder.maybe_beam_pipeline():
      split_info_future = split_builder.submit_split_generation(...)
    split_info = split_info_future.result()
    ```

    * submit_split_generation:
      * For generator based split: Generate the split
      * For Apache Beam based split: Create the `beam.PCollection` and returns
        a future.
    * `split_info_future.result()`: Called after all `beam.PCollection`s have
      finished. Finalize the `split_info` by collecting all pipeline results.

    `submit_split_generation` / `.result` should be called once per
    split.
    """

    def __init__(
        self,
        *,
        split_dict: splits_lib.SplitDict,  # Used for precomputed nb of examples
        features: features_lib.FeatureConnector,
        dataset_size: utils.Size,
        beam_options: Optional['beam.options.pipeline_options.PipelineOptions'],
        beam_runner: Optional['beam.runners.PipelineRunner'],
        max_examples_per_split: Optional[int],
        file_format: file_adapters.FileFormat = file_adapters.DEFAULT_FILE_FORMAT,
        shard_config: Optional[shard_utils.ShardConfig] = None,
    ):
        self._split_dict = split_dict
        self._features = features
        self._dataset_size = dataset_size
        self._max_examples_per_split = max_examples_per_split
        # True while inside the `maybe_beam_pipeline` context manager.
        self._in_contextmanager: bool = False
        self._beam_options = beam_options
        self._beam_runner = beam_runner
        # Lazily created by the `beam_pipeline` property, only when a split
        # actually requires Apache Beam.
        self._beam_pipeline: Optional['beam.Pipeline'] = None
        self._file_format = file_format
        self._shard_config = shard_config

    @contextlib.contextmanager
    def maybe_beam_pipeline(self) -> Iterator[PipelineProxy]:
        """Context manager wrapping the beam pipeline.

        If Apache Beam is used, then the pipeline created within the contextmanager
        will be launched when exiting the context manager:

        ```py
        with split_builder.maybe_beam_pipeline():
          pcollection = (
              split_builder.beam_pipeline
              | beam.Create()
              | beam.Map()
          )
        ```

        Is equivalent to:

        ```py
        with beam.Pipeline() as beam_pipeline:
          pcollection = (
              beam_pipeline
              | beam.Create()
              | beam.Map()
          )
        ```

        If `split_builder.beam_pipeline` is never called, then `beam.Pipeline` is
        never created and this function is a no-op.

        Yields:
          PipelineProxy containing a reference to the beam pipeline, allowing access
          to the pipeline result for (e.g.) logging metrics to file.
        """
        self._in_contextmanager = True
        try:
            # Entering the contextmanager is a no-op. Only if Apache Beam is used
            # is the `beam.Pipeline` contextmanager activated.
            # Construct pipeline proxy with a placeholder beam pipeline.
            pipeline_proxy = PipelineProxy(_beam_pipeline=None)
            yield pipeline_proxy
        except Exception:  # pylint: disable=broad-except
            # Close and forward the exception. `__exit__` returns a falsy value
            # when the exception should propagate.
            if not self._beam_pipeline or not self._beam_pipeline.__exit__(
                *sys.exc_info()
            ):
                raise  # Forward the exception
        else:
            # If the Beam pipeline was used, then exit it (this is what actually
            # runs the pipeline).
            if self._beam_pipeline is not None:
                self._beam_pipeline.__exit__(None, None, None)
            # Fill in the beam pipeline in the proxy.
            pipeline_proxy._beam_pipeline = self._beam_pipeline  # pylint:disable=protected-access
        self._in_contextmanager = False

    @functools.cached_property
    def beam_pipeline(self) -> 'beam.Pipeline':
        """Instanciates and returns Apache Beam pipeline.

        Calling this function starts the Apache Beam mode. The pipeline is cached
        (`functools.cached_property`), so all splits share a single pipeline.

        Returns:
          pipeline: The beam pipeline
        """
        if not self._in_contextmanager:
            raise AssertionError(
                'beam_pipeline has to be created from within `SplitBuilder` '
                'contextmanager.'
            )
        beam = lazy_imports_lib.lazy_imports.apache_beam
        # On Colab, stderr isn't displayed by default, so using `print`.
        print_fn = print if utils.is_notebook() else logging.warning
        if not self._beam_runner and not self._beam_options:
            msg = utils.dedent(
                """
                **************************** WARNING *********************************
                Warning: The dataset you're trying to generate is using Apache Beam,
                yet no `beam_runner` nor `beam_options` was explicitly provided.
                Some Beam datasets take weeks to generate, so are usually not suited
                for single machine generation. Please have a look at the instructions
                to setup distributed generation:
                https://www.tensorflow.org/datasets/beam_datasets#generating_a_beam_dataset
                **********************************************************************
                """
            )
            print_fn(msg)
        # Guard against generating a dataset bigger than the machine's RAM.
        total_memory = psutil.virtual_memory().total
        if self._dataset_size >= total_memory:
            if not click.confirm(
                (
                    f'The dataset is {self._dataset_size} in size, but your machine'
                    f' has only {utils.Size(total_memory)} of memory. Continue?'
                ),
                default=True,
            ):
                sys.exit(1)
        beam_options = (
            self._beam_options or beam.options.pipeline_options.PipelineOptions()
        )
        # Beam type checking assumes transforms multiple outputs are of same type,
        # which is not our case. Plus it doesn't handle correctly all types, so we
        # are better without it.
        beam_options.view_as(
            beam.options.pipeline_options.TypeOptions
        ).pipeline_type_check = False
        # Create the global pipeline object common for all splits
        pipeline = beam.Pipeline(runner=self._beam_runner, options=beam_options)
        self._beam_pipeline = pipeline.__enter__()
        return self._beam_pipeline

    def normalize_legacy_split_generators(
        self,
        split_generators: Union[
            Dict[str, SplitGenerator], List[SplitGeneratorLegacy]
        ],
        generator_fn: Callable[..., Any],
        is_beam: bool,
    ) -> Dict[str, SplitGenerator]:
        """Normalize legacy split API into new dict[split_name, generator].

        This function convert the legacy `List[tfds.core.SplitGenerator]` into
        the new `{'split_name': generator}` structure.

        Could be removed if all datasets were updated.

        Args:
          split_generators: Either legacy or new split_generators
          generator_fn: The `GeneratorBasedBuilder._generate_examples` function.
          is_beam: `True` if using legacy `tfds.core.BeamBasedBuilder`

        Returns:
          split_generators: New split generator structure.
        """
        if isinstance(split_generators, dict):  # New structure
            return split_generators
        if isinstance(split_generators, list):  # Legacy structure
            if is_beam:  # Legacy `tfds.core.BeamBasedBuilder`
                beam = lazy_imports_lib.lazy_imports.apache_beam
                generator_fn = beam.ptransform_fn(generator_fn)
                return {
                    s.name: generator_fn(**s.gen_kwargs)  # Create the `beam.PTransform`
                    for s in split_generators
                }
            else:
                return {
                    split_generator.name: generator_fn(**split_generator.gen_kwargs)
                    for split_generator in split_generators
                }
        else:
            raise TypeError(
                f'Invalid `_split_generators` returned value: {split_generators}'
            )

    def submit_split_generation(
        self,
        split_name: str,
        generator: SplitGenerator,
        filename_template: naming.ShardedFileTemplate,
        disable_shuffling: bool,
    ) -> _SplitInfoFuture:
        """Start the split generation.

        Args:
          split_name: Name of the split to generate
          generator: Generator, beam.PTransform,... yielding the examples
          filename_template: Template to format the filename for a shard.
          disable_shuffling: Specifies whether to shuffle the examples

        Returns:
          split_info_future: Future containing the `split_info`, once generation
            is complete. The `tfds.core.SplitInfo` can be accessed through
            `split_info_future.result()`
        """
        build_kwargs = dict(
            split_name=split_name,
            generator=generator,
            filename_template=filename_template,
            disable_shuffling=disable_shuffling,
        )
        # Depending on the type of generator, we use the corresponding
        # `_build_from_xyz` method.
        if isinstance(generator, collections.abc.Iterable):
            return self._build_from_generator(**build_kwargs)
        else:  # Otherwise, beam required
            unknown_generator_type = TypeError(
                f'Invalid split generator value for split `{split_name}`. '
                'Expected generator or apache_beam object. Got: '
                f'{type(generator)}'
            )
            try:
                import apache_beam as beam  # pylint: disable=g-import-not-at-top
            except ImportError:
                # Beam can't be imported, what was the object returned by the user ?
                raise unknown_generator_type  # pylint: disable=raise-missing-from
            if isinstance(generator, beam.PTransform):
                # Generate the beam.PCollection
                pcollection = self.beam_pipeline | split_name >> generator
                build_kwargs['generator'] = pcollection
                return self._build_from_pcollection(**build_kwargs)
            elif isinstance(generator, beam.PCollection):
                return self._build_from_pcollection(**build_kwargs)
            else:
                raise unknown_generator_type

    def _build_from_generator(
        self,
        split_name: str,
        generator: Iterable[KeyExample],
        filename_template: naming.ShardedFileTemplate,
        disable_shuffling: bool,
    ) -> _SplitInfoFuture:
        """Split generator for example generators.

        Args:
          split_name: str,
          generator: Iterable[KeyExample],
          filename_template: Template to format the filename for a shard.
          disable_shuffling: Specifies whether to shuffle the examples,

        Returns:
          future: The future containing the `tfds.core.SplitInfo`.
        """
        if self._max_examples_per_split is not None:
            logging.warning(
                'Splits capped at %s examples max.', self._max_examples_per_split
            )
            generator = itertools.islice(generator, self._max_examples_per_split)
            total_num_examples = self._max_examples_per_split
        else:
            # If dataset info has been pre-downloaded from the internet,
            # we can use the pre-computed number of example for the progression bar.
            split_info = self._split_dict.get(split_name)
            if split_info and split_info.num_examples:
                total_num_examples = split_info.num_examples
            else:
                total_num_examples = None
        serialized_info = self._features.get_serialized_info()
        writer = writer_lib.Writer(
            serializer=example_serializer.ExampleSerializer(serialized_info),
            filename_template=filename_template,
            hash_salt=split_name,
            disable_shuffling=disable_shuffling,
            # TODO(weide) remove this because it's already in filename_template?
            file_format=self._file_format,
            shard_config=self._shard_config,
        )
        for key, example in utils.tqdm(
            generator,
            desc=f'Generating {split_name} examples...',
            unit=' examples',
            total=total_num_examples,
            leave=False,
        ):
            try:
                example = self._features.encode_example(example)
            except Exception as e:  # pylint: disable=broad-except
                # Re-raise with the offending example attached for debuggability.
                utils.reraise(e, prefix=f'Failed to encode example:\n{example}\n')
            writer.write(key, example)
        shard_lengths, total_size = writer.finalize()
        split_info = splits_lib.SplitInfo(
            name=split_name,
            shard_lengths=shard_lengths,
            num_bytes=total_size,
            filename_template=filename_template,
        )
        # Generation is synchronous here, so the future resolves immediately.
        return _SplitInfoFuture(lambda: split_info)

    def _build_from_pcollection(
        self,
        split_name: str,
        generator: 'beam.PCollection[KeyExample]',
        filename_template: naming.ShardedFileTemplate,
        disable_shuffling: bool,
    ) -> _SplitInfoFuture:
        """Split generator for `beam.PCollection`."""
        # TODO(tfds): Should try to add support to `max_examples_per_split`
        beam = lazy_imports_lib.lazy_imports.apache_beam
        beam_writer = writer_lib.BeamWriter(
            serializer=example_serializer.ExampleSerializer(
                self._features.get_serialized_info()
            ),
            filename_template=filename_template,
            hash_salt=split_name,
            disable_shuffling=disable_shuffling,
            file_format=self._file_format,
            shard_config=self._shard_config,
        )

        def _encode_example(key_ex, encode_fn=self._features.encode_example):
            # We do not access self._features in this function to avoid pickling the
            # entire class.
            return key_ex[0], encode_fn(key_ex[1])

        # Note: We need to wrap the pipeline in a PTransform to avoid
        # errors due to duplicated `>> beam_labels`
        @beam.ptransform_fn
        def _encode_pcollection(pipeline):
            """PTransformation which build a single split."""
            pcoll_examples = pipeline | 'Encode' >> beam.Map(_encode_example)
            return beam_writer.write_from_pcollection(pcoll_examples)

        # Add the PCollection to the pipeline
        _ = generator | f'{split_name}_write' >> _encode_pcollection()  # pylint: disable=no-value-for-parameter

        def _resolve_future():
            # Must only be resolved once the Beam pipeline has actually run,
            # i.e. after the `maybe_beam_pipeline` contextmanager has exited.
            if self._in_contextmanager:
                raise AssertionError(
                    '`future.result()` should be called after the '
                    '`maybe_beam_pipeline` contextmanager.'
                )
            logging.info('Retrieving split info for %s...', split_name)
            shard_lengths, total_size = beam_writer.finalize()
            return splits_lib.SplitInfo(
                name=split_name,
                shard_lengths=shard_lengths,
                num_bytes=total_size,
                filename_template=filename_template,
            )

        return _SplitInfoFuture(_resolve_future)
/nameshark-vcard-1.0.0.tar.gz/nameshark-vcard-1.0.0/nameshark_vcard/nameshark_vcard.py | """Convert vCard-formatted string to the JSON format expected by Name Shark."""
# coding=utf-8
import base64
import json
import collections
import argparse
import vobject
# Lightweight record holding the two name parts extracted from a vCard.
NAMES = collections.namedtuple('Names', ['first_name', 'surname'])
def get_pp_names(fn_field):
    """
    Use probablepeople to extract firstname/surname from a vCard 'fn' field.

    Either part is None when probablepeople is unavailable or could not
    identify it.

    :param fn_field: the input vCard 'fn' field.
    :return: a namedtuple containing the first name and surname.
    """
    first_name, surname = None, None
    try:
        import probablepeople as pp  # not python 2.6 compatible

        # Tag the parts of the name; `tag` returns (parts, name_type).
        tagged, _ = pp.tag(fn_field)
        first_name = tagged.get('GivenName', None)
        surname = tagged.get('Surname', None)
    except (ImportError, SyntaxError, TypeError) as error:
        print(error)
    return NAMES(first_name, surname)
def get_names(fn_field):
    """
    Extract the first name and surname from a vCard 'fn' field.

    Falls back to naive whitespace splitting for any part that
    probablepeople could not identify.

    :param fn_field: the input vCard 'fn' field.
    :return: a namedtuple containing the first name and surname.

    >>> get_names('John Smith')
    Extracting data for John Smith
    Names(first_name='John', surname='Smith')
    """
    first_name, surname = get_pp_names(fn_field)

    try:
        parts = fn_field.split(' ')
    except (TypeError, AttributeError):
        parts = ['']

    if first_name is None:
        # Assume the first whitespace-separated token is the first name,
        # unless it duplicates the surname probablepeople already found.
        first_name = '' if parts[0] == surname else parts[0]
    if surname is None:
        # Assume the second token (when present) is the surname.
        surname = parts[1] if len(parts) > 1 else ''

    print('Extracting data for ' + first_name + ' ' + surname)
    return NAMES(first_name, surname)
def get_photo(photo):
    """
    Extract the photo data (if it exists) from a vCard 'photo' field.

    :param photo: the input vCard 'photo' field (raw bytes, or None).
    :return: a base64-encoded data-URI string, or '' when there is no photo.
    """
    if photo is None:
        return ""
    encoded = base64.b64encode(photo)
    return "data:image/jpeg;base64," + encoded.decode('utf8')
def extract_contact_from_component(component):
    """
    Extract the contact info from a vCard component.

    :param component: the input vCard component.
    :return: a dictionary containing the extracted contact info.
    """
    names = get_names(component.getChildValue('fn'))
    photo_data = get_photo(component.getChildValue('photo'))
    if photo_data == '':
        print('Warning: Missing photo for ' + names.first_name + ' ' +
              names.surname + '...!')
    return {
        'first': names.first_name,
        'last': names.surname,
        'photoData': photo_data,
        'details': '',
    }
def extract_contacts_from_vcard(vcard):
    """
    Extract the contact info from a vCard.

    :param vcard: the vCard text to convert.
    :return: a list containing the extracted contact info.
    """
    # One contact dict per vCard component in the input.
    return [
        extract_contact_from_component(component)
        for component in vobject.readComponents(vcard)
    ]
def convert_to_nameshark(group_name, contacts):
    """
    Convert a list containing contact info into JSON for Name Shark.

    :param group_name: the Name Shark group to use.
    :param contacts: the list containing contact info extracted from a vCard.
    :return: the JSON string.
    """
    group = {'name': group_name, 'contacts': contacts}
    return json.dumps(group, sort_keys=True, indent=4)
def vcard_to_nameshark(vcard, group_name):
    """
    Convert vCard-formatted string to the JSON format expected by Name Shark.

    :param vcard: the vCard text to convert.
    :param group_name: the Name Shark group to use.
    :return: JSON version of vCard input.
    """
    return convert_to_nameshark(group_name, extract_contacts_from_vcard(vcard))
def main():
    """
    Read a vCard file and write the Name Shark JSON version next to it.

    :return: None
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('file', help='the input file')
    parser.add_argument('group', help='the output group name')
    args = parser.parse_args()

    with open(args.file, 'r') as input_file:
        vcard_text = input_file.read()

    json_text = vcard_to_nameshark(vcard_text, args.group)

    # Output file is named after the group.
    with open(args.group + '.json', 'w') as output_file:
        output_file.write(json_text)


if __name__ == '__main__':
    main()
/trytond_analytic_budget-6.8.0.tar.gz/trytond_analytic_budget-6.8.0/doc/usage.rst | *****
Usage
*****
.. Setting analytic budgets:
Setting analytic budgets
========================
New `Analytic Budgets <model-analytic_account.budget>` can easily be created,
and existing budgets updated, by changing the budgets that are found in the
[:menuselection:`Financial --> Budgets --> Analytic Budgets`] main menu entry.
.. tip::
To quickly add missing lines to a budget, you can use the :guilabel:`Update
Lines` button. This creates a budget line for each missing analytic account.
.. tip::
An analytic budget can be copied by using the `Copy Budget
<wizard-analytic_account.budget.copy>` wizard from the budget's
:guilabel:`Copy` button.
.. Tracking analytic budgets
Tracking analytic budgets
=========================
A report of actual amounts of each `Analytic Budget
<model-analytic_account.budget>` can be found by opening the
[:menuselection:`Financial --> Reporting --> Analytic Budgets`] main menu item.
| PypiClean |
/phishing-tracker-0.0.7.tar.gz/phishing-tracker-0.0.7/README.md | # Phishing Tracker
[![PyPi](https://img.shields.io/pypi/v/phishing-tracker.svg)](https://pypi.python.org/pypi/phishing-tracker/)
[![Python Versions](https://img.shields.io/pypi/pyversions/phishing-tracker.svg)](https://github.com/ndejong/phishing-tracker/)
[![Build Status](https://api.travis-ci.org/ndejong/phishing-tracker.svg?branch=master)](https://travis-ci.org/ndejong/phishing-tracker/)
[![License](https://img.shields.io/github/license/ndejong/phishing-tracker.svg)](https://github.com/ndejong/phishing-tracker)
Utility to manage sets of phishing links making it easier to track their removal progress over time.
This project started out of frustration at dealing over and over again with phishing threat-actors, and from wanting an easy tool
to track these links over time without needing to roll out a full-fledged CERT stack (e.g. TheHive).
Captures everything per-run in a single JSON file making it easy to compare and track change over time - and integrate
with other tooling if desired.
See examples to get a clear idea on usage and possibilities.
## Features
* Batch mode with `.yml` configuration file
* Single shot mode by passing link/hostname/domain in at cli
* Collects useful reference-information and artifacts per phish link stored in an easy reference json file
* Create rules to define expected (or desired) analyzers output responses
* Easy to re-run and hence re-compare the latest status of phish-links over time
* Debug mode output to STDERR
## Analyzers
### `dig`
* dig-domain - determine domain relative to TLD and collect A, CNAME, NS, MX, TXT records
* dig-hostname - collect hostname A, AAAA, CNAME, NS, MX, TXT records
### `http`
* http-get - perform http (clear-text) GET request capturing request/response headers and response content
### `https`
* https-get - as per http-get using HTTPS
### `https_certificate`
* https-certificate - obtain the https SSL certificate and parse certificate attributes
### `smtp`
* smtp-headers - connect to hostname/domain MX records and capture the server header
### `safe_browsing`
* safe-browsing - query the Google safe-browsing API - https://developers.google.com/safe-browsing/v4
### `whois`
* whois - perform a whois and parse associated attributes
## Analyzers - Todo
* Virustotal lookup - https://developers.virustotal.com/reference#url-scan
## Install
#### via PyPi
```bash
pip3 install phishing-tracker
```
#### via Source
```bash
git clone https://github.com/ndejong/phishing-tracker
cd phishing-tracker
python3 -m venv venv
source venv/bin/activate
pip3 install -r requirements.txt
python3 setup.py clean
python3 setup.py test
python3 setup.py install
```
## Project
* [github.com/ndejong/phishing-tracker](https://github.com/ndejong/phishing-tracker)
## Analyzer Response Reports
```dns_domainname_aaaa_record
dns_domainname_a_record
dns_domainname_cname_record
dns_domainname_mx_record
dns_domainname_ns_record
dns_domainname_txt_record
dns_domainname_unknown_tld
dns_hostname_aaaa_record
dns_hostname_a_record
dns_hostname_cname_record
dns_hostname_eq_dns_domainname
dns_hostname_mx_record
dns_hostname_ns_record
dns_hostname_txt_record
http_exception
http_hostname_<statuscode>_response
https_certificate_exception
https_certificate_hostname_mismatch
https_exception
https_hostname_<statuscode>_response
safe_browsing_exception
safe_browsing_record
smtp_domainname_active
smtp_exception
smtp_hostname_active
whois_domainname_record
whois_exception
```
## Google Safe Browsing API key
In order to make use of the Google Safe Browsing API lookup, the environment variable `GCP_API_KEY` needs to be
set with an appropriate GCP key that has access to the safe-browsing API - [read more here](https://developers.google.com/safe-browsing/v4/get-started).
## Examples
* [examples01.yml](https://github.com/ndejong/phishing-tracker/blob/master/examples/examples01.yml)
## Authors
[Nicholas de Jong](https://nicholasdejong.com)
## License
BSD-2-Clause - see LICENSE file for full details.
| PypiClean |
/chinilla-blockchain-1.4.0b6.tar.gz/chinilla-blockchain-1.4.0b6/chinilla/wallet/db_wallet/db_wallet_puzzles.py | from __future__ import annotations
from typing import Iterator, List, Tuple, Union
from chinilla.types.blockchain_format.program import Program
from chinilla.types.blockchain_format.sized_bytes import bytes32
from chinilla.types.condition_opcodes import ConditionOpcode
from chinilla.util.ints import uint64
from chinilla.wallet.nft_wallet.nft_puzzles import NFT_STATE_LAYER_MOD, create_nft_layer_puzzle_with_curry_params
from chinilla.wallet.puzzles.load_clvm import load_clvm_maybe_recompile
# from chinilla.types.condition_opcodes import ConditionOpcode
# from chinilla.wallet.util.merkle_tree import MerkleTree, TreeType
ACS_MU = Program.to(11)  # returns the third argument a.k.a the full solution
ACS_MU_PH = ACS_MU.get_tree_hash()

# Compiled CLVM puzzles used by the data-layer drivers below.
SINGLETON_TOP_LAYER_MOD = load_clvm_maybe_recompile("singleton_top_layer_v1_1.clvm")
SINGLETON_LAUNCHER = load_clvm_maybe_recompile("singleton_launcher.clvm")
GRAFTROOT_DL_OFFERS = load_clvm_maybe_recompile("graftroot_dl_offers.clvm")
P2_PARENT = load_clvm_maybe_recompile("p2_parent.clvm")
def create_host_fullpuz(innerpuz: Union[Program, bytes32], current_root: bytes32, genesis_id: bytes32) -> Program:
    """Wrap the data-layer puzzle for `current_root` in the singleton top layer."""
    singleton_struct = Program.to(
        (
            SINGLETON_TOP_LAYER_MOD.get_tree_hash(),
            (genesis_id, SINGLETON_LAUNCHER.get_tree_hash()),
        )
    )
    return SINGLETON_TOP_LAYER_MOD.curry(
        singleton_struct, create_host_layer_puzzle(innerpuz, current_root)
    )
def create_host_layer_puzzle(innerpuz: Union[Program, bytes32], current_root: bytes32) -> Program:
    """Build the NFT-state-layer puzzle committing to `current_root`."""
    # Metadata layout and the metadata updater are hard coded for now:
    # the metadata is `(current_root . None)` and the updater is ACS_MU.
    metadata = Program.to((current_root, None))
    return create_nft_layer_puzzle_with_curry_params(
        metadata,
        ACS_MU_PH,
        # TODO: the nft driver doesn't like the Union yet, but changing that is out of scope for me rn - Quex
        innerpuz,  # type: ignore
    )
def match_dl_singleton(puzzle: Program) -> Tuple[bool, Iterator[Program]]:
    """
    Given a puzzle, test if it's a data-layer singleton and, if it is, return
    the curried arguments as an iterator of (innerpuz, root, launcher_id).
    """
    outer_mod, singleton_args = puzzle.uncurry()
    if outer_mod != SINGLETON_TOP_LAYER_MOD:
        return False, iter(())
    inner_mod, dl_args = singleton_args.at("rf").uncurry()
    if inner_mod != NFT_STATE_LAYER_MOD and dl_args.at("rrf") == ACS_MU_PH:
        return False, iter(())
    if inner_mod == NFT_STATE_LAYER_MOD and dl_args.at("rrf") == ACS_MU_PH:
        extracted = (
            dl_args.at("rrrf"),  # inner puzzle
            dl_args.at("rff"),  # merkle root
            singleton_args.at("frf"),  # launcher id
        )
        return True, iter(extracted)
    return False, iter(())
def launch_solution_to_singleton_info(launch_solution: Program) -> Tuple[bytes32, uint64, bytes32, bytes32]:
    """Parse a launcher solution into (full_puzzle_hash, amount, root, inner_puzzle_hash).

    Raises ValueError when the solution does not have the data-layer launcher shape.
    """
    solution = launch_solution.as_python()
    try:
        return (
            bytes32(solution[0]),
            uint64(int.from_bytes(solution[1], "big")),
            bytes32(solution[2][0]),
            bytes32(solution[2][1]),
        )
    except (IndexError, TypeError):
        raise ValueError("Launcher is not a data layer launcher")
def launcher_to_struct(launcher_id: bytes32) -> Program:
    """Build the singleton structure program for the given launcher id."""
    struct: Program = Program.to(
        (
            SINGLETON_TOP_LAYER_MOD.get_tree_hash(),
            (launcher_id, SINGLETON_LAUNCHER.get_tree_hash()),
        )
    )
    return struct
def create_graftroot_offer_puz(
    launcher_ids: List[bytes32], values_to_prove: List[List[bytes32]], inner_puzzle: Program
) -> Program:
    """Curry the graftroot DL-offer puzzle for the given singletons and values to prove."""
    singleton_structs = [launcher_to_struct(launcher_id) for launcher_id in launcher_ids]
    metadata_layer_hashes = [NFT_STATE_LAYER_MOD.get_tree_hash()] * len(launcher_ids)
    return GRAFTROOT_DL_OFFERS.curry(
        inner_puzzle,
        singleton_structs,
        metadata_layer_hashes,
        values_to_prove,
    )
def create_mirror_puzzle() -> Program:
    # The p2_parent puzzle curried with the constant program `1`.
    return P2_PARENT.curry(Program.to(1))


# Tree hash of the (constant) mirror puzzle, precomputed at import time.
MIRROR_PUZZLE_HASH = create_mirror_puzzle().get_tree_hash()
def get_mirror_info(parent_puzzle: Program, parent_solution: Program) -> Tuple[bytes32, List[bytes]]:
    """
    Run the parent spend and pull `(launcher_id, urls)` out of the memos of the
    CREATE_COIN condition paying to the mirror puzzle.

    Raises ValueError when no such condition is produced.
    """
    mirror_ph = create_mirror_puzzle().get_tree_hash()
    for condition in parent_puzzle.run(parent_solution).as_iter():
        is_create_coin = condition.first().as_python() == ConditionOpcode.CREATE_COIN
        if is_create_coin and condition.at("rf").as_python() == mirror_ph:
            memos: List[bytes] = condition.at("rrrf").as_python()
            # First memo is the launcher id; the rest are mirror URLs.
            return bytes32(memos[0]), list(memos[1:])
    raise ValueError("The provided puzzle and solution do not create a mirror coin")
/running_mate-0.0.6-py3-none-any.whl/mate/db.py | import datetime
import logging
import os
import pathlib
from typing import List, Union
from peewee import ( # type: ignore
CharField,
DatabaseProxy,
DateTimeField,
FloatField,
ForeignKeyField,
IntegerField,
Model,
SqliteDatabase,
)
# Module-level logger for the mate package.
logger = logging.getLogger("mate")

# Database proxy: the real SQLite database is bound later by `init_db`, so
# the models below can be declared before the storage location is known.
db = DatabaseProxy()
class Mate(Model):
    # A named, versioned model; each re-registration bumps `version`
    # (see `version_or_create_mate`).
    name = CharField()
    version = IntegerField()
    item_count = IntegerField(null=True)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "mate"
class Inference(Model):
    # One row per inference call made against a Mate.
    mate = ForeignKeyField(Mate)
    runtime = CharField(null=True)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "inference"
class Feature(Model):
    # An input feature of a Mate; `inferred_type` records the detected type
    # (used to pick NumericalStats vs StringStats).
    name = CharField()
    inferred_type = CharField()
    mate = ForeignKeyField(Mate)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "feature"
class NumericalStats(Model):
    # Summary statistics for a numerical feature.
    num_present = IntegerField()
    num_missing = IntegerField()
    mean = FloatField()
    sum = IntegerField()
    std_dev = FloatField()
    min = IntegerField()
    max = IntegerField()
    feature = ForeignKeyField(Feature)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "numerical_stats"
class StringStats(Model):
    # Summary statistics for a string feature.
    num_present = IntegerField()
    num_missing = IntegerField()
    distinct_count = IntegerField()
    feature = ForeignKeyField(Feature)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "string_stats"
class FeatureValue(Model):
    # A single observed value of a feature during one inference;
    # `value` is NULL when the feature was missing.
    value = CharField(null=True)
    feature = ForeignKeyField(Feature)
    inference = ForeignKeyField(Inference)
    created_at = DateTimeField(default=datetime.datetime.now)

    class Meta:
        database = db
        table_name = "feature_value"
class FeatureAlert(Model):
    # An alert raised for a specific feature value during an inference;
    # `kind` describes the category of alert.
    name = CharField()
    kind = CharField()
    feature_value = ForeignKeyField(FeatureValue)
    feature = ForeignKeyField(Feature)
    inference = ForeignKeyField(Inference)

    class Meta:
        database = db
        table_name = "feature_alert"
def connect_db():
    """Bind and connect the database, creating any missing tables."""
    database = init_db()
    database.connect()

    models = [
        Mate,
        Inference,
        Feature,
        NumericalStats,
        StringStats,
        FeatureValue,
        FeatureAlert,
    ]
    # Same table names (and check order) as the original chained condition.
    expected_tables = [
        "mate",
        "inference",
        "feature",
        "numerical_stats",
        "string_stats",
        "feature_alert",
        "feature_value",
    ]
    if not all(database.table_exists(name) for name in expected_tables):
        database.create_tables(models)
def get_current_mate(name: str) -> Union[Mate, None]:
    """Return the latest version of the named mate, or None when absent."""
    query = (
        Mate.select()
        .where(Mate.name == name)
        .order_by(Mate.version.desc())
        .limit(1)
    )
    return query[0] if query else None
def get_features(mate: Mate) -> List[Feature]:
    """Return all Feature rows registered for *mate* (as a lazy query)."""
    return Feature.select().where(Feature.mate == mate)
def get_feature_values(mate: Mate) -> List[FeatureValue]:
    """Return every FeatureValue recorded for *mate*'s features."""
    query = (
        FeatureValue.select()
        .join(Feature)
        .where(Feature.mate == mate)
    )
    return query
def get_statistics(Stats: Model, feature: Feature) -> Model:
    """Return the single *Stats* row (NumericalStats or StringStats)
    associated with *feature*.

    Raises the model's ``DoesNotExist`` if no matching row exists.
    """
    return Stats.get(feature=feature)
def init_db() -> SqliteDatabase:
    """Initialize the module-level database proxy ``db``.

    Uses an in-memory SQLite database when the ``TESTING`` environment
    variable is truthy, otherwise a ``mate.db`` file in the current
    working directory.

    Returns
    -------
    SqliteDatabase
        The initialized module-level database proxy.
    """
    # Fix: int() previously raised ValueError for non-numeric values such
    # as TESTING=true; treat any non-numeric, non-empty value as enabled.
    raw = os.getenv("TESTING", "0")
    try:
        test_mode = int(raw)
    except ValueError:
        test_mode = 1 if raw.strip() else 0
    if test_mode:
        database = SqliteDatabase(":memory:")
        logger.info("Using in-memory SQLite")
    else:
        base_dir = pathlib.Path.cwd()
        database = SqliteDatabase(base_dir / "mate.db")
        logger.info("Using disk-based SQLite")
    db.initialize(database)
    return db
def version_or_create_mate(name: str) -> Mate:
    """Save and return a new Mate row for *name*.

    The version is 1 for a previously unseen name, otherwise the latest
    stored version plus one.
    """
    latest = get_current_mate(name)
    if latest:
        version = int(latest.version) + 1
        logger.info(f"Model found. Creating new version: {version}.")
    else:
        version = 1
        logger.info("Model not found. Creating new mate.")
    mate = Mate(name=name, version=version)
    mate.save()
    return mate
/ethtools-py-evm-0.7.0a3.tar.gz/ethtools-py-evm-0.7.0a3/eth/consensus/clique/encoding.py | from typing import (
Tuple,
)
from eth_typing import (
Address,
)
import rlp
from eth.consensus.clique.datatypes import (
Snapshot,
Tally,
Vote,
VoteAction,
)
from eth.rlp.sedes import (
uint256,
)
# RLP list layout for an (address, rlp-encoded tally) pair.
ADDRESS_TALLY_SEDES = rlp.sedes.List((rlp.sedes.binary, rlp.sedes.binary))
# RLP list layout for a Vote: (signer, block_number, subject, action value).
VOTE_SEDES = rlp.sedes.List(
    (
        rlp.sedes.binary,
        uint256,
        rlp.sedes.binary,
        rlp.sedes.binary,
    )
)
# RLP list layout for a Snapshot:
# (block_hash, signers, encoded votes, encoded address/tally pairs).
SNAPSHOT_SEDES = rlp.sedes.List(
    (
        rlp.sedes.binary,
        rlp.sedes.CountableList(rlp.sedes.binary),
        rlp.sedes.CountableList(rlp.sedes.binary),
        rlp.sedes.CountableList(rlp.sedes.binary),
    )
)
# RLP list layout for a Tally: (action value, vote count).
TALLY_SEDES = rlp.sedes.List((rlp.sedes.binary, uint256))
def encode_address_tally_pair(pair: Tuple[Address, Tally]) -> bytes:
    """RLP-encode an (address, tally) pair."""
    address, tally = pair
    payload = [address, encode_tally(tally)]
    return rlp.encode(payload, sedes=ADDRESS_TALLY_SEDES)
def decode_address_tally_pair(pair: bytes) -> Tuple[Address, Tally]:
    """Decode an RLP-encoded (address, tally) pair."""
    address, tally_bytes = rlp.decode(pair, sedes=ADDRESS_TALLY_SEDES)
    return address, decode_tally(tally_bytes)
def encode_vote(vote: Vote) -> bytes:
    """RLP-encode a Vote as (signer, block number, subject, action value)."""
    fields = [
        vote.signer,
        vote.block_number,
        vote.subject,
        vote.action.value,
    ]
    return rlp.encode(fields, sedes=VOTE_SEDES)
def decode_vote(vote: bytes) -> Vote:
    """Decode an RLP-encoded Vote.

    Raises
    ------
    ValueError
        If the decoded action value is not a valid ``VoteAction``.
        (Previously any unrecognized value was silently interpreted as
        ``KICK``, masking corrupt input; ``VoteAction(...)`` now rejects
        it, matching ``decode_tally``.)
    """
    signer, block_number, subject, action = rlp.decode(
        vote,
        sedes=VOTE_SEDES,
    )
    return Vote(
        signer=signer,
        block_number=block_number,
        subject=subject,
        action=VoteAction(action),
    )
def encode_snapshot(snapshot: Snapshot) -> bytes:
    """RLP-encode a Snapshot (block hash, signers, votes, tallies)."""
    encoded_votes = [encode_vote(v) for v in snapshot.votes]
    encoded_tallies = [
        encode_address_tally_pair(item) for item in snapshot.tallies.items()
    ]
    payload = [
        snapshot.block_hash,
        list(snapshot.signers),
        encoded_votes,
        encoded_tallies,
    ]
    return rlp.encode(payload, sedes=SNAPSHOT_SEDES)
def decode_snapshot(snapshot: bytes) -> Snapshot:
    """Decode an RLP-encoded Snapshot produced by ``encode_snapshot``."""
    block_hash, signers, votes_rlp, tallies_rlp = rlp.decode(
        snapshot, sedes=SNAPSHOT_SEDES
    )
    return Snapshot(
        signers=frozenset(signers),
        block_hash=block_hash,
        votes=frozenset(decode_vote(raw) for raw in votes_rlp),
        tallies=dict(decode_address_tally_pair(raw) for raw in tallies_rlp),
    )
def encode_tally(tally: Tally) -> bytes:
    """RLP-encode a Tally as (action value, vote count)."""
    payload = [tally.action.value, tally.votes]
    return rlp.encode(payload, sedes=TALLY_SEDES)
def decode_tally(tally: bytes) -> Tally:
    """Decode an RLP-encoded Tally produced by ``encode_tally``."""
    action_binary, vote_count = rlp.decode(tally, sedes=TALLY_SEDES)
    return Tally(action=VoteAction(action_binary), votes=vote_count)
/askbot-tuan-1.5.tar.gz/askbot-tuan-1.5/askbot/setup_templates/static/tiny_mce/langs/ia.js | tinyMCE.addI18n({ia:{common:{"more_colors":"\u66f4\u591a\u989c\u8272","invalid_data":"\u9519\u8bef\uff1a\u8f93\u5165\u4e86\u65e0\u6548\u7684\u503c\uff0c\u5df2\u6807\u8bb0\u4e3a\u7ea2\u8272\u3002","popup_blocked":"\u5bf9\u4e0d\u8d77\uff0c\u60a8\u7684\u5feb\u663e\u7a97\u53e3\u963b\u6b62\u7a0b\u5e8f\u5df2\u7ecf\u963b\u6b62\u4e86\u672c\u5feb\u663e\u7a97\u53e3\uff0c\u8bf7\u8c03\u6574\u4f60\u7684\u6d4f\u89c8\u5668\u8bbe\u7f6e\uff0c\u5141\u8bb8\u672c\u7f51\u7ad9\u5f39\u51fa\u65b0\u7a97\u53e3\uff0c\u4ee5\u4fbf\u4f7f\u7528\u6b64\u529f\u80fd","clipboard_no_support":"\u5c1a\u4e0d\u652f\u6301\u60a8\u6240\u4f7f\u7528\u7684\u6d4f\u89c8\u5668\uff0c\u8bf7\u4f7f\u7528\u952e\u76d8\u65b9\u5f0f\u64cd\u4f5c","clipboard_msg":"\u590d\u5236\u3001\u526a\u5207\u548c\u7c98\u8d34\u529f\u80fd\u5728Mozilla \u548c Firefox\u4e2d\u65e0\u6cd5\u4f7f\u7528","not_set":"-- \u672a\u8bbe\u7f6e --","class_name":"\u6837\u5f0f\u7c7b\u540d",browse:"\u6d4f\u89c8",close:"\u5173\u95ed",cancel:"\u53d6\u6d88",update:"\u66f4\u65b0",insert:"\u63d2\u5165",apply:"\u5e94\u7528","edit_confirm":"\u662f\u5426\u5728\u8be5text area\u5185\u542f\u7528\u6240\u89c1\u5373\u6240\u5f97\u6a21\u5f0f\uff1f","invalid_data_number":"{#field} must be a number","invalid_data_min":"{#field} must be a number greater than {#min}","invalid_data_size":"{#field} must be a number or 
percentage",value:"(value)"},contextmenu:{full:"\u4e24\u7aef\u5bf9\u9f50",right:"\u53f3\u5bf9\u9f50",center:"\u5c45\u4e2d",left:"\u5de6\u5bf9\u9f50",align:"\u5bf9\u9f50\u65b9\u5f0f"},insertdatetime:{"day_short":"\u5468\u65e5,\u5468\u4e00,\u5468\u4e8c,\u5468\u4e09,\u5468\u56db,\u5468\u4e94,\u5468\u516d,\u5468\u65e5","day_long":"\u661f\u671f\u65e5,\u661f\u671f\u4e00,\u661f\u671f\u4e8c,\u661f\u671f\u4e09,\u661f\u671f\u56db,\u661f\u671f\u4e94,\u661f\u671f\u516d,\u661f\u671f\u65e5","months_short":"1\u6708,2\u6708,3\u6708,4\u6708,5\u6708,6\u6708,7\u6708,8\u6708,9\u6708,10\u6708,11\u6708,12\u6708","months_long":"\u4e00\u6708,\u4e8c\u6708,\u4e09\u6708,\u56db\u6708,\u4e94\u6708,\u516d\u6708,\u4e03\u6708,\u516b\u6708,\u4e5d\u6708,\u5341\u6708,\u5341\u4e00\u6708,\u5341\u4e8c\u6708","inserttime_desc":"\u63d2\u5165\u73b0\u5728\u65f6\u95f4","insertdate_desc":"\u63d2\u5165\u4eca\u5929\u65e5\u671f","time_fmt":"%H:%M:%S","date_fmt":"%Y-%m-%d"},print:{"print_desc":"\u6253\u5370"},preview:{"preview_desc":"\u9884\u89c8"},directionality:{"rtl_desc":"\u6587\u5b57\u4ece\u53f3\u5230\u5de6","ltr_desc":"\u6587\u5b57\u4ece\u5de6\u5230\u53f3"},layer:{content:"\u65b0\u589e\u5c42...","absolute_desc":"\u7edd\u5bf9\u4f4d\u7f6e","backward_desc":"\u540e\u7f6e","forward_desc":"\u524d\u7f6e","insertlayer_desc":"\u63d2\u5165\u5c42"},save:{"save_desc":"\u4fdd\u5b58","cancel_desc":"\u653e\u5f03\u6240\u6709\u66f4\u6539"},nonbreaking:{"nonbreaking_desc":"\u63d2\u5165\u7a7a\u683c\u7b26"},iespell:{download:"\u672a\u68c0\u6d4b\u5230ieSpell\u7ec4\u4ef6\uff0c\u662f\u5426\u73b0\u5728\u5b89\u88c5 ?","iespell_desc":"\u6267\u884c\u62fc\u5199\u68c0\u67e5"},advhr:{"advhr_desc":"\u5206\u9694\u7ebf","delta_height":"","delta_width":""},emotions:{"emotions_desc":"\u8868\u60c5","delta_height":"","delta_width":""},searchreplace:{"replace_desc":"\u67e5\u627e/\u66ff\u6362","search_desc":"\u67e5\u627e","delta_width":"","delta_height":""},advimage:{"image_desc":"\u63d2\u5165/\u7f16\u8f91 
\u56fe\u7247","delta_width":"","delta_height":""},advlink:{"link_desc":"\u63d2\u5165/\u7f16\u8f91 \u8fde\u7ed3","delta_height":"","delta_width":""},xhtmlxtras:{"attribs_desc":"\u63d2\u5165/\u7f16\u8f91 \u5c5e\u6027","ins_desc":"\u63d2\u5165","del_desc":"\u5220\u9664","acronym_desc":"\u9996\u5b57\u6bcd\u7f29\u5199","abbr_desc":"\u7f29\u5199","cite_desc":"\u5f15\u7528","attribs_delta_height":"","attribs_delta_width":"","ins_delta_height":"","ins_delta_width":"","del_delta_height":"","del_delta_width":"","acronym_delta_height":"","acronym_delta_width":"","abbr_delta_height":"","abbr_delta_width":"","cite_delta_height":"","cite_delta_width":""},style:{desc:"\u7f16\u8f91 CSS \u6837\u5f0f\u8868","delta_height":"","delta_width":""},paste:{"selectall_desc":"\u5168\u9009","paste_word_desc":"\u7c98\u8d34\u4e3aWord\u683c\u5f0f","paste_text_desc":"\u7c98\u8d34\u4e3a\u7eaf\u6587\u5b57","plaintext_mode":"Paste is now in plain text mode. Click again to toggle back to regular paste mode.","plaintext_mode_sticky":"Paste is now in plain text mode. Click again to toggle back to regular paste mode. 
After you paste something you will be returned to regular paste mode."},"paste_dlg":{"word_title":"\u5c06\u590d\u5236(CTRL + C)\u7684\u5185\u5bb9\u7c98\u8d34(CTRL + V)\u5230\u7a97\u53e3\u3002","text_linebreaks":"\u4fdd\u7559\u5206\u884c\u7b26\u53f7\u53f7","text_title":"\u5c06\u590d\u5236(CTRL + C)\u7684\u5185\u5bb9\u7c98\u8d34(CTRL + V)\u5230\u7a97\u53e3\u3002"},table:{cell:"\u50a8\u5b58\u683c",col:"\u5217",row:"\u884c",del:"\u5220\u9664\u8868\u683c","copy_row_desc":"\u590d\u5236\u5f53\u524d\u5217","cut_row_desc":"\u526a\u5207\u5f53\u524d\u5217","paste_row_after_desc":"\u7c98\u8d34\u884c\u5230\u4e0b\u65b9","paste_row_before_desc":"\u7c98\u8d34\u884c\u5230\u4e0a\u65b9","props_desc":"\u8868\u683c \u5c5e\u6027","cell_desc":"\u50a8\u5b58\u683c \u5c5e\u6027","row_desc":"\u5217 \u5c5e\u6027","merge_cells_desc":"\u5408\u5e76\u50a8\u5b58\u683c","split_cells_desc":"\u62c6\u5206\u50a8\u5b58\u683c","delete_col_desc":"\u5220\u9664\u5f53\u524d\u5217","col_after_desc":"\u5728\u53f3\u4fa7\u63d2\u5165\u5217","col_before_desc":"\u5728\u5de6\u4fa7\u63d2\u5165\u5217","delete_row_desc":"\u5220\u9664\u5f53\u524d\u884c","row_after_desc":"\u5728\u4e0b\u65b9\u63d2\u5165\u884c","row_before_desc":"\u5728\u4e0a\u65b9\u63d2\u5165\u884c",desc:"\u63d2\u5165\u65b0\u8868\u683c","merge_cells_delta_height":"","merge_cells_delta_width":"","table_delta_height":"","table_delta_width":"","cellprops_delta_height":"","cellprops_delta_width":"","rowprops_delta_height":"","rowprops_delta_width":""},autosave:{"unload_msg":"\u5982\u679c\u79bb\u5f00\u8be5\u9875\u5c06\u5bfc\u81f4\u6240\u6709\u4fee\u6539\u5168\u90e8\u4e22\u5931\u3002","warning_message":"If you restore the saved content, you will lose all the content that is currently in the editor.\n\nAre you sure you want to restore the saved content?","restore_content":"Restore auto-saved content."},fullscreen:{desc:"\u5168\u5c4f\u6a21\u5f0f"},media:{edit:"\u7f16\u8f91 \u5a92\u4f53",desc:"\u63d2\u5165/\u7f16\u8f91 
\u5a92\u4f53","delta_height":"","delta_width":""},fullpage:{desc:"\u6587\u4ef6","delta_width":"","delta_height":""},template:{desc:"\u63d2\u5165\u9009\u5b9a\u7684\u8303\u672c"},visualchars:{desc:"\u663e\u793a\u63a7\u5236\u7b26\u53f7\u3002"},spellchecker:{desc:"\u62fc\u5199\u68c0\u67e5",menu:"\u62fc\u5199\u68c0\u67e5 \u8bbe\u7f6e","ignore_word":"\u5ffd\u7565","ignore_words":"\u5168\u90e8\u5ffd\u7565",langs:"\u8bed\u8a00\u6e05\u5355",wait:"\u8bf7\u7a0d\u5019...",sug:"\u5efa\u8bae\u8bcd","no_sug":"\u65e0\u62fc\u5199\u5efa\u8bae","no_mpell":"\u65e0\u62fc\u5199\u9519\u8bef","learn_word":"Learn word"},pagebreak:{desc:"\u63d2\u5165\u5206\u9875\u7b26\u53f7"},advlist:{types:"Types",def:"Default","lower_alpha":"Lower Alpha","lower_greek":"Lower Greek","lower_roman":"Lower Roman","upper_alpha":"Upper Alpha","upper_roman":"Upper Roman",circle:"Circle",disc:"Disc",square:"Square"},colors:{"333300":"Dark olive","993300":"Burnt orange","000000":"Black","003300":"Dark green","003366":"Dark azure","000080":"Navy Blue","333399":"Indigo","333333":"Very dark gray","800000":"Maroon",FF6600:"Orange","808000":"Olive","008000":"Green","008080":"Teal","0000FF":"Blue","666699":"Grayish blue","808080":"Gray",FF0000:"Red",FF9900:"Amber","99CC00":"Yellow green","339966":"Sea green","33CCCC":"Turquoise","3366FF":"Royal blue","800080":"Purple","999999":"Medium gray",FF00FF:"Magenta",FFCC00:"Gold",FFFF00:"Yellow","00FF00":"Lime","00FFFF":"Aqua","00CCFF":"Sky blue","993366":"Brown",C0C0C0:"Silver",FF99CC:"Pink",FFCC99:"Peach",FFFF99:"Light yellow",CCFFCC:"Pale green",CCFFFF:"Pale cyan","99CCFF":"Light sky blue",CC99FF:"Plum",FFFFFF:"White"},aria:{"rich_text_area":"Rich Text Area"},wordcount:{words:"Words"}}}); | PypiClean |
/insight2-0.1.36.tar.gz/insight2-0.1.36/magetool/SystemMsgTool.py |
import os, sys
import time
import platform
import zlib
#http://www.cnblogs.com/freeliver54/archive/2008/04/08/1142356.html
#http://blog.csdn.net/xtx1990/article/details/7288903
class SystemMsgObj(object):
    """Collect OS and hardware information for the current machine.

    On Windows, hardware details (CPU, mainboard, BIOS, disks, memory,
    battery, NICs) are queried via WMI and stored in the ``sysMsg`` dict.
    The Mac and Linux branches are currently unimplemented stubs.

    NOTE(review): the code uses Python-2-only ``print x`` statements and
    ``platform.linux_distribution()`` (removed in Python 3.8) - this class
    will not run under modern Python 3 as-is.
    """
    def __init__(self):
        super(SystemMsgObj, self).__init__()
        self.sysversion = platform.version()
        self.sysplatform = platform.platform()
        self.sysSystem = platform.system()
        self.ver = ''
        self.ostype = 0  # 1.windows, 2.mac, 3.linux
        if self.sysSystem == 'Windows':  # Windows (original comment wrongly said "mac")
            self.ostype = 1  # 1.windows, 2.mac, 3.linux
            self.ver = platform.win32_ver()
        elif self.sysSystem == 'Darwin':  # macOS
            self.ostype = 2
            self.ver = platform.mac_ver()
        elif self.sysSystem == 'Linux':
            self.ostype = 3
            # NOTE(review): removed in Python 3.8.
            self.ver = platform.linux_distribution()
        self.c = None  # WMI client handle; populated on Windows only
        self.sysMsg = {}
        self.sysMsg['osversion'] = str(self.sysversion)
        self.sysMsg['osplatform'] = str(self.sysplatform)
        self.sysMsg['os'] = str(self.sysSystem)
        self.sysMsg['ver'] = self.ver
        self.sysMsg['ostype'] = self.ostype
        if self.ostype == 1:
            # Imported lazily so non-Windows hosts do not need the
            # third-party "wmi" package.
            import wmi
            self.c = wmi.WMI()
            self.initWinSystemHardMsg()
        elif self.ostype == 2:
            self.initMacSystemHardMsg()
        elif self.ostype == 3:
            self.initLinuxSystemHardMsg()
        self.getUserHardID()
    def initWinSystemHardMsg(self):
        """Populate sysMsg with Windows hardware details via WMI."""
        self.sysMsg['cpu'] = self._printCPU()
        self.sysMsg['mainboard'] = self._printMain_board()
        self.sysMsg['BIOS'] = self._printBIOS()
        self.sysMsg['disk'] = self._printDisk()
        self.sysMsg['memory'] = self._printPhysicalMemory()
        self.sysMsg['battery'] = self._printBattery()
        self.sysMsg['MacAddr'] = self._printMacAddress()
        self.getUserHardID()
        return self.sysMsg
    def initMacSystemHardMsg(self):
        # TODO: macOS hardware collection is not implemented.
        pass
    def initLinuxSystemHardMsg(self):
        # TODO: Linux hardware collection is not implemented.
        pass
    def getSysMsg(self):
        """Return the collected system/hardware info dict."""
        return self.sysMsg
    def getUserHardID(self):
        """Set sysMsg['userHardID'].

        Intended strategy (per the original comments, translated):
        on Windows, use the mainboard UUID; if that is missing/invalid,
        fall back to the MD5 of (largest disk's UUID + CPU ID); if the
        disk UUID is unavailable, use (NIC MAC address + CPU ID).
        NOTE(review): none of this is implemented - the ID is always
        left as an empty string on every platform.
        """
        if self.ostype == 1:  # windows
            self.sysMsg['userHardID'] = ''
            # On Windows the mainboard UUID is meant to identify the PC.
            # If the mainboard UUID is missing or invalid, use the MD5 of
            # the largest disk's UUID + CPU ID.
            # If the disk UUID cannot be read, use NIC MAC address + CPU ID.
        elif self.ostype == 2:  # mac
            self.sysMsg['userHardID'] = ''
        elif self.ostype == 3:  # linux
            self.sysMsg['userHardID'] = ''
    # CPU
    def _printCPU(self):
        """Return a dict describing the CPU(s) from Win32_Processor."""
        tmpdict = {}
        tmpdict["CpuCores"] = 0
        for cpu in self.c.Win32_Processor():
            tmpdict["cpuid"] = cpu.ProcessorId.strip()
            tmpdict["CpuType"] = cpu.Name
            tmpdict['systemName'] = cpu.SystemName
            try:
                tmpdict["CpuCores"] = cpu.NumberOfCores
            except:
                # Older WMI providers lack NumberOfCores; count processors.
                tmpdict["CpuCores"] += 1
            tmpdict["CpuClock"] = cpu.MaxClockSpeed
            tmpdict['DataWidth'] = cpu.DataWidth
        # print tmpdict
        return tmpdict
    # Mainboard
    def _printMain_board(self):
        """Return a list of dicts describing mainboards (Win32_BaseBoard)."""
        boards = []
        # print len(c.Win32_BaseBoard()):
        for board_id in self.c.Win32_BaseBoard():
            tmpmsg = {}
            tmpmsg['UUID'] = board_id.qualifiers['UUID'][1:-1]  # mainboard UUID; some boards report blanks like ffffff-ffffff
            tmpmsg['SerialNumber'] = board_id.SerialNumber  # mainboard serial number
            tmpmsg['Manufacturer'] = board_id.Manufacturer  # mainboard manufacturer
            tmpmsg['Product'] = board_id.Product  # mainboard model
            boards.append(tmpmsg)
        print boards
        return boards
    # BIOS
    def _printBIOS(self):
        """Return a list of dicts describing the BIOS (Win32_BIOS)."""
        bioss = []
        for bios_id in self.c.Win32_BIOS():
            tmpmsg = {}
            tmpmsg['BiosCharacteristics'] = bios_id.BiosCharacteristics  # BIOS characteristics code
            tmpmsg['version'] = bios_id.Version  # BIOS version
            tmpmsg['Manufacturer'] = bios_id.Manufacturer.strip()  # BIOS firmware manufacturer
            tmpmsg['ReleaseDate'] = bios_id.ReleaseDate  # BIOS release date
            tmpmsg['SMBIOSBIOSVersion'] = bios_id.SMBIOSBIOSVersion  # SMBIOS version
            bioss.append(tmpmsg)
        print bioss
        return bioss
    # Disks
    def _printDisk(self):
        """Return a list of dicts describing disks (Win32_DiskDrive)."""
        disks = []
        for disk in self.c.Win32_DiskDrive():
            # print disk.__dict__
            tmpmsg = {}
            tmpmsg['SerialNumber'] = disk.SerialNumber.strip()
            tmpmsg['DeviceID'] = disk.DeviceID
            tmpmsg['Caption'] = disk.Caption
            tmpmsg['Size'] = disk.Size
            tmpmsg['UUID'] = disk.qualifiers['UUID'][1:-1]
            disks.append(tmpmsg)
        for d in disks:
            print d
        return disks
    # Physical memory
    def _printPhysicalMemory(self):
        """Return a list of dicts describing RAM modules (Win32_PhysicalMemory)."""
        memorys = []
        for mem in self.c.Win32_PhysicalMemory():
            tmpmsg = {}
            tmpmsg['UUID'] = mem.qualifiers['UUID'][1:-1]
            tmpmsg['BankLabel'] = mem.BankLabel
            tmpmsg['SerialNumber'] = mem.SerialNumber.strip()
            tmpmsg['ConfiguredClockSpeed'] = mem.ConfiguredClockSpeed
            tmpmsg['Capacity'] = mem.Capacity
            tmpmsg['ConfiguredVoltage'] = mem.ConfiguredVoltage
            memorys.append(tmpmsg)
        for m in memorys:
            print m
        return memorys
    # Battery info; only laptops report a battery
    def _printBattery(self):
        """Return True if any battery is reported (i.e. likely a laptop)."""
        isBatterys = False
        for b in self.c.Win32_Battery():
            isBatterys = True
        return isBatterys
    # NIC MAC addresses
    def _printMacAddress(self):
        """Return a list of dicts for adapters with a usable MAC address."""
        macs = []
        for n in self.c.Win32_NetworkAdapter():
            mactmp = n.MACAddress
            # Skip adapters with no (or implausibly short) MAC address.
            if mactmp and len(mactmp.strip()) > 5:
                tmpmsg = {}
                tmpmsg['MACAddress'] = n.MACAddress
                tmpmsg['Name'] = n.Name
                tmpmsg['DeviceID'] = n.DeviceID
                tmpmsg['AdapterType'] = n.AdapterType
                tmpmsg['Speed'] = n.Speed
                macs.append(tmpmsg)
        print macs
        return macs
def main():
    """Collect and display the current machine's system/hardware info."""
    sysobj = SystemMsgObj()
    osmsg = sysobj.getSysMsg()
    # Fix: "print osmsg" is a Python-2-only statement; the call form with a
    # single argument behaves identically under Python 2 and 3.
    print(osmsg)
if __name__ == '__main__':
    main()
/pycalib_simple-2022.12.14.3-py3-none-any.whl/pycalib/calib.py | import numpy as np
import cv2
import pycalib
from pycalib.util import transpose_to_col
from skimage.transform import SimilarityTransform, EuclideanTransform
def undistort_points(pt2d, cameraMatrix, distCoeffs):
return cv2.undistortPoints(pt2d, cameraMatrix, distCoeffs, P=cameraMatrix)
def distort_points(pt2d, cameraMatrix, distCoeffs):
assert False, "not tested"
# a bit tricky.
# http://answers.opencv.org/question/148670/re-distorting-a-set-of-points-after-camera-calibration/
# step1. **undistort** without dist & P to get normalized coord.
n2d = cv2.undistortPoints(pt2d, cameraMatrix, distCoeffs=None, P=None)
# step2. get homogeneous coord
n3d = cv2.convertPointsToHomogeneous(n2d)
# step3. project WITH dist, and R=I, t=0
pt2d_d = cv2.projectPoints(n3d, np.zeros(3), np.zeros(3), cameraMatrix, distCoeffs)
return pt2d_d
def excalib(p1, p2, A, d):
"""
Returns R, t satisfying x2 = R * x1 + t (= p1 will be the world camera)
"""
p1 = transpose_to_col(p1, 2).reshape((-1,1,2)).astype(np.float)
p2 = transpose_to_col(p2, 2).reshape((-1,1,2)).astype(np.float)
# Undistort
n1 = undistort_points(p1, A, d)
n2 = undistort_points(p2, A, d)
E, status = cv2.findEssentialMat(n1, n2, A, method=cv2.RANSAC, prob=0.999, threshold=3.0)
_, R, t, _ = cv2.recoverPose(E, n1, n2, A, mask=status)
return R, t, E, status
def recoverPose2(E, n1, n2, K1, K2, mask):
n1 = n1.reshape((-1, 2))
n2 = n2.reshape((-1, 2))
R2a, R2b, t2 = cv2.decomposeEssentialMat(E)
R1 = np.eye(3)
t1 = np.zeros((3,1))
def z_count(R1, t1, R2, t2, K1, K2, n1, n2):
"""
Count number of points appeared in front of the cameras
"""
P1 = K1 @ np.hstack((R1, t1))
P2 = K2 @ np.hstack((R2, t2))
Xh1 = cv2.triangulatePoints(P1, P2, n1, n2)
Xh1 /= Xh1[3,:]
z1 = np.sum(Xh1[2,:]>0) # num of positive z points in Cam1 coordinate system
Xh2 = R2 @ Xh1[:3,:] + t2
z2 = np.sum(Xh2[2,:]>0) # num of positive z points in Cam2 coordinate system
return (z1 + z2), Xh1[:3,:]
zmax = -1
for R2x, t2x in [[R2a, t2], [R2a, -t2], [R2b, t2], [R2b, -t2]]:
z, Xx = z_count(R1, t1, R2x, t2x, K1, K2, n1.T, n2.T)
if zmax < z:
zmax = z
R2_est = R2x
t2_est = t2x
X_est = Xx
return R2_est, t2_est, X_est
def excalib2(p1, p2, A1, d1, A2, d2):
"""
Returns R, t satisfying x2 = R * x1 + t (= p1 will be the world camera)
"""
p1 = transpose_to_col(p1, 2).reshape((-1,1,2)).astype(np.float)
p2 = transpose_to_col(p2, 2).reshape((-1,1,2)).astype(np.float)
# Undistort
n1 = undistort_points(p1, A1, d1)
n2 = undistort_points(p2, A2, d2)
# Find E
F, status = cv2.findFundamentalMat(n1, n2, cv2.FM_RANSAC)
E = A2.T @ F @ A1
E = E / np.linalg.norm(E)
# Decompose E
R, t, X = recoverPose2(E, n1, n2, A1, A2, mask=status)
return R, t, E, status, X
def skew(x):
"""
Returns the skew-symmetric matrix [x]_\times of vector x.
"""
x = x.flatten()
return np.array([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])
def pose_registration_R(N, Rt_pairs):
"""
Subfunction for pose_registration
"""
A = []
for (i, j), Rt in Rt_pairs.items():
Rij = Rt[0:3,0:3]
x = np.zeros((3, N*3))
x[:, i*3:i*3+3] = -Rij
x[:, j*3:j*3+3] = np.eye(3)
A.append(x)
A = np.vstack(A)
# solve Ax=0
w, v = np.linalg.eigh(A.T @ A)
err = np.sum(w[0:3]) / np.sum(w)
R = v[:,0:3]
# find a set of coeffs to make R0 be I
k = np.linalg.inv(R[:3, :3])
R = R @ k
# force R to be SO(3)
for i in range(N):
u, _, vt = np.linalg.svd(R[i*3:i*3+3,:])
R[i*3:i*3+3,:] = u @ vt
# let R[0] be identity
k = np.linalg.inv(R[0:3,0:3])
for i in range(1, N):
R[i*3:i*3+3,:] = R[i*3:i*3+3,:] @ k
R[0:3,0:3] = np.eye(3,3)
return R, err
def pose_registration_T(N, Rt_pairs, R_w2c):
"""
Subfunction for pose_registration
Does not work if camera motion is collinear
"""
B = []
for (i, j), Rt in Rt_pairs.items():
Tij = Rt[0:3,3]
Ri = R_w2c[3*i:3*i+3]
Rj = R_w2c[3*j:3*j+3]
# T vector -> skew-symmetric matrix
Tij = skew(Tij)
x = np.zeros((3, N*3))
x[:, 3*i:3*i+3] = Tij @ Rj @ Ri.T
x[:, 3*j:3*j+3] = -Tij
B.append(x)
B = np.vstack(B)
# solve Bx=0
_, s, vt = np.linalg.svd(B.T @ B)
err = np.sum(s[-4:]) / np.sum(s)
# null-space has 4-dim = any-translation for x/y/z + global-scale
k = vt.T[:,-4:]
# find a set of coeffs to make t0 be (0, 0, 0)
_, s, vt = np.linalg.svd(k[0:3,:])
T = k @ vt[3,:].T
T = T / np.linalg.norm(T[3:6])
# overwrite noisy zeros in t0
T[0:3] = 0
# fix T sign using the 1st pair
for (i, j), Rt in Rt_pairs.items():
Tij = Rt[0:3,3]
Ri = R_w2c[3*i:3*i+3,:]
Rj = R_w2c[3*j:3*j+3,:]
Ti = T[3*i:3*i+3]
Tj = T[3*j:3*j+3]
# compare Tij with the estimated one
tij = - Rj @ Ri.T @ Ti + Tj
if tij @ Tij < 0:
T = -T
# return immediately in the loop
return T, err
def pose_registration(N, Rt_pairs, get_c2w=False):
"""
Global pose registration from pair-wise R_ij, t_ij. The output R_i, t_i are W2C by default, i.e., they satisfy x_i = R_i x_w + t_i .
Parameters
----------
N : int
Number of cameras
Rt_pairs : dict
2D dict of R_ij, t_ij where Rt_pairs[i,j] holds R_ij, t_ij satisfying x_j = R_ij x_i + t_ij .
get_c2w : Bool
Output C2W rotation and translation. That is, R_i and t_i satisfy x_w = R_i x_i + t_i .
Returns
-------
R : ndarray
3N x 3 array of rotation matrices
t : ndarray
3N x 1 array of translation vectors
Notes
-----
Martinec and Padjla. "Robust Rotation and Translation Estimation in Multiview Reconstruction," CVPR 2007.
Nianjuan Jiang, Zhaopeng Cui, and Ping Tan. "A global linear method for camera pose registration," ICCV 2013.
"""
#print(Rt_pairs)
R_w2c, Rerr = pose_registration_R(N, Rt_pairs)
#print(R_w2c, Rerr)
T_w2c, Terr = pose_registration_T(N, Rt_pairs, R_w2c)
#print(T_w2c, Terr)
# W2C -> C2W
if get_c2w is True:
assert False, "not tested"
for i in range(N):
R[i*3:i*3+3,:] = R[i*3:i*3+3,:].T
T[i*3:i*3+3] = - R[i*3:i*3+3,:] @ T[i*3:i*3+3]
return R_w2c, T_w2c.reshape((-1,1)), Rerr, Terr
def quat2mat(q):
"""
Quaternion to rotation matrix conversion
"""
x, y, z, w = q[0], q[1], q[2], q[3]
return np.array([
[1 - 2*y*y - 2*z*z, 2*x*y + 2*w*z, 2*x*z - 2*w*y],
[ 2*x*y - 2*w*z, 1 - 2*x*x - 2*z*z, 2*y*z + 2*w*x],
[ 2*x*z + 2*w*y, 2*y*z - 2*w*x, 1 - 2*x*x - 2*y*y]])
def rebase(R0_w2c, t0_w2c, R_w2c, t_w2c):
"""Return R and t that satisfy c0 = R @ c + t. The camera c0 is specified by R0_w2c and t0_w2c, and the camera c is specified by R_w2c and t_w2c.
In other words, this computes the pose of camera c in the camera c0 coordinate system.
"""
assert R0_w2c.shape == (3, 3)
assert R_w2c.shape == (3, 3)
assert t0_w2c.size == 3
assert t_w2c.size == 3
t0_w2c = t0_w2c.reshape((3, 1))
t_w2c = t_w2c.reshape((3, 1))
if np.allclose(R0_w2c, R_w2c):
R = np.eye(3)
if np.allclose(t0_w2c, t_w2c):
return R, np.zeros(t_w2c.shape)
else:
R = R_w2c @ R0_w2c.T
return R, t_w2c - R @ t0_w2c
def rebase_all(R_w2c_Nx3x3, t_w2c_Nx3x1, *, normalize_scaling=False):
"""Transform all the poses to be in the first camera coordinate system"""
R_est = []
t_est = []
Nc = R_w2c_Nx3x3.shape[0]
assert R_w2c_Nx3x3.shape == (Nc, 3, 3), R_w2c_Nx3x3.shape
assert t_w2c_Nx3x1.shape == (Nc, 3, 1), t_w2c_Nx3x1.shape
for c in reversed(range(Nc)):
Rx, tx = rebase(R_w2c_Nx3x3[0], t_w2c_Nx3x1[0], R_w2c_Nx3x3[c], t_w2c_Nx3x1[c])
R_est.append(Rx)
t_est.append(tx)
R_est = np.array(R_est[::-1])
t_est = np.array(t_est[::-1])
if normalize_scaling:
for c in reversed(range(Nc)):
t_est[c] /= np.linalg.norm(t_est[1])
return R_est, t_est
def triangulate(pt2d, P):
"""
Triangulate a 3D point from two or more views by DLT.
"""
N = len(pt2d)
assert N == len(P)
assert N >= 2
AtA = np.zeros((4, 4))
x = np.zeros((2, 4))
for i in range(N):
x[0,:] = P[i][0,:] - pt2d[i][0] * P[i][2,:]
x[1,:] = P[i][1,:] - pt2d[i][1] * P[i][2,:]
AtA += x.T @ x
_, v = np.linalg.eigh(AtA)
if np.isclose(v[3, 0], 0):
return v[:,0]
else:
return v[:,0] / v[3,0]
def triangulate_Npts(pt2d_CxPx2, P_Cx3x4):
"""
Triangulate multiple 3D points from two or more views by DLT.
"""
assert pt2d_CxPx2.ndim == 3
assert P_Cx3x4.ndim == 3
Nc, Np, _ = pt2d_CxPx2.shape
assert P_Cx3x4.shape == (Nc, 3, 4)
# P0 - xP2
x = P_Cx3x4[:,0,:][:,None,:] - np.einsum('ij,ik->ijk', pt2d_CxPx2[:,:,0], P_Cx3x4[:,2,:])
# P1 - yP2
y = P_Cx3x4[:,1,:][:,None,:] - np.einsum('ij,ik->ijk', pt2d_CxPx2[:,:,1], P_Cx3x4[:,2,:])
Ab = np.concatenate([x, y])
Ab = np.swapaxes(Ab, 0, 1)
assert Ab.shape == (Np, Nc*2, 4)
A = Ab[:,:,:3]
b = - Ab[:,:,3]
AtA = np.linalg.pinv(A)
X = np.einsum('ijk,ik->ij', AtA, b)
return X
def undistortN(A, D, camera_indices, points_2d, *, allow_unused_camera=False):
Nc = A.shape[0]
assert A.ndim == 3
assert A.shape == (Nc, 3, 3)
assert D.ndim == 2
assert D.shape[0] == Nc
assert len(camera_indices) == len(points_2d)
if allow_unused_camera is False:
assert camera_indices.max() == Nc - 1
p_new = points_2d.copy()
CIDs = np.unique(camera_indices)
for c in CIDs:
p2d = points_2d[camera_indices == c]
p_new[camera_indices == c] = undistort_points(p2d, A[c], D[c]).reshape((-1, 2))
return p_new
def triangulateN(A, D, P, camera_indices, point_indices, points_2d):
Nc = A.shape[0]
assert A.ndim == 3
assert A.shape == (Nc, 3, 3)
assert D.ndim == 2
assert D.shape[0] == Nc
assert P.ndim == 3
assert P.shape == (Nc, 3, 4)
pycalib.util.check_observations(camera_indices, point_indices, points_2d, allow_unused_camera=True)
points_2d = undistortN(A, D, camera_indices, points_2d, allow_unused_camera=True)
PIDs = np.unique(point_indices.astype(np.int32))
Y_est = []
PIDs_ok = []
for pid in sorted(PIDs):
c = camera_indices[point_indices == pid]
x = points_2d[point_indices == pid].copy()
if len(c) < 2:
continue
PIDs_ok.append(pid)
p = []
for i in c:
p.append(P[i])
p = np.array(p)
y = triangulate(x, p)
Y_est.append(y)
Y_est = np.array(Y_est).T
Y_est = Y_est[:3,:] / Y_est[3,:]
return Y_est, PIDs_ok
def reprojection_error(pt3d, pt2d, P):
N = len(pt2d)
err = []
for i in range(N):
x = P[i] @ pt3d
x /= x[2]
x[0] -= pt2d[i][0]
x[1] -= pt2d[i][1]
err.append(x[0:2])
return err
def excalibN(A, D, camera_indices, point_indices, points_2d):
"""Calibrate N cameras from 2D correspondences
Args:
A: N x 3 x 3 matrix describing the N intrinsic parameters
D: N x (3 or 5) matrix describing the N dist coeffs
observations: M x 4 matrix describing M 2D observations in M x [camera id, point id, u, v] format
"""
Nc = A.shape[0]
assert A.ndim == 3
assert A.shape == (Nc, 3, 3)
assert D.ndim == 2
assert D.shape[0] == Nc
camera_indices = camera_indices.astype(np.int32)
point_indices = point_indices.astype(np.int32)
pycalib.util.check_observations(camera_indices, point_indices, points_2d)
def reproj_error(A, R, t, X, x):
y = R @ X + t
y[:2,:] /= y[2,:]
return np.array([y[0,:] - x[0,:], y[1,:] - x[1,:]])
# pairwise calibration
Rt_pairs = dict()
for i in range(Nc - 1):
# pid, u, v
pid_i = point_indices[camera_indices==i]
p2d_i = points_2d[camera_indices==i,:]
for j in range(i + 1, Nc):
# pid, u, v
pid_j = point_indices[camera_indices==j]
p2d_j = points_2d[camera_indices==j,:]
_, idx_i, idx_j = np.intersect1d(pid_i, pid_j, assume_unique=True, return_indices=True)
if len(idx_i) < 8:
continue
xi = p2d_i[idx_i,:]
xj = p2d_j[idx_j,:]
R, t, _, _, x3d = excalib2(xi, xj, A[i], D[i], A[j], D[j])
# debug
#ei = reproj_error(A[i], np.eye(3), np.zeros((3, 1)), x3d, xi.T)
#ej = reproj_error(A[j], R, t, x3d, xj.T)
#e = np.sqrt(np.linalg.norm(ei)+np.linalg.norm(ej)) / len(idx_i)
#print(f'{i}-{j} -> {e}')
#print(- R @ t)
Rt_pairs[i, j] = np.hstack((R, t))
# Registration
R, t, err_r, err_t = pose_registration(Nc, Rt_pairs)
# Transform to make Camera0 be WCS
R_est = []
t_est = []
for c in reversed(range(Nc)):
Rx, tx = rebase(R[:3, :3], t[:3], R[3*c:3*c+3, :3], t[3*c:3*c+3])
R_est.append(Rx)
t_est.append(tx)
R_est = np.array(R_est[::-1])
t_est = np.array(t_est[::-1])
# This estimation is up-to-scale. So normalize by the cam1-cam2 distance.
for c in reversed(range(Nc)):
t_est[c] /= np.linalg.norm(t_est[1])
# Projection matrix
P_est = []
for i in range(Nc):
P_est.append(A[i] @ np.hstack((R_est[i], t_est[i])))
P_est = np.array(P_est)
# Triangulate 3D points
Y_est, PIDs_ok = triangulateN(A, D, P_est, camera_indices, point_indices, points_2d)
return R_est, t_est, Y_est.T, PIDs_ok
class Camera:
__W2C = np.zeros((3, 4))
__A = np.eye(3)
__d = np.zeros(5)
def __init__(self):
pass
def set_A(self, *, f=None, u0=None, v0=None, A=None):
if f is not None:
assert u0 is not None
assert v0 is not None
__A = np.array([[f, 0, u0], [0, f, v0], [0, 0, 1]])
if A is not None:
__A = A
def set_d(self, *, dist_coeffs=None):
if dist_coeffs is not None:
__d = dist_coeffs
def undistort_points(self, pt2d):
return cv2.undistortPoints(pt2d, cameraMatrix=self.__A, distCoeffs=self.__d, P=self.__A)
def distort_points(self, pt2d):
# a bit tricky.
# http://answers.opencv.org/question/148670/re-distorting-a-set-of-points-after-camera-calibration/
# step1. **undistort** without dist & P to get normalized coord.
n2d = cv2.undistortPoints(pt2d, cameraMatrix=self.__A, distCoeffs=None, P=None)
# step2. get homogeneous coord
n3d = cv2.convertPointsToHomogeneous(n2d)
# step3. project WITH dist, and R=I, t=0
pt2d_d = cv2.projectPoints(n3d, np.zeros(3), np.zeros(3), cameraMatrix=self.__A, distCoeffs=self.__d)
return pt2d_d
def lookat(eye, center, up):
eye = transpose_to_col(eye, 3)
center = transpose_to_col(center, 3)
up = transpose_to_col(up, 3)
ez = center - eye
ez = ez / np.linalg.norm(ez)
ey = up
ey = ey / np.linalg.norm(ey)
ex = np.cross(ey.T, ez.T).reshape((3,1))
ex = ex / np.linalg.norm(ex)
ey = np.cross(ez.T, ex.T).reshape((3,1))
ey = ey / np.linalg.norm(ey)
t_c2w = eye
R_c2w = np.hstack((ex, ey, ez))
return R_c2w.T, -R_c2w.T @ t_c2w
def absolute_orientation(p, q, no_scaling=False):
"""
Returns R, t, s satisfying q = s * R * p + t
p and q must be 3xN matrices.
"""
if no_scaling:
st = EuclideanTransform()
else:
st = SimilarityTransform()
st.estimate(p.T, q.T)
R = st.params[:3, :3]
t = st.params[:3, 3]
s = np.linalg.norm(R) / np.sqrt(3)
R = R / s
return R, t, s
# def absolute_orientation(p, q, *, no_scaling=False):
# """
# Returns R, t, s satisfying q = s * R * p + t
#
# p and q must be 3xN matrices.
#
# Horn. Closed-form solution of absolute orientation using unit quaternions, JOSA 1987
# """
#
# assert len(p.shape) == len(q.shape) == 2
# assert p.shape[0] == q.shape[0] == 3
# assert p.shape[1] == q.shape[1]
#
# # Centerize
# mp = np.mean(p, axis=1)
# mq = np.mean(q, axis=1)
# p = p - mp[:, None]
# q = q - mq[:, None]
#
# # Scale
# if no_scaling is False:
# s = np.sum(np.linalg.norm(q, axis=0)) / np.sum(np.linalg.norm(p, axis=0))
# else:
# s = 1
#
# # orthogonal Procrustes problem
# u, _, vt = np.linalg.svd(q @ (s * p).T)
# R = u @ vt
#
# # translation
# t = mq - s * (R @ mp)
#
# return R, t, s | PypiClean |
/jax-moseq-0.1.0.tar.gz/jax-moseq-0.1.0/jax_moseq/models/keypoint_slds/gibbs.py | import jax
import jax.numpy as jnp
import jax.random as jr
from functools import partial
from jax_moseq.utils.kalman import kalman_sample
from jax_moseq.utils.distributions import sample_vonmises_fisher
from jax_moseq.models import arhmm, slds
from jax_moseq.models.keypoint_slds.alignment import (
to_vanilla_slds,
estimate_coordinates,
estimate_aligned,
apply_rotation,
vector_to_angle,
)
na = jnp.newaxis
@partial(jax.jit, static_argnames=("parallel_message_passing",))
def resample_continuous_stateseqs(
    seed,
    Y,
    mask,
    v,
    h,
    s,
    z,
    Cd,
    sigmasq,
    Ab,
    Q,
    jitter=1e-3,
    parallel_message_passing=True,
    **kwargs
):
    """
    Resample the continuous latent trajectories ``x``.

    The keypoint observations are first rewritten as a vanilla SLDS
    problem (factoring out centroid, heading and per-keypoint noise
    scaling) and then handed to the generic SLDS sampler.

    Parameters
    ----------
    seed : jr.PRNGKey
        JAX random seed.
    Y : jax array of shape (N, T, k, d)
        Keypoint observations.
    mask : jax array of shape (N, T)
        Binary indicator for valid frames.
    v : jax array of shape (N, T, d)
        Centroid positions.
    h : jax array of shape (N, T)
        Heading angles.
    s : jax array of shape (N, T, k)
        Noise scales.
    z : jax_array of shape (N, T - n_lags)
        Discrete state sequences.
    Cd : jax array of shape ((k - 1) * d, latent_dim + 1)
        Observation transform.
    sigmasq : jax_array of shape k
        Unscaled noise.
    Ab : jax array of shape (num_states, latent_dim, ar_dim)
        Autoregressive transforms.
    Q : jax array of shape (num_states, latent_dim, latent_dim)
        Autoregressive noise covariances.
    jitter : float, default=1e-3
        Amount to boost the diagonal of the covariance matrix
        during backward-sampling of the continuous states.
    parallel_message_passing : bool, default=True
        Use associative scan for Kalman sampling, which is faster on
        a GPU but has a significantly longer jit time.
    **kwargs : dict
        Overflow, for convenience.

    Returns
    ------
    x : jax array of shape (N, T, latent_dim)
        Latent trajectories.
    """
    # Rewrite the keypoint model as a plain SLDS observation model.
    obs, scales, obs_transform, obs_noise = to_vanilla_slds(
        Y, v, h, s, Cd, sigmasq
    )
    return slds.resample_continuous_stateseqs(
        seed,
        obs,
        mask,
        z,
        scales,
        Ab,
        Q,
        obs_transform,
        obs_noise,
        jitter=jitter,
        parallel_message_passing=parallel_message_passing,
    )
@jax.jit
def resample_obs_variance(
    seed, Y, mask, Cd, x, v, h, s, nu_sigma, sigmasq_0, **kwargs
):
    """
    Resample the observation variance ``sigmasq``.

    The squared reconstruction error of the keypoint model is computed
    first and then passed to the generic SLDS sampler.

    Parameters
    ----------
    seed : jr.PRNGKey
        JAX random seed.
    Y : jax array of shape (N, T, k, d)
        Keypoint observations.
    mask : jax array of shape (N, T)
        Binary indicator for valid frames.
    Cd : jax array of shape ((k - 1) * d, latent_dim + 1)
        Observation transform.
    x : jax array of shape (N, T, latent_dim)
        Latent trajectories.
    v : jax array of shape (N, T, d)
        Centroid positions.
    h : jax array of shape (N, T)
        Heading angles.
    s : jax array of shape (N, T, k)
        Noise scales.
    nu_sigma : float
        Chi-squared degrees of freedom in sigmasq.
    sigmasq_0 : float
        Scaled inverse chi-squared scaling parameter for sigmasq.
    **kwargs : dict
        Overflow, for convenience.

    Returns
    ------
    sigmasq : jax_array of shape k
        Unscaled noise.
    """
    squared_error = compute_squared_error(Y, x, v, h, Cd, mask)
    return slds.resample_obs_variance_from_sqerr(
        seed, squared_error, mask, s, nu_sigma, sigmasq_0
    )
@jax.jit
def resample_scales(seed, Y, x, v, h, Cd, sigmasq, nu_s, s_0, **kwargs):
    """
    Resample the per-observation noise scales ``s``.

    Computes the squared reconstruction error of the keypoint model and
    delegates the actual sampling to the generic SLDS routine.

    Parameters
    ----------
    seed : jr.PRNGKey
        JAX random seed.
    Y : jax array of shape (N, T, k, d)
        Keypoint observations.
    x : jax array of shape (N, T, latent_dim)
        Latent trajectories.
    v : jax array of shape (N, T, d)
        Centroid positions.
    h : jax array of shape (N, T)
        Heading angles.
    Cd : jax array of shape ((k - 1) * d, latent_dim + 1)
        Observation transform.
    sigmasq : jax_array of shape k
        Unscaled noise.
    nu_s : int
        Chi-squared degrees of freedom in noise prior.
    s_0 : scalar or jax array broadcastable to ``Y``
        Prior on noise scale.
    **kwargs : dict
        Overflow, for convenience.

    Returns
    ------
    s : jax array of shape (N, T, k)
        Noise scales.
    """
    squared_error = compute_squared_error(Y, x, v, h, Cd)
    return slds.resample_scales_from_sqerr(
        seed, squared_error, sigmasq, nu_s, s_0
    )
@jax.jit
def compute_squared_error(Y, x, v, h, Cd, mask=None):
    """
    Compute the squared error between model-predicted and
    observed keypoint coordinates.

    Parameters
    ----------
    Y : jax array of shape (..., k, d)
        Keypoint observations.
    x : jax array of shape (..., latent_dim)
        Latent trajectories.
    v : jax array of shape (..., d)
        Centroid positions.
    h : jax array
        Heading angles.
    Cd : jax array of shape ((k - 1) * d, latent_dim + 1)
        Observation transform.
    mask : jax array, optional
        Binary indicator for valid frames; invalid frames are zeroed out.

    Returns
    ------
    sqerr : jax array of shape (..., k)
        Per-keypoint squared error between predicted and observed
        coordinates.
    """
    predicted = estimate_coordinates(x, v, h, Cd)
    # Sum the squared residual over the spatial dimension d.
    sqerr = jnp.sum(jnp.square(Y - predicted), axis=-1)
    if mask is None:
        return sqerr
    # Broadcast the (..., T) mask over the keypoint axis.
    return jnp.expand_dims(mask, -1) * sqerr
@jax.jit
def resample_heading(seed, Y, x, v, s, Cd, sigmasq, **kwargs):
    """
    Resample the heading angles ``h``.

    The angle is obtained by sampling a 2D direction vector from a
    von Mises-Fisher distribution (via ``sample_vonmises_fisher``) and
    converting it to an angle.

    Parameters
    ----------
    seed : jr.PRNGKey
        JAX random seed.
    Y : jax array of shape (N, T, k, d)
        Keypoint observations.
    x : jax array of shape (N, T, latent_dim)
        Latent trajectories.
    v : jax array of shape (N, T, d)
        Centroid positions.
    s : jax array of shape (N, T, k)
        Noise scales.
    Cd : jax array of shape ((k - 1) * d, latent_dim + 1)
        Observation transform.
    sigmasq : jax_array of shape k
        Unscaled noise.
    **kwargs : dict
        Overflow, for convenience.

    Returns
    ------
    h : jax array of shape (N, T)
        Heading angles.
    """
    k = Y.shape[-2]
    # Model-predicted keypoints in the aligned (egocentric) frame.
    Y_bar = estimate_aligned(x, Cd, k)
    # Observed keypoints relative to the centroid.
    Y_cent = Y - v[..., na, :]
    # Per-keypoint observation variance.
    variance = s * sigmasq
    # Variance-weighted cross-covariance between predicted and observed
    # keypoints, using only the first two spatial coordinates.
    # [(..., t, k, d, na) * (..., t, k, na, d) / (..., t, k, na, na)] -> (..., t, d, d)
    S = (Y_bar[..., :2, na] * Y_cent[..., na, :2] / variance[..., na, na]).sum(
        -3
    )
    del Y_bar, Y_cent, variance  # free up memory
    # Unnormalized mean-direction components of the angular posterior.
    kappa_cos = S[..., 0, 0] + S[..., 1, 1]
    kappa_sin = S[..., 0, 1] - S[..., 1, 0]
    del S  # free up memory
    mean_direction = jnp.stack([kappa_cos, kappa_sin], axis=-1)
    sampled_direction = sample_vonmises_fisher(seed, mean_direction)
    h = vector_to_angle(sampled_direction)
    return h
@jax.jit
def resample_location(
    seed,
    Y,
    mask,
    x,
    h,
    s,
    Cd,
    sigmasq,
    sigmasq_loc,
    parallel_message_passing=True,
    **kwargs
):
    """
    Resample the centroid positions ``v``.

    The centroid follows random-walk dynamics (identity transition with
    covariance ``sigmasq_loc * I``); conditioned on the keypoints the
    per-frame posterior of the centroid is Gaussian, so the trajectory
    is sampled with a Kalman sampler, vmapped over sessions.

    Parameters
    ----------
    seed : jr.PRNGKey
        JAX random seed.
    Y : jax array of shape (N, T, k, d)
        Keypoint observations.
    mask : jax array of shape (N, T)
        Binary indicator for valid frames.
    x : jax array of shape (N, T, latent_dim)
        Latent trajectories.
    h : jax array of shape (N, T)
        Heading angles.
    s : jax array of shape (N, T, k)
        Noise scales.
    Cd : jax array of shape ((k - 1) * d, latent_dim + 1)
        Observation transform.
    sigmasq : jax_array of shape k
        Unscaled noise.
    sigmasq_loc : float
        Assumed variance in centroid displacements.
    parallel_message_passing : bool, default=True,
        Use associative scan for Kalman sampling, which is faster on
        a GPU but has a significantly longer jit time.
    **kwargs : dict
        Overflow, for convenience.

    Returns
    ------
    v : jax array of shape (N, T, d)
        Centroid positions.
    """
    k, d = Y.shape[-2:]
    # Model-predicted keypoints, rotated into the world frame (centroid
    # still at the origin).
    Y_rot = apply_rotation(estimate_aligned(x, Cd, k), h)
    variance = s * sigmasq
    # gammasq: posterior variance of the centroid given the keypoints;
    # mu: corresponding variance-weighted posterior mean per frame.
    gammasq = 1 / (1 / variance).sum(-1, keepdims=True)
    mu = jnp.einsum("...tkd, ...tk->...td", Y - Y_rot, gammasq / variance)
    # Apply a Kalman sampler to get smooth centroid trajectories
    # TODO Parameterize these distributional hyperparameters
    seed = jr.split(seed, mask.shape[0])
    m0 = jnp.zeros(d)
    S0 = jnp.eye(d) * 1e4  # broad prior on the initial position
    A = jnp.eye(d)[na]  # identity (random-walk) dynamics
    B = jnp.zeros(d)[na]
    Q = jnp.eye(d)[na] * sigmasq_loc  # dynamics noise
    C = jnp.eye(d)  # observations are the positions themselves
    D = jnp.zeros(d)
    R = jnp.repeat(gammasq, d, axis=-1)  # per-frame observation noise
    # Single pseudo discrete state per frame (dynamics do not switch).
    zz = jnp.zeros_like(mask[:, 1:], dtype=int)
    # Inflated noise terms, presumably applied by kalman_sample on frames
    # where mask == 0 -- confirm against the kalman_sample implementation.
    masked_dynamics_noise = sigmasq_loc * 10
    masked_obs_noise = sigmasq.max() * 10
    masked_dynamics_params = {
        "weights": jnp.eye(d),
        "bias": jnp.zeros(d),
        "cov": jnp.eye(d) * masked_dynamics_noise,
    }
    masked_obs_noise_diag = jnp.ones(d) * masked_obs_noise
    # ``na`` is jnp.newaxis, i.e. ``None``: entries with ``na`` mark
    # broadcast (non-mapped) arguments for vmap; ``0`` maps over sessions.
    in_axes = (0, 0, 0, 0, na, na, na, na, na, na, na, 0, na, na)
    v = jax.vmap(
        partial(kalman_sample, parallel=parallel_message_passing), in_axes
    )(
        seed,
        mu,
        mask,
        zz,
        m0,
        S0,
        A,
        B,
        Q,
        C,
        D,
        R,
        masked_dynamics_params,
        masked_obs_noise_diag,
    )
    return v
def resample_model(
    data,
    seed,
    states,
    params,
    hypparams,
    noise_prior,
    ar_only=False,
    states_only=False,
    resample_global_noise_scale=False,
    resample_local_noise_scale=True,
    fix_heading=False,
    verbose=False,
    jitter=1e-3,
    parallel_message_passing=False,
    **kwargs
):
    """
    Resamples the Keypoint SLDS model given the hyperparameters,
    data, noise prior, current states, and current parameters.

    Parameters
    ----------
    data : dict
        Data dictionary containing the observations and mask.
    seed : jr.PRNGKey
        JAX random seed.
    states : dict
        State values for each latent variable.
    params : dict
        Values for each model parameter.
    hypparams : dict
        Values for each group of hyperparameters.
    noise_prior : scalar or jax array broadcastable to ``s``
        Prior on noise scale.
    ar_only : bool, default=False
        Whether to restrict sampling to ARHMM components.
    states_only : bool, default=False
        Whether to restrict sampling to states.
    resample_global_noise_scale : bool, default=False
        Whether to resample the global noise scales (``sigmasq``)
    resample_local_noise_scale : bool, default=True
        Whether to resample the local noise scales (``s``)
    fix_heading : bool, default=False
        Whether to exclude ``h`` from resampling.
    jitter : float, default=1e-3
        Amount to boost the diagonal of the covariance matrix
        during backward-sampling of the continuous states.
    verbose : bool, default=False
        Whether to print progress info during resampling.
    parallel_message_passing : bool, default=False,
        Use associative scan for Kalman sampling, which is faster on
        a GPU but has a significantly longer jit time.

    Returns
    ------
    model : dict
        Dictionary containing the hyperparameters and
        updated seed, states, and parameters of the model.
    """
    # First resample the ARHMM components (discrete states and, unless
    # states_only, the autoregressive parameters).
    model = arhmm.resample_model(
        data, seed, states, params, hypparams, states_only, verbose=verbose
    )
    if ar_only:
        model["noise_prior"] = noise_prior
        return model
    # NOTE(review): the same seed is reused by every resampling step
    # below -- confirm that per-step key handling happens upstream.
    seed = model["seed"]
    params = model["params"].copy()
    states = model["states"].copy()
    if (not states_only) and resample_global_noise_scale:
        if verbose:
            print("Resampling sigmasq (global noise scales)")
        params["sigmasq"] = resample_obs_variance(
            seed,
            **data,
            **states,
            **params,
            s_0=noise_prior,
            **hypparams["obs_hypparams"]
        )
    if verbose:
        print("Resampling x (continuous latent states)")
    states["x"] = resample_continuous_stateseqs(
        seed,
        **data,
        **states,
        **params,
        jitter=jitter,
        parallel_message_passing=parallel_message_passing
    )
    if not fix_heading:
        if verbose:
            print("Resampling h (heading)")
        states["h"] = resample_heading(seed, **data, **states, **params)
    if verbose:
        print("Resampling v (location)")
    states["v"] = resample_location(
        seed, **data, **states, **params, **hypparams["cen_hypparams"]
    )
    if resample_local_noise_scale:
        if verbose:
            print("Resampling s (local noise scales)")
        states["s"] = resample_scales(
            seed,
            **data,
            **states,
            **params,
            s_0=noise_prior,
            **hypparams["obs_hypparams"]
        )
    return {
        "seed": seed,
        "states": states,
        "params": params,
        "hypparams": hypparams,
        "noise_prior": noise_prior,
    }
/boot-synth-1.2.0.tar.gz/boot-synth-1.2.0/synth/projects_master/nginx_router/frontend/react/node_modules/react-dev-utils/node_modules/inquirer/lib/prompts/password.js | 'use strict';
/**
* `password` type prompt
*/
var chalk = require('chalk');
var { map, takeUntil } = require('rxjs/operators');
var Base = require('./base');
var observe = require('../utils/events');
/**
 * Replace every character of `input` with a mask character.
 *
 * @param  {*} input Value to mask; coerced to a string first.
 * @param  {String} [maskChar='*'] String used to mask each character.
 * @return {String} The masked representation of `input`.
 */
function mask(input, maskChar) {
  input = String(input);
  maskChar = typeof maskChar === 'string' ? maskChar : '*';
  if (input.length === 0) {
    return '';
  }

  // `String#repeat` replaces the old `new Array(n + 1).join(ch)` idiom
  // and behaves identically, including for multi-character mask strings.
  return maskChar.repeat(input.length);
}
/**
 * `password` type prompt: reads a line of input while hiding (or
 * masking) what the user types.
 */
class PasswordPrompt extends Base {
  /**
   * Start the Inquiry session
   * @param {Function} cb Callback when prompt is done
   * @return {this}
   */

  _run(cb) {
    this.done = cb;

    var events = observe(this.rl);

    // Once user confirm (enter key)
    var submit = events.line.pipe(map(this.filterInput.bind(this)));

    var validation = this.handleSubmitEvents(submit);
    validation.success.forEach(this.onEnd.bind(this));
    validation.error.forEach(this.onError.bind(this));

    // Re-render on every keystroke; stop once the answer is accepted.
    events.keypress
      .pipe(takeUntil(validation.success))
      .forEach(this.onKeypress.bind(this));

    // Init
    this.render();

    return this;
  }

  /**
   * Render the prompt to screen
   * @param {String} [error] Optional validation error to display.
   * @return {PasswordPrompt} self
   */

  render(error) {
    var message = this.getQuestion();
    var bottomContent = '';

    if (this.status === 'answered') {
      // Answered: show the masked answer, or a "[hidden]" placeholder
      // when no mask character was configured.
      message += this.opt.mask
        ? chalk.cyan(mask(this.answer, this.opt.mask))
        : chalk.italic.dim('[hidden]');
    } else if (this.opt.mask) {
      // Still typing with a mask: echo one mask char per typed char.
      message += mask(this.rl.line || '', this.opt.mask);
    } else {
      // Still typing without a mask: input stays fully hidden.
      message += chalk.italic.dim('[input is hidden] ');
    }

    if (error) {
      bottomContent = '\n' + chalk.red('>> ') + error;
    }

    this.screen.render(message, bottomContent);
  }

  /**
   * When user press `enter` key
   */

  filterInput(input) {
    // Empty input falls back to the configured default (if any).
    if (!input) {
      return this.opt.default == null ? '' : this.opt.default;
    }

    return input;
  }

  // Called when the submitted answer passed validation.
  onEnd(state) {
    this.status = 'answered';
    this.answer = state.value;

    // Re-render prompt
    this.render();

    this.screen.done();
    this.done(state.value);
  }

  // Called when validation failed; state.isValid carries the message.
  onError(state) {
    this.render(state.isValid);
  }

  onKeypress() {
    // If user press a key, just clear the default value
    if (this.opt.default) {
      this.opt.default = undefined;
    }

    this.render();
  }
}
module.exports = PasswordPrompt; | PypiClean |
/wayback-0.4.2.tar.gz/wayback-0.4.2/CONTRIBUTING.rst | ============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
Before contributing, please be sure to take a look at our
`code of conduct <https://github.com/edgi-govdata-archiving/overview/blob/main/CONDUCT.md>`_.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/edgi-govdata-archiving/wayback/issues.
If you are reporting a bug, please include:
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
wayback could always use more documentation, whether
as part of the official wayback docs, in docstrings,
or even on the web in blog posts, articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/edgi-govdata-archiving/wayback/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `wayback` for local development.
1. Fork the `wayback` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/wayback.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv wayback
$ cd wayback/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 wayback tests
$ pytest
$ tox
To get flake8, pytest and tox, just pip install them into your virtualenv using ``pip install -r requirements-dev.txt``.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.7, 3.3, 3.4, 3.5 and for PyPy. Check
https://travis-ci.org/edgi-govdata-archiving/wayback/pull_requests
and make sure that the tests pass for all supported Python versions.
| PypiClean |
/pyqode3_core-4.0.1-py3-none-any.whl/pyqode/core/modes/backspace.py | from qtpy.QtCore import Qt
from pyqode.core.api import Mode
class SmartBackSpaceMode(Mode):
    """Improves backspace and delete behaviour. The exact behavior is intended
    to be as intuitive as possible, but is quite complex and described in more
    detail in the functions below.

    The mode listens to the editor's ``key_pressed`` signal and takes over
    handling of plain (unmodified) Backspace and Delete key presses.
    """

    def on_state_changed(self, state):
        # Connect/disconnect the key handler when the mode is (de)activated.
        if state:
            self.editor.key_pressed.connect(self._on_key_pressed)
        else:
            self.editor.key_pressed.disconnect(self._on_key_pressed)

    def _on_key_pressed(self, event):
        """Dispatch plain Backspace/Delete presses to the smart handlers."""
        # Only handle unmodified key presses that no other mode accepted.
        if (event.modifiers() != Qt.NoModifier or event.isAccepted()):
            return
        key = event.key()
        if key == Qt.Key_Backspace:
            do_backspace = True
            do_delete = False
        elif key == Qt.Key_Delete:
            # do_delete is assigned for symmetry only; the branches below
            # key off do_backspace.
            do_delete = True
            do_backspace = False
        else:
            return
        cursor = self.editor.textCursor()
        # Group all cursor edits into a single undo step.
        cursor.beginEditBlock()
        if cursor.hasSelection():
            # With an active selection both keys simply delete it.
            cursor.removeSelectedText()
        elif do_backspace:
            if cursor.atBlockStart():
                self._do_backspace_at_block_start(cursor)
            else:
                self._do_regular_backspace(cursor)
        else:
            if cursor.atBlockEnd():
                self._do_delete_at_block_end(cursor)
            else:
                self._do_regular_delete(cursor)
        cursor.endEditBlock()
        self.editor.setTextCursor(cursor)
        # Mark the event handled so the default edit behavior is skipped.
        event.accept()

    def _do_delete_at_block_end(self, cursor):
        """When deleting while the cursor is at the end of a block, the next
        newline and all subsequent whitespace is deleted.
        """
        # Remove the newline, joining the next block onto this one.
        cursor.deleteChar()
        while not cursor.atBlockEnd():
            cursor.movePosition(cursor.Right, cursor.KeepAnchor)
            if not cursor.selectedText().isspace():
                # NOTE(review): movePosition with the default MoveAnchor
                # clears the whole selection here, so when a
                # non-whitespace character is reached no whitespace gets
                # removed below -- possibly KeepAnchor was intended;
                # confirm against upstream pyqode behavior.
                cursor.movePosition(cursor.Left)
                break
        cursor.removeSelectedText()

    def _do_regular_delete(self, cursor):
        """A delete does different things depending on the context.

        1. If the cursor is in the trailing whitespace of a block, then all
        trailing whitespace is removed.

        `x = 1| ` -> `x = 1|`

        2. If the cursor is followed by whitespace, then what follows is
        de-indented by one tab stop, or until the cursor position is reached.

        `|    x = 1` -> `|x = 1`
        ` |   x = 1` -> ` |x = 1`
        ` |    x = 1` -> ` |  x = 1`
        ` | x = 1` -> ` | x = 1`
        `  |  x = 1` -> `  |x = 1`

        3. Else, the next character is deleted:

        `|x = 1` -> ` = 1`
        """
        orig_pos = cursor.position()
        selected_text, selected_whitespace, selected_entire_block = \
            self._select_until_block_end(cursor)
        # Case 1: only whitespace until the end of the block -> remove it.
        if selected_whitespace:
            cursor.removeSelectedText()
            return
        cursor.setPosition(orig_pos)
        # For tab-based indentation, no specific de-indenting logic is
        # necessary.
        if not self.editor.use_spaces_instead_of_tabs:
            cursor.deleteChar()
            return
        new_pos = self._move_right_until_non_whitespace(cursor)
        # If there was no whitespace after, simply delete the next character
        if orig_pos == new_pos:
            cursor.setPosition(orig_pos)
            cursor.deleteChar()
            # NOTE(review): execution falls through to the de-indent code
            # below; it is harmless because n_del becomes min(0, ...) == 0,
            # but an early return here would make the intent explicit.
        # Determine the maximum number of characters to delete
        n_del = cursor.positionInBlock() % self.editor.tab_length
        if not n_del:
            n_del = self.editor.tab_length
        n_del = min(new_pos - orig_pos, n_del)  # don't delete beyond cursor
        cursor.movePosition(cursor.Left, cursor.KeepAnchor, n=n_del)
        cursor.removeSelectedText()
        cursor.setPosition(orig_pos)

    def _do_backspace_at_block_start(self, cursor):
        """When backspacing at the start of a block, first delete the previous
        character, which is the newline that takes the cursor to the previous
        block. If the cursor was initially at a line of only whitespace, we
        delete the whitespace so that it's not carried over to the previous
        block.
        """
        if cursor.block().text().isspace():
            # Wipe the whitespace-only line before joining the blocks.
            cursor.movePosition(cursor.EndOfBlock, cursor.KeepAnchor)
            cursor.removeSelectedText()
        cursor.deletePreviousChar()

    def _do_regular_backspace(self, cursor):
        """A backspace does different things depending on the context.

        If the cursor is in the trailing white space of a block that is not
        only whitespace, then all trailing whitespace is deleted.

        `x = 1 | ` -> `x = 1|`

        Otherwise, we deindent to the previous tab stop, or until the first non
        whitespace character, while deleting at least one character even if it
        is non whitespace.

        `x = 1|` -> `x = |`
        `y = 1; |x = 1` -> `y = 1; x = 1`

        If the block is only whitespace, then the trailing whitespace is also
        deleted:

        ` |  ` -> `|`
        """
        orig_pos = cursor.position()
        self._move_left_until_non_whitespace(cursor)
        selected_text, selected_whitespace, selected_entire_block = \
            self._select_until_block_end(cursor)
        # If we've selected some whitespace, delete this selection. But not
        # if the entire line is whitespace, because then we want
        # to de-indent.
        if selected_whitespace and not selected_entire_block:
            cursor.removeSelectedText()
        # Otherwise, return the cursor to its original position and
        # fall back to a de-indent-like behavior, such that as many
        # whitespaces are removed as are necessary to de-indent by one
        # level.
        else:
            cursor.setPosition(orig_pos)
            # If there's only whitespace on the line, we also remove the
            # trailing whitespace.
            if selected_whitespace:
                cursor.movePosition(cursor.EndOfBlock, cursor.KeepAnchor)
                cursor.removeSelectedText()
            if self.editor.use_spaces_instead_of_tabs:
                # Delete up to one tab stop worth of spaces, aligned to the
                # current column, but never past the start of the line.
                cursor_pos = cursor.positionInBlock()
                n_del = cursor_pos % self.editor.tab_length
                ch_del = ' '
                if not n_del:
                    n_del = self.editor.tab_length
                if n_del > cursor_pos:  # Don't delete beyond the line
                    n_del = cursor_pos
            else:
                n_del = 1
                ch_del = '\t'
            for i in range(n_del):
                cursor.movePosition(
                    cursor.PreviousCharacter,
                    cursor.KeepAnchor
                )
                if cursor.selectedText() == ch_del:
                    cursor.removeSelectedText()
                # The first time, we also delete non-whitespace characters.
                # However, this means that we are not de-indenting, and
                # therefore we break out of the loop. In other words, this
                # is a regular backspace.
                else:
                    if not i:
                        cursor.removeSelectedText()
                    else:
                        cursor.clearSelection()
                        cursor.movePosition(cursor.Right)
                    break

    def _move_left_until_non_whitespace(self, cursor):
        """Moves the cursor left until the first non-whitespace character
        or until the start of the block.

        Leaves the cursor without a selection and returns its position.
        """
        while not cursor.atBlockStart():
            cursor.movePosition(
                cursor.Left,
                cursor.KeepAnchor
            )
            if not cursor.selectedText().isspace():
                # Step back right so the non-whitespace char stays intact.
                cursor.setPosition(cursor.position() + 1)
                break
        # Collapse any remaining selection.
        cursor.setPosition(cursor.position())
        return cursor.position()

    def _move_right_until_non_whitespace(self, cursor):
        """Moves the cursor right until the first non-whitespace character
        or until the end of the block.

        Leaves the cursor without a selection and returns its position.
        """
        while not cursor.atBlockEnd():
            cursor.movePosition(
                cursor.Right,
                cursor.KeepAnchor
            )
            if not cursor.selectedText().isspace():
                # Step back left so the non-whitespace char stays intact.
                cursor.setPosition(cursor.position() - 1)
                break
        # Collapse any remaining selection.
        cursor.setPosition(cursor.position())
        return cursor.position()

    def _select_until_block_end(self, cursor):
        """Select all the characters until the end of the block. Returns the
        selected text, whether this text contains only whitespace (and is not)
        empty, and whether this text corresponds to the entire block
        """
        cursor.movePosition(cursor.EndOfBlock, cursor.KeepAnchor)
        selected_text = cursor.selectedText()
        return (
            selected_text,
            # str.isspace() is False for the empty string, so this is only
            # truthy for a non-empty, all-whitespace selection.
            selected_text.isspace() and selected_text,
            cursor.block().text() == selected_text
        )
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/network/latest/get_private_link_service_private_endpoint_connection.py |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetPrivateLinkServicePrivateEndpointConnectionResult',
'AwaitableGetPrivateLinkServicePrivateEndpointConnectionResult',
'get_private_link_service_private_endpoint_connection',
]
@pulumi.output_type
class GetPrivateLinkServicePrivateEndpointConnectionResult:
    """
    PrivateEndpointConnection resource.

    Plain result holder: the constructor validates each argument's runtime
    type and stores it via ``pulumi.set`` so the ``@pulumi.output_type``
    machinery can expose it through the property getters below.
    """
    def __init__(__self__, etag=None, id=None, link_identifier=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
        # Validate and store every field; pulumi.get retrieves them later.
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if link_identifier and not isinstance(link_identifier, str):
            raise TypeError("Expected argument 'link_identifier' to be a str")
        pulumi.set(__self__, "link_identifier", link_identifier)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if private_endpoint and not isinstance(private_endpoint, dict):
            raise TypeError("Expected argument 'private_endpoint' to be a dict")
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
            raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="linkIdentifier")
    def link_identifier(self) -> str:
        """
        The consumer link id.
        """
        return pulumi.get(self, "link_identifier")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> 'outputs.PrivateEndpointResponse':
        """
        The resource of private end point.
        """
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']:
        """
        A collection of information about the state of the connection between service consumer and provider.
        """
        return pulumi.get(self, "private_link_service_connection_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the private endpoint connection resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateLinkServicePrivateEndpointConnectionResult(GetPrivateLinkServicePrivateEndpointConnectionResult):
    """Awaitable variant of the result type.

    ``__await__`` turns the already-resolved result into something that can
    be used with ``await``: the unreachable ``yield`` makes the method a
    generator that finishes immediately, returning a plain result copy.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetPrivateLinkServicePrivateEndpointConnectionResult(
            etag=self.etag,
            id=self.id,
            link_identifier=self.link_identifier,
            name=self.name,
            private_endpoint=self.private_endpoint,
            private_link_service_connection_state=self.private_link_service_connection_state,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_private_link_service_private_endpoint_connection(expand: Optional[str] = None,
                                                         pe_connection_name: Optional[str] = None,
                                                         resource_group_name: Optional[str] = None,
                                                         service_name: Optional[str] = None,
                                                         opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateLinkServicePrivateEndpointConnectionResult:
    """
    Use this data source to access information about an existing resource.

    :param str expand: Expands referenced resources.
    :param str pe_connection_name: The name of the private end point connection.
    :param str resource_group_name: The name of the resource group.
    :param str service_name: The name of the private link service.
    """
    __args__ = {
        'expand': expand,
        'peConnectionName': pe_connection_name,
        'resourceGroupName': resource_group_name,
        'serviceName': service_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Perform the provider invoke and unwrap the typed result.
    __ret__ = pulumi.runtime.invoke(
        'azure-nextgen:network/latest:getPrivateLinkServicePrivateEndpointConnection',
        __args__,
        opts=opts,
        typ=GetPrivateLinkServicePrivateEndpointConnectionResult,
    ).value
    # Copy every field of the plain result into the awaitable wrapper.
    fields = ('etag', 'id', 'link_identifier', 'name', 'private_endpoint',
              'private_link_service_connection_state', 'provisioning_state',
              'type')
    return AwaitableGetPrivateLinkServicePrivateEndpointConnectionResult(
        **{field: getattr(__ret__, field) for field in fields})
/fmu_tools-1.12.1.tar.gz/fmu_tools-1.12.1/src/fmu/tools/qcproperties/qcproperties.py | from pathlib import Path
from typing import Any, Optional
import pandas as pd
import yaml
from fmu.tools._common import _QCCommon
from fmu.tools.qcdata import QCData
from fmu.tools.qcproperties._aggregate_df import PropertyAggregation
from fmu.tools.qcproperties._grid2df import GridProps2df
from fmu.tools.qcproperties._well2df import WellLogs2df
QCC = _QCCommon()
class QCProperties:
"""
The QCProperties class consists of a set of methods for extracting
property statistics from 3D Grids, Raw and Blocked wells.
Statistics can be collected from either discrete or continous properties.
Dependent on the property different statistics are collected.
The methods for statistics extraction can be run individually, or a
yaml-configuration file can be used to enable an automatic run of the
methods. See the method 'from_yaml'.
When several methods of statistics extraction has been run within the instance,
a merged dataframe is available through the 'dataframe' property.
All methods can be run from either RMS python, or from files.
XTGeo is being utilized to get a dataframe from the input parameter data.
XTGeo data is reused in the instance to increase performance.
"""
    def __init__(self):
        """Initialize an empty QCProperties instance."""
        self._xtgdata = QCData()  # QCData instance, general XTGeo data
        self._dfs = []  # list of dataframes with aggregated statistics
        self._selectors_all = []  # union of selector columns across runs
        self._proptypes_all = []  # property type recorded for each run
        self._ids = []  # unique ID assigned to each statistics run
        self._dataframe = pd.DataFrame()  # merged dataframe with statistics
# Properties:
# ==================================================================================
    @property
    def dataframe(self):
        """Dataframe with statistics.

        The merged dataframe is rebuilt lazily: it is only recomputed when
        new statistics runs have been added since the last access (see
        ``_create_or_return_dataframe``), otherwise the cached frame is
        returned.
        """
        self._dataframe = self._create_or_return_dataframe()
        return self._dataframe
# Hidden methods:
# ==================================================================================
def _initiate_from_config(self, cfg: str, project: Optional[object]):
"""Run methods for statistics extraction based on entries in yaml-config"""
with open(cfg, "r", encoding="utf-8") as stream:
data = yaml.safe_load(stream)
if "grid" in data:
for item in data["grid"]:
self.get_grid_statistics(data=item, project=project)
if "wells" in data:
for item in data["wells"]:
self.get_well_statistics(data=item, project=project)
if "blockedwells" in data:
for item in data["blockedwells"]:
self.get_bwell_statistics(data=item, project=project)
    def _create_or_return_dataframe(self):
        """
        Combine dataframes from all runs within the instance.
        Only update dataframe if more data have been run within the
        instance, else return previous dataframe.
        """
        dframe = self._dataframe
        dframes = self._dfs
        # Rebuild only when new runs were added since the last merge; each
        # run contributes exactly one distinct value to the ID column.
        if dframe.empty or len(dframes) > len(dframe["ID"].unique()):
            QCC.print_debug("Updating combined dataframe")
            self._warn_if_different_property_types()
            dframe = pd.concat(dframes)
            # fill NaN with "Total" for dataframes with missing selectors
            dframe[self._selectors_all] = dframe[self._selectors_all].fillna("Total")
            # Specify column order in statistics dataframe
            cols_first = ["PROPERTY"] + self._selectors_all
            dframe = dframe[
                cols_first + [x for x in dframe.columns if x not in cols_first]
            ]
        return dframe
def _warn_if_different_property_types(self):
"""Give warning if dataframes have different property types"""
if not all(ptype == self._proptypes_all[0] for ptype in self._proptypes_all):
QCC.give_warn(
"Merging statistics dataframes from different property types "
"(continous/discrete). Is this intentional?"
)
def _adjust_id_if_duplicate(self, run_id: str) -> str:
"""
Check for equal run ids, modify ids
by adding a number to get them unique.
"""
check_id = run_id
count = 0
while check_id in self._ids:
check_id = f"{run_id}({count+1})"
count += 1
return check_id
    def _set_dataframe_id_and_class_attributes(
        self, statistics: PropertyAggregation, source: str, run_id: str
    ):
        """
        Set the SOURCE and ID columns of the statistics dataframe, and
        register the run on the instance (ids, run dataframes, union of
        selectors, and property types).
        """
        # De-duplicate the id before tagging the dataframe with it.
        run_id = self._adjust_id_if_duplicate(run_id)
        # set id and source columns in statistics dataframe
        statistics.dataframe["ID"] = run_id
        statistics.dataframe["SOURCE"] = source
        self._ids.append(run_id)
        self._dfs.append(statistics.dataframe)
        # Track the union of selectors seen across runs (order preserved).
        for selector in statistics.controls["selectors"]:
            if selector not in self._selectors_all:
                self._selectors_all.append(selector)
        self._proptypes_all.append(statistics.controls["property_type"])
    # pylint: disable = no-self-argument, not-callable
    def _check_multiple_filters(method: Any):
        """Decorator that expands a "multiple_filters" config entry.

        When ``kwargs["data"]`` contains "multiple_filters", the wrapped
        method is executed once per (name, filters) pair — each time with
        ``data`` updated in place — and the combined dataframe is returned.
        Otherwise the method is called once, unchanged.
        """

        def wrapper(self, **kwargs):
            if "multiple_filters" in kwargs["data"]:
                for name, filters in kwargs["data"]["multiple_filters"].items():
                    kwargs["data"].update(filters=filters, name=name)
                    method(self, **kwargs)
                return self.dataframe
            return method(self, **kwargs)

        return wrapper
    @_check_multiple_filters
    def _extract_statistics(
        self, dtype: str, data: dict, project: Optional[object], source: str
    ):
        """Create a property dataframe from the input and extract statistics.

        ``dtype`` selects the data source: "grid", "wells" or "bwells".
        The resulting per-run statistics dataframe is registered on the
        instance and returned.
        """
        QCC.verbosity = data.get("verbosity", 0)
        QCC.print_info("Starting run...")
        # Create Property dataframe from input (using XTGeo)
        property_data = (
            GridProps2df(project=project, data=data, xtgdata=self._xtgdata)
            if dtype == "grid"
            else WellLogs2df(
                project=project,
                data=data,
                xtgdata=self._xtgdata,
                blockedwells=dtype == "bwells",
            )
        )
        # Compute statistics
        stats = PropertyAggregation(property_data)
        self._set_dataframe_id_and_class_attributes(
            stats,
            source=source,
            run_id=data.get("name", source),
        )
        return stats.dataframe
    # QC methods:
    # ==================================================================================
    def get_grid_statistics(
        self,
        data: dict,
        project: Optional[object] = None,
    ) -> pd.DataFrame:
        """Extract property statistics from a 3D grid.

        The source label defaults to the stem of the grid file path.
        """
        return self._extract_statistics(
            dtype="grid",
            data=data,
            project=project,
            source=data.get("source", Path(data["grid"]).stem),
        )
    def get_well_statistics(
        self,
        data: dict,
        project: Optional[object] = None,
    ) -> pd.DataFrame:
        """Extract property statistics from wells.

        The source label defaults to "wells".
        """
        return self._extract_statistics(
            dtype="wells",
            data=data,
            project=project,
            source=data.get("source", "wells"),
        )
    def get_bwell_statistics(
        self,
        data: dict,
        project: Optional[object] = None,
    ) -> pd.DataFrame:
        """Extract property statistics from blocked wells.

        Without a project the source label defaults to "bwells"; with a
        project it defaults to the blocked-well set name ("BW" when the
        config does not specify "bwname").
        """
        return self._extract_statistics(
            dtype="bwells",
            data=data,
            project=project,
            source=data.get(
                "source",
                "bwells" if project is None else data["wells"].get("bwname", "BW"),
            ),
        )
    def from_yaml(self, cfg: str, project: Optional[object] = None):
        """Run the statistics extraction methods listed in a YAML config file."""
        self._initiate_from_config(cfg, project)
    def to_csv(self, csvfile: str):
        """Write the combined statistics dataframe to *csvfile* (no index column)."""
        self.dataframe.to_csv(csvfile, index=False)
        QCC.print_info(f"Dataframe with statistics written to {csvfile}")
/dmb-cli-3.1.10.tar.gz/dmb-cli-3.1.10/cli/compiler.py | import hashlib
import re
from lxml import etree
import json
from base64 import b64encode, b64decode
import xml2tree
from common import RawInput, MetaInfo, CompileResult
from debug_bridge import DebugBridge
import logging
import constant
# Special characters inside URL attribute values must be replaced (at string
# level, before XML validation) so that the XML parses and the subsequent
# JSON conversion is clean.
# NOTE: '&' must be listed FIRST — otherwise the '&' introduced by the other
# entity replacements would itself be escaped again (double-escaping bug in
# the original ordering). Relies on dict insertion order (Python 3.7+).
MAGIC_URL = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '\'': '&apos;',
    '\"': '&quot;'
}


def replace_url_magic(src_str_):
    """Escape XML-special characters inside http(s) URLs that are the value
    of a ``src``, ``srcMock`` or ``url`` attribute in *src_str_*.

    Returns a copy of *src_str_* with every matched URL rewritten through
    :data:`MAGIC_URL`; text outside those URLs is untouched.
    """
    # BUG FIX: the original pattern used a character class
    # ``[src|srcMock|url]=`` which matches ANY single character of that set
    # before '=' (e.g. ``c="http..."``), not the attribute names. A
    # non-capturing alternation expresses the actual intent.
    p = re.compile(r'(?:src|srcMock|url)=\"(https?:\/\/[^\"]*)\"')
    found = p.findall(src_str_)
    target_str = src_str_
    for url in found:
        new_url = url
        for source, target in MAGIC_URL.items():
            new_url = new_url.replace(source, target)
        if new_url != url:
            target_str = target_str.replace(url, new_url)
    return target_str
class CompileTask:
    """One compilation run: read XML input, validate it, convert it into the
    framed base64 payload expected by the runtime, and dispatch the result
    (stdout, debug bridge, or release output) according to the input flags.
    """

    # Populated by compile(); holds the framed payload and metadata.
    result: CompileResult
    # Options describing the input source and output mode.
    raw_input: RawInput

    def __init__(self, raw: RawInput, exist_bridge: DebugBridge = None) -> None:
        """Remember the raw input options and an optional existing debug bridge."""
        super().__init__()
        self.raw_input = raw
        self.bridge = exist_bridge

    def compile(self):
        """Load the XML source, compile it, and route the result.

        The source is either a base64-encoded XML string (``src_64``) or a
        path to an XML file (``src_file``). Raises if no usable input is found.
        """
        logging.debug('编译中...')
        if self.raw_input.src_64 is not None:
            # The input is the XML content as a base64 string.
            "传入的是base64的xml内容字符串"
            raw_xml_string = str(b64decode(self.raw_input.src_64), encoding='utf-8')
        else:
            # The input is a path to an XML file.
            "传入的是xml文件"
            with open(self.raw_input.src_file, mode='r', encoding='utf-8') as fx:
                raw_xml_string = fx.read()
        if raw_xml_string is None:
            raise Exception('无效的编译输入')
        self.result = self._compileString(raw_xml_string)
        if self.raw_input.only_compile_str:
            # In this debug mode only the compiled string is printed.
            print(self.result.compiled_str)
            return
        if self.raw_input.debug:
            # Reuse an existing bridge when one was injected, otherwise
            # create and run a fresh one.
            if self.bridge is not None:
                self.bridge.update(self.result)
            else:
                b = DebugBridge(self.raw_input)
                b.run(self.result)
        if self.raw_input.release:
            print(self.result.compiled_str)

    def _compileString(self, xml_string) -> CompileResult:
        """Validate *xml_string*, convert it to JSON and frame the payload.

        Compiled string layout: MD5 hex digest (32 chars) + zero-padded
        runtime version digits + 20 reserved '0' characters + the
        base64-encoded JSON document.
        """
        xml_string_ = xml_string
        try:
            # Escape special characters that may appear inside http URLs,
            # then check XML well-formedness.
            xml_string_ = replace_url_magic(xml_string_)
            etree.XML(xml_string_)
            logging.debug('通过XML有效性检查')
        except etree.XMLSyntaxError as err:
            raise Exception('不是有效的XML,请检查输入文件\n', err)
        json_objects = xml2tree.convert(xml_string_, self.raw_input)
        converted_json = json.dumps(json_objects, indent=1)
        json_bytes_ = bytes(converted_json, encoding='utf-8')
        md5 = hashlib.md5(json_bytes_).hexdigest()
        base64_string_ = b64encode(json_bytes_).decode('utf-8')
        cli_ver_str = "CLI ver: %s" % constant.CLI_VER
        runtime_ver_str = "min support RUNTIME ver: %s" % constant.TARGET_RUNTIME_VER
        # 20 reserved characters (kept for future use in the frame layout).
        keep_internal = '0' * 20
        logging.debug("\tMD5: %s " % md5)
        logging.debug("\t%s" % cli_ver_str)
        logging.debug('\t%s' % runtime_ver_str)
        logging.debug("\tKeep space len: %s" % len(keep_internal))
        logging.debug("\tJson source len: %s" % len(json_bytes_.decode('utf-8')))
        logging.debug("\tBase64 data len: %s" % len(base64_string_))
        meta_info = MetaInfo(md5, cli_ver_str, runtime_ver_str)
        ret = CompileResult()
        # Each version component is rendered as two zero-padded digits.
        ver_str = []
        for num in constant.TARGET_RUNTIME_VER.split('.'):
            ver_str.append('%02d' % int(num))
        ret.compiled_str = md5 + ''.join(ver_str) + keep_internal + base64_string_
        ret.raw_json = converted_json
        ret.raw_xml = xml_string_
        ret.meta_info = meta_info
        return ret
/sycc-0.8.5.tar.gz/sycc-0.8.5/sycc_/pip/_vendor/html5lib/_ihatexml.py | from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
letter = " | ".join([baseChar, ideographic])

# Characters allowed anywhere in an XML Name token; the first character is
# further restricted to nameFirst below.
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
                   extender])
nameFirst = " | ".join([letter, "_"])

# Parse "#xNNNN" single characters and "[#xNNNN-#xNNNN]" ranges from the
# specification strings above. (The '|' inside the [\d|A-F] class is a
# harmless literal artifact of the original pattern.)
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
    """Parse a ``" | "``-separated spec of ``#xNNNN`` characters and
    ``[#xNNNN-#xNNNN]`` ranges (or bare single characters) into a
    normalised list of [lo, hi] codepoint pairs."""
    rv = []
    for item in (part.strip() for part in chars.split(" | ")):
        matched = False
        for pattern in (reChar, reCharRange):
            m = pattern.match(item)
            if m is None:
                continue
            bounds = [hexToInt(group) for group in m.groups()]
            if len(bounds) == 1:
                # A single character becomes a degenerate [c, c] range.
                bounds = bounds * 2
            rv.append(bounds)
            matched = True
            break
        if not matched:
            # Anything else must be a literal single character.
            assert len(item) == 1
            rv.append([ord(item)] * 2)
    return normaliseCharList(rv)
def normaliseCharList(charList):
    """Sort [lo, hi] ranges and merge overlapping or adjacent ones.

    Assumes non-overlapping input ranges (as produced by the character
    tables above); each pair must satisfy hi >= lo.
    """
    ordered = sorted(charList)
    for low, high in ordered:
        assert high >= low
    merged = []
    for rng in ordered:
        if merged and rng[0] <= merged[-1][1] + 1:
            # Extend the previous range to cover this one.
            merged[-1][1] = rng[1]
        else:
            merged.append(rng)
    return merged
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)


def missingRanges(charList):
    """Return the [lo, hi] ranges of the BMP *not* covered by ``charList``.

    ``charList`` must be a non-empty, normalised (sorted and merged) list
    of [lo, hi] pairs, e.g. the output of ``normaliseCharList``.
    """
    rv = []
    # BUG FIX: the original compared the first *pair* against 0
    # (``charList[0] != 0``), which is always true for a list; compare the
    # lower bound instead, so a range starting at 0 does not produce the
    # degenerate gap [0, -1].
    if charList[0][0] != 0:
        rv.append([0, charList[0][0] - 1])
    for i, item in enumerate(charList[:-1]):
        rv.append([item[1] + 1, charList[i + 1][0] - 1])
    if charList[-1][1] != max_unicode:
        rv.append([charList[-1][1] + 1, max_unicode])
    return rv
def listToRegexpStr(charList):
    """Render a list of [lo, hi] codepoint pairs as a regexp character
    class string, e.g. ``[a-cx]``."""
    parts = []
    for low, high in charList:
        if low == high:
            parts.append(escapeRegexp(chr(low)))
        else:
            parts.append(escapeRegexp(chr(low)) + "-" +
                         escapeRegexp(chr(high)))
    return "[%s]" % "".join(parts)
def hexToInt(hex_str):
    """Parse *hex_str* (hex digits, no prefix) as a base-16 integer."""
    return int(hex_str, 16)
def escapeRegexp(string):
    """Backslash-escape regexp metacharacters in *string*."""
    specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
                         "[", "]", "|", "(", ")", "-")
    # Per-character pass is equivalent to the chained replaces: the added
    # backslash is never itself a special character here.
    return "".join("\\" + ch if ch in specialCharacters else ch
                   for ch in string)
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0
f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3
040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]') # noqa
# Simpler things
# Matches any character that is NOT a legal XML 1.0 PubidChar.
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\\-'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
    """Coerce arbitrary (HTML-derived) names and data into legal XML 1.0
    constructs, emitting :class:`DataLossWarning` whenever information is
    lost in the process.

    Illegal characters in names are replaced by reversible ``UXXXXX``
    escape tokens (see :meth:`escapeChar` / :meth:`unescapeChar`).
    """

    # Matches the UXXXXX escape tokens produced by escapeChar.
    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")

    def __init__(self,
                 dropXmlnsLocalName=False,
                 dropXmlnsAttrNs=False,
                 preventDoubleDashComments=False,
                 preventDashAtCommentEnd=False,
                 replaceFormFeedCharacters=True,
                 preventSingleQuotePubid=False):
        # Each flag enables one coercion rule; see the coerce* methods.
        # NOTE(review): preventDashAtCommentEnd is stored but never read
        # below -- the trailing-dash fix in coerceComment is gated on
        # preventDoubleDashComments instead; confirm upstream intent.
        self.dropXmlnsLocalName = dropXmlnsLocalName
        self.dropXmlnsAttrNs = dropXmlnsAttrNs
        self.preventDoubleDashComments = preventDoubleDashComments
        self.preventDashAtCommentEnd = preventDashAtCommentEnd
        self.replaceFormFeedCharacters = replaceFormFeedCharacters
        self.preventSingleQuotePubid = preventSingleQuotePubid
        # char -> UXXXXX escape token, filled lazily by escapeChar.
        self.replaceCache = {}

    def coerceAttribute(self, name, namespace=None):
        """Return a legal attribute name, or None if it must be dropped."""
        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
            warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
            return None
        elif (self.dropXmlnsAttrNs and
              namespace == "http://www.w3.org/2000/xmlns/"):
            warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
            return None
        else:
            return self.toXmlName(name)

    def coerceElement(self, name):
        """Return a legal element name."""
        return self.toXmlName(name)

    def coerceComment(self, data):
        """Rewrite comment text so it cannot contain "--" or end in a
        dash, when the preventDoubleDashComments flag is set."""
        if self.preventDoubleDashComments:
            while "--" in data:
                warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
                data = data.replace("--", "- -")
            if data.endswith("-"):
                warnings.warn("Comments cannot end in a dash", DataLossWarning)
                data += " "
        return data

    def coerceCharacters(self, data):
        """Replace form-feed characters (illegal in XML) in character data."""
        if self.replaceFormFeedCharacters:
            # One warning per occurrence, then replace them all at once.
            for _ in range(data.count("\x0C")):
                warnings.warn("Text cannot contain U+000C", DataLossWarning)
            data = data.replace("\x0C", " ")
        # Other non-xml characters
        return data

    def coercePubid(self, data):
        """Escape characters that are not legal XML PubidChars."""
        dataOutput = data
        for char in nonPubidCharRegexp.findall(data):
            warnings.warn("Coercing non-XML pubid", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            dataOutput = dataOutput.replace(char, replacement)
        if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
            warnings.warn("Pubid cannot contain single quote", DataLossWarning)
            dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
        return dataOutput

    def toXmlName(self, name):
        """Escape every character of *name* that is illegal in an XML Name.

        The first character is checked against the stricter name-start
        class; the rest against the general name class.
        """
        nameFirst = name[0]
        nameRest = name[1:]
        m = nonXmlNameFirstBMPRegexp.match(nameFirst)
        if m:
            warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning)
            nameFirstOutput = self.getReplacementCharacter(nameFirst)
        else:
            nameFirstOutput = nameFirst
        nameRestOutput = nameRest
        replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
        for char in replaceChars:
            warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            nameRestOutput = nameRestOutput.replace(char, replacement)
        return nameFirstOutput + nameRestOutput

    def getReplacementCharacter(self, char):
        """Return (and cache) the UXXXXX escape token for *char*."""
        if char in self.replaceCache:
            replacement = self.replaceCache[char]
        else:
            replacement = self.escapeChar(char)
        return replacement

    def fromXmlName(self, name):
        """Invert :meth:`toXmlName`: expand UXXXXX tokens back to characters."""
        for item in set(self.replacementRegexp.findall(name)):
            name = name.replace(item, self.unescapeChar(item))
        return name

    def escapeChar(self, char):
        """Map *char* to a "U" + 5-hex-digit token and remember the mapping."""
        replacement = "U%05X" % ord(char)
        self.replaceCache[char] = replacement
        return replacement

    def unescapeChar(self, charcode):
        """Decode a UXXXXX token produced by :meth:`escapeChar`."""
        return chr(int(charcode[1:], 16))
/upf_to_json-0.9.5.tar.gz/upf_to_json-0.9.5/README.md | # UPF to JSON converter
UPF (Unified Pseudopotential Format) is a file format that describes atomic
pseudopotentials used by density functional theory (DFT) codes. This package
provides scripts for converting UPF v1 and UPF v2 files to the JSON format
required by the SIRIUS DFT library [SIRIUS github](https://github.com/electronic-structure/SIRIUS).
More information about the unified pseudopotential format can be found here:
https://www.quantum-espresso.org/pseudopotentials/unified-pseudopotential-format
| PypiClean |
/SALTISE_course_flow-0.8.9.tar.gz/SALTISE_course_flow-0.8.9/course_flow/static/course_flow/js/ExportMenu.js | import * as Redux from "redux";
import * as React from "react";
import * as reactDom from "react-dom";
import {setUserPermission,getUsersForObject,getUserList} from "./PostFunctions";
import {Loader} from "./Constants";
export class ExportMenu extends React.Component{
constructor(props){
super(props);
this.state={type:"outcome"}
}
render(){
let object_sets;
if(this.props.data.object_sets.length>0){
object_sets = (
[<h4>{gettext("Object Set Visibility")}:</h4>,
this.props.data.object_sets.map(objectset=>
<div>
<input onChange={this.inputChange.bind(this,"set",objectset.id)} name="object_sets[]" value={objectset.id} type="checkbox" id={objectset.id} checked={(!this.state[objectset.id])}/><label>{objectset.title}</label>
</div>
)
]
)
}
return(
<div class="message-wrap">
<h2>{gettext("Export files")}</h2>
<p>{gettext("Use this menu to export files.")}</p>
<form id="export-form" enctype="multipart/form-data" action={post_paths.get_export} method="POST" id="export-form" target="redirect-iframe" onSubmit={this.submit.bind(this)}>
<input type="hidden" name="csrfmiddlewaretoken" value={root.getCsrfToken()}/>
<h4>{gettext("Export Type")}:</h4>
{this.getExportTypes()}
<h4>{gettext("Export Format")}:</h4>
<select name="export_format">
<option value="excel">Excel</option>
<option value="csv">CSV</option>
</select>
{object_sets}
<input type="hidden" id="objectID" name="objectID" value={JSON.stringify(this.props.data.id)}/>
<input type="hidden" id="objectType" name="objectType" value={JSON.stringify(this.props.data.type)}/>
<input onClick={this.click.bind(this)} id="submit-button" type="submit"/>
</form>
<iframe hidden name="redirect-iframe" id='redirect-iframe'></iframe>
<div class="window-close-button" onClick = {this.props.actionFunction}>
<img src = {iconpath+"close.svg"}/>
</div>
</div>
);
}
getExportTypes(){
let type = this.props.data.type;
let exports=[];
exports.push(
[<input name="export_type" type="radio" value="outcome" onChange={this.inputChange.bind(this,"type","")} checked={this.state.type=="outcome"}/>,<label for="export_type">{gettext("Outcomes")}</label>]
);
exports.push(
[<input name="export_type" type="radio" value="node" onChange={this.inputChange.bind(this,"type","")} checked={this.state.type=="node"}/>,<label for="export_type">{gettext("Nodes")}</label>]
);
if(type=="project"||type=="course")exports.push(
[<input name="export_type" type="radio" value="framework" onChange={this.inputChange.bind(this,"type","")} checked={this.state.type=="framework"}/>,<label for="export_type">{gettext("Course Framework")}</label>]
);
if(type=="project"||type=="program")exports.push(
[<input name="export_type" type="radio" value="matrix" onChange={this.inputChange.bind(this,"type","")} checked={this.state.type=="matrix"}/>,<label for="export_type">{gettext("Competency Matrix")}</label>]
);
return exports;
}
inputChange(type,id,evt){
if(type=="set"){
let new_state={};
new_state[id]=!evt.target.checked;
this.setState(new_state);
}else if(type=="type" && evt.target.checked){
this.setState({type:evt.target.value});
}
}
click(evt){
if(evt.ctrlKey){
this.ctrlKey=true;
$("#export-form")[0].action=post_paths.get_export_download
}
}
submit(evt){
$("#submit-button").attr("disabled",true);
setTimeout(()=>{
if(!this.ctrlKey)this.props.actionFunction();
alert(gettext("Your file is being generated and will be emailed to you shortly."));
},100);
return true;
}
} | PypiClean |
/lemon-tinymce-2.1.1.tar.gz/lemon-tinymce-2.1.1/tinymce/static/tinymce/js/utils/mctabs.js | function MCTabs() {
this.settings = [];
this.onChange = tinyMCEPopup.editor.windowManager.createInstance('tinymce.util.Dispatcher');
};
/**
 * Store the settings object consulted by getParam().
 */
MCTabs.prototype.init = function(settings) {
	this.settings = settings;
};
/**
 * Look up a setting by name, falling back to default_value when absent,
 * and normalising "true"/"false" strings to real booleans.
 */
MCTabs.prototype.getParam = function(name, default_value) {
	var value;

	if (typeof(this.settings[name]) == "undefined") {
		value = default_value;
	} else {
		value = this.settings[name];
	}

	// Fix bool values
	if (value == "true" || value == "false") {
		return (value == "true");
	}

	return value;
};
/**
 * Mark a tab element as active: current class, ARIA selected/expanded
 * state, and make it the focusable tab stop.
 */
MCTabs.prototype.showTab = function(tab) {
	var ariaState = {"aria-selected": true, "aria-expanded": true};
	var key;

	for (key in ariaState) {
		tab.setAttribute(key, ariaState[key]);
	}

	tab.className = 'current';
	tab.tabIndex = 0;
};
/**
 * Clear a tab's active state and remove it from the tab order.
 * (Removed an unused local `var t = this;` from the original.)
 */
MCTabs.prototype.hideTab = function(tab) {
	tab.className = '';
	tab.setAttribute("aria-selected", false);
	tab.setAttribute("aria-expanded", false);
	tab.tabIndex = -1;
};
/**
 * Make a panel visible and expose it to assistive technology.
 */
MCTabs.prototype.showPanel = function(panel) {
	panel.className = 'current';
	panel.setAttribute("aria-hidden", false);
};
/**
 * Hide a panel and mark it hidden for assistive technology.
 */
MCTabs.prototype.hidePanel = function(panel) {
	panel.className = 'panel';
	panel.setAttribute("aria-hidden", true);
};
/**
 * A tab's aria-controls attribute names the id of the panel it drives.
 */
MCTabs.prototype.getPanelForTab = function(tabElm) {
	return tinyMCEPopup.dom.getAttrib(tabElm, "aria-controls");
};
MCTabs.prototype.displayTab = function(tab_id, panel_id, avoid_focus) {
var panelElm, panelContainerElm, tabElm, tabContainerElm, selectionClass, nodes, i, t = this;
tabElm = document.getElementById(tab_id);
if (panel_id === undefined) {
panel_id = t.getPanelForTab(tabElm);
}
panelElm= document.getElementById(panel_id);
panelContainerElm = panelElm ? panelElm.parentNode : null;
tabContainerElm = tabElm ? tabElm.parentNode : null;
selectionClass = t.getParam('selection_class', 'current');
if (tabElm && tabContainerElm) {
nodes = tabContainerElm.childNodes;
// Hide all other tabs
for (i = 0; i < nodes.length; i++) {
if (nodes[i].nodeName == "LI") {
t.hideTab(nodes[i]);
}
}
// Show selected tab
t.showTab(tabElm);
}
if (panelElm && panelContainerElm) {
nodes = panelContainerElm.childNodes;
// Hide all other panels
for (i = 0; i < nodes.length; i++) {
if (nodes[i].nodeName == "DIV")
t.hidePanel(nodes[i]);
}
if (!avoid_focus) {
tabElm.focus();
}
// Show selected panel
t.showPanel(panelElm);
}
};
/**
 * Return the fragment (text after '#') of the current URL, or "" when
 * the URL has no fragment.
 */
MCTabs.prototype.getAnchor = function() {
	var url = document.location.href;
	var pos = url.lastIndexOf('#');

	return pos != -1 ? url.substring(pos + 1) : "";
};
// Global instance
var mcTabs = new MCTabs();

// Once the popup is ready, wire up every div.tabs container: ARIA roles,
// click activation, Ctrl+Tab cycling, and keyboard navigation.
tinyMCEPopup.onInit.add(function() {
	var tinymce = tinyMCEPopup.getWin().tinymce, dom = tinyMCEPopup.dom, each = tinymce.each;

	each(dom.select('div.tabs'), function(tabContainerElm) {
		var keyNav;

		dom.setAttrib(tabContainerElm, "role", "tablist");

		var items = tinyMCEPopup.dom.select('li', tabContainerElm);
		// Shared activation handler: switch tab/panel and notify listeners.
		var action = function(id) {
			mcTabs.displayTab(id, mcTabs.getPanelForTab(id));
			mcTabs.onChange.dispatch(id);
		};

		each(items, function(item) {
			dom.setAttrib(item, 'role', 'tab');
			dom.bind(item, 'click', function(evt) {
				action(item.id);
			});
		});

		// Ctrl+Tab / Ctrl+Shift+Tab cycles through the tab strip.
		dom.bind(dom.getRoot(), 'keydown', function(evt) {
			if (evt.keyCode === 9 && evt.ctrlKey && !evt.altKey) { // Tab
				keyNav.moveFocus(evt.shiftKey ? -1 : 1);
				tinymce.dom.Event.cancel(evt);
			}
		});

		// Links inside the strip should not be tab stops themselves.
		each(dom.select('a', tabContainerElm), function(a) {
			dom.setAttrib(a, 'tabindex', '-1');
		});

		keyNav = tinyMCEPopup.editor.windowManager.createInstance('tinymce.ui.KeyboardNavigation', {
			root: tabContainerElm,
			items: items,
			onAction: action,
			actOnFocus: true,
			enableLeftRight: true,
			enableUpDown: true
		}, tinyMCEPopup.dom);
	});
});
/pyappnvn-0.0.4.tar.gz/pyappnvn-0.0.4/appnvn/balstock/guibls.py | import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from pynvn.csv.rcsv import rcsv
from pynvn.csv.tocsv import wrcsv
from pathlib import Path
from pynvn.path.ppath import (getpathfromtk,
PathSteel,
ExtractFileNameFromPath,
PathFromFileNameAndDirpath,
abspath,
getdirpath,
ExtractFileNameFromPath,
getfilenamewoexten,
credirfol)
from pynvn.data.filename import namefile
from tkinter import messagebox
from datetime import datetime
from appnvn.balstock.dataexc import comparetwofile
import pandas as pd
def getdirpathfromorigin(output1):
    """Derive the path of a ``.csv`` sibling of the file selected in the
    *output1* Tk widget.

    Side effect: stores the selected full path in the module-level global
    ``pathinout`` for later use by other handlers.
    """
    # Get path full
    global pathinout
    pathinout = getpathfromtk(output1)
    filename = ExtractFileNameFromPath(pathinout)
    filename1 = getfilenamewoexten(filename)
    # get dirpath from full path
    dn = getdirpath(pathinout)
    ps = PathSteel(dir_path=dn, FileName=filename1 + ".csv")
    pathf = ps.refpath()
    return pathf
class bl(tk.Tk):
    """Main application window for the balance-stock GUI.

    Configures the window chrome and menu bar via :class:`nmgui`, then
    places a single ``nameuser`` frame inside a container.
    """

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        # self.setcfbs()
        nmgui(tktk=self).setcfbs()
        self.container = tk.Frame(self)
        # container.config(anchor=CENTER)
        self.container.pack(side="top",
                            fill=Y, expand=YES)
        nmgui(tktk=self).createmenu()
        frame = nameuser(self.container, self)
        # self.frames[F] = frame
        frame.grid(row=0,
                   column=0,
                   sticky="nsew")

    def show_frame(self, cont):
        # NOTE(review): self.frames is never initialised or populated in
        # __init__ (the assignment above is commented out), so this lookup
        # will raise AttributeError if called -- confirm intent.
        frame = self.frames[cont]
        frame.tkraise()
class nmgui:
    def __init__(self, tktk=None):
        """Store the target Tk root/window this helper configures."""
        # set logo and title
        self.tktk = tktk
    def setcfbs(self):
        """Apply the window title and background colour to the wrapped widget."""
        # self.tktk.iconbitmap('clienticon.ico')
        self.tktk.title(
            "ATAD STEEL STRUCTURE CORPORATION"
        )
        self.tktk.configure(background='khaki1')
    def createmenu(self):
        """Build the menu bar with a single "Option" cascade.

        All entries except Exit are wired to :meth:`donothing`, which opens
        the user/time listing popup.
        """
        menubar = Menu(self.tktk)
        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label="New",
                             command=lambda: self.donothing())
        filemenu.add_command(label="Open",
                             command=lambda: self.donothing())
        filemenu.add_command(label="Save",
                             command=lambda: self.donothing())
        filemenu.add_command(label="Backup",
                             command=lambda: self.donothing())
        filemenu.add_command(label="Close",
                             command=lambda: self.donothing())
        filemenu.add_separator()
        filemenu.add_command(label="Exit",
                             command=self.tktk.quit)
        menubar.add_cascade(label="Option",
                            menu=filemenu)
        self.tktk.config(menu=menubar)
def donothing(self):
filewin = Toplevel(self.tktk)
columns = ("#1","#2")
self.tktk.tree = ttk.Treeview(filewin,
show = "headings",
columns = columns)
self.tktk.tree.heading("#1", text="Name User")
self.tktk.tree.heading("#2", text="Time")
ysb = ttk.Scrollbar(filewin, orient=tk.VERTICAL,
command=self.tktk.tree.yview)
self.tktk.tree.configure(yscroll=ysb.set)
pathf = getdirpathfromorigin(output1)
dtpd = pd.read_csv(pathf,usecols=[0, 1],
header=None)
sh = dtpd.shape
for ix in range(sh[0]):
k = dtpd.iloc[ix]
k1 = k.values.tolist()
self.tktk.tree.insert("", tk.END,
values=k1)
self.tktk.tree.grid(row=0,
column=0)
ysb.grid(row=0,
column=1,
sticky=tk.N + tk.S)
self.tktk.rowconfigure(0, weight=1)
self.tktk.columnconfigure(0, weight=1)
button = Button(filewin,command=lambda: self.dowloadfilexcelfromeven(fullnamekkk), text="DownLoad")
button.grid(row = 1,
column = 0,
sticky = "we"
)
self.tktk.tree.column("#1",anchor=tk.CENTER)
self.tktk.tree.column("#2",anchor=tk.CENTER)
self.tktk.tree.bind("<<TreeviewSelect>>", self.print_selection)
def returndirpath(self,filename1):
pathfulloutput = self.getfullnamefromoutput()
#filename1 = self.getfilenamefromoutput()
dbk = credirfol(getdirpath(pathfulloutput),
filename1)
return dbk
# get file name
def getfilenamefromoutput(self):
pathfulloutput = self.getfullnamefromoutput()
filename1 = getfilenamewoexten(ExtractFileNameFromPath(pathfulloutput))
return filename1
def getfullnamefromoutput(self):
pathfulloutput = getpathfromtk(output1)
return pathfulloutput
def print_selection(self,event):
for selection in self.tktk.tree.selection():
item = self.tktk.tree.item(selection)
last_name, first_name = item["values"][0:2]
#A = (last_name + first_name)
namefilefromtkk = first_name.translate({ord(c): None for c in '!@#$?/: '}) + "_" + last_name
namefilefromtkk1 = namefilefromtkk.replace(" ", "")
dbk = self.returndirpath(self.getfilenamefromoutput())
nf = namefile (dirpath = dbk,fnamesub = namefilefromtkk1)
nflist = nf.returnfilfullnamefromsubname()
global fullnamekkk
fullnamekkk = PathFromFileNameAndDirpath(dir_path = dbk,
filename = nflist)
def dowloadfilexcelfromeven(self,fullname):
# save as file path from path original
pathst = PathSteel (pathorigrn = fullname)
pathst.saveasfiletopathAndopen()
class nameuser(tk.Frame):
    """Login frame: asks for the user's name, remembers previously used
    names in ``C:\\NLT\\nhuan.csv``, and opens the main window on "Next".

    Side effects on success: sets module globals ``inynameing`` (user name),
    ``dt_string`` / ``dt_string_sr`` (human-readable and compact timestamps),
    hides the root window, and appends the name to the CSV.
    """

    def __init__(self,parent,
                 controller):
        self.parent = parent
        self.controller = controller
        tk.Frame.__init__(self,
                          parent)
        self.inyname = Label(self,
                             text = "INPUT YOUR NAME",
                             width = 17,
                             bg = "SteelBlue2",
                             fg="black"
                             )
        # create full path (folder holding the remembered-names CSV)
        self.pathcre = r"C:\NLT"
        Path(self.pathcre).mkdir(parents=True,
                                 exist_ok=True)
        # return path full
        ps = PathSteel(dir_path =self.pathcre,
                       FileName = "nhuan.csv")
        self.pathf = ps.refpath()
        # Read column 0 of the CSV and de-duplicate it for the combobox.
        rinput = rcsv(pathtor = self.pathf,indexarrtoget = [0])
        row = rinput.Rerowbyindxaindexarr()
        row = list(set(row))
        self.inynamein = ttk.Combobox(self,
                                      values=row)
        self.button = tk.Button(self, text="Next",
                                command=lambda: self.checkinputyourname())
        self.inyname.pack()
        self.inynamein.pack()
        self.button.pack()

    def checkinputyourname(self):
        """Validate the entered name; on success open the main window and
        persist the name to the CSV."""
        global inynameing
        inynameing = self.inynamein.get()
        # BUGFIX: was ``inynameing is ""`` -- identity comparison against a
        # string literal is unreliable (and a SyntaxWarning on CPython 3.8+).
        if inynameing == "":
            print ("Check your name input:")
        else:
            global dt_string
            global dt_string_sr
            now = datetime.now()
            dt_string = now.strftime("%H:%M:%S %d/%m/%Y")
            dt_string_sr = now.strftime("%H%M%S%d%m%Y")
            # Hide the login window and show the main application window.
            self.controller.withdraw()
            filewin = Toplevel(self)
            app = primaryc(filewin)
            wv = wrcsv(pathtow = self.pathf,list =[inynameing])
            wv.writefilecsvFromRowArr()
class primaryc(bl):
    """Main application window: pick a file, open it, and "SYN" it (diff the
    local copy against the server copy and archive the result).

    NOTE(review): inherits from ``bl`` but never calls ``bl.__init__`` (the
    call is commented out below) -- it only reuses bl's interface by name.
    Publishes its Text widget as the module global ``output1``.
    """

    def __init__(self,master):
        self.master = master
        self.container = tk.Frame(self.master)
        self.container.pack()
        nmgui(tktk = master).createmenu()
        #bl.createmenu(self.master)
        large_font = ('Verdana',10)
        #bl.__init__ (self)
        nmgui(tktk = master).setcfbs()
        #create buttom for open file
        button = tk.Button(self.container,text = "Directory file 1",
                           width = 10,
                           height = 2,
                           command = self.mfileopen
                           )
        button.grid(row = 0,
                    column = 0,
                    sticky = "we"
                    )
        # Confirm before closing the window.
        self.master.protocol('WM_DELETE_WINDOW',
                             self.doSomething)
        # create output text, it is used to save directory
        self.output1 = tk.Text (self.container,
                                width = 60,
                                height = 2,
                                font = large_font,
                                selectborderwidth = 10,
                                bg = "yellow"
                                )
        self.output1.grid(row = 0,
                          column = 1,
                          )
        # Expose the Text widget module-wide; nmgui and nameuser read it.
        global output1
        output1 = self.output1
        # open file 1
        self.openfile = tk.Button(self.container,text = "OPEN FILE 1",
                                  width = 10,
                                  height = 2,
                                  command = lambda: self.openfile1(self.output1)
                                  )
        self.openfile.grid(row = 0,
                           column = 2,
                           sticky = "we"
                           )
        # syn on server
        # NOTE(review): ``self.pathinout`` only exists after openfile1() has
        # run; clicking SYN first raises AttributeError -- confirm intended.
        self.syn = tk.Button(self.container,text = "SYN",
                             width = 5,
                             height = 2,
                             command = lambda: self.synserverfileexc(self.pathinout)
                             )
        self.syn.grid(row = 0,
                      column = 3,
                      sticky = "we"
                      )
        #quit widget
        buttom_quit = tk.Button (self.container,
                                 text = "Exit",
                                 width = 20,
                                 command = self.container.quit
                                 )
        buttom_quit.grid(row = 3,
                         column = 1,
                         )

    # open file follow directory
    def mfileopen(self):
        """Ask for a file and append its path to the output1 Text widget."""
        files = filedialog.askopenfilename()
        self.output1.insert(tk.END,
                            files)

    # open file out put
    def mfileopenout(self):
        # NOTE(review): ``self.output2`` is never created in this class --
        # this handler would raise AttributeError; confirm if it is dead code.
        files = filedialog.askopenfilename()
        self.output2.insert(tk.END,
                            files)

    # Open file 1
    def openfile1 (self,output):
        """Record the chosen path in self.pathinout, then save-as and open it."""
        # get path full
        self.pathinout = getpathfromtk(output)
        # save as file path from path original
        pathst = PathSteel (pathorigrn = self.pathinout)
        pathst.saveasfiletopathAndopen()

    def synserverfileexc (self,pathtemp,indexcol = None):
        """Diff the local copy (``pathtemp``) against the same-named file in
        the current working directory, archiving both a timestamped backup
        and a diff file named ``<timestamp>_<user>_<filename>``."""
        filenametemp = ExtractFileNameFromPath(path = pathtemp)
        dirname = abspath("")
        fullname = PathFromFileNameAndDirpath(dir_path = dirname,
                                              filename = filenametemp)
        ########################################
        pathfulloutput = getpathfromtk(output1)
        filename =ExtractFileNameFromPath(pathfulloutput)
        dbk = nmgui(tktk = self.master).returndirpath(getfilenamewoexten(filename))
        ########################################
        #create diff forder for diff file
        dirpathdiff = nmgui(tktk = self.master).returndirpath("diff_history")
        # NOTE(review): this replaces a space with a space (no-op); elsewhere
        # the code uses replace(" ", "") -- confirm the intended replacement.
        inynameing1 = inynameing.replace(" ", " ")
        pathdiff = PathFromFileNameAndDirpath(dir_path = dirpathdiff,
                                              filename = dt_string_sr +\
                                              "_" + inynameing1 +\
                                              "_" + filename)
        ps = PathSteel(dir_path =dbk,
                       FileName = dt_string_sr +\
                       "_" + inynameing1 +\
                       "_" + filename)
        dbk_fullpath = ps.refpath()
        #get path to orginal location with file name diff
        comparetwofile1 = comparetwofile(path_OLD = pathtemp,
                                         path_NEW = fullname,
                                         index_col = None,
                                         usernamein = inynameing1,
                                         pathtcsvtosavedata = getdirpathfromorigin(output1),
                                         difpathtobk = dbk_fullpath,
                                         pathtorgindiff = pathdiff,
                                         dt = dt_string)
        comparetwofile1.excel_diff()

    def doSomething(self):
        """Window-close handler: ask for confirmation before quitting."""
        if messagebox.askyesno("Exit",
                               "Do you want to quit the application?"):
            self.master.quit()
# Launch the application. Note this runs at import time (no
# ``if __name__ == "__main__":`` guard), so importing this module opens the GUI.
app = bl()
app.mainloop()
/pexpect-py3-patch-4.6.0.tar.gz/pexpect-py3-patch-4.6.0/doc/install.rst | Installation
============
Pexpect is on PyPI, and can be installed with standard tools::
pip install pexpect
Or::
easy_install pexpect
Requirements
------------
This version of Pexpect requires Python 3.3 or above, or Python 2.7.
As of version 4.0, Pexpect can be used on Windows and POSIX systems. However,
:class:`pexpect.spawn` and :func:`pexpect.run` are only available on POSIX,
where the :mod:`pty` module is present in the standard library. See
:ref:`windows` for more information.
| PypiClean |
/apexxapi-1.0.5.tar.gz/apexxapi-1.0.5/README.md | This is a simple library of wrapper functions for accessing the most common Apex API 2.0
requests.
The included functions are createCardTransaction, captureCardTransaction, refundCardTransaction, cancelCardTransaction and hostedPaymentPage. Each of
these functions is associated with an API request of the same name. The parameters
listed for each API call are the same as those for the associated functions; view
the documentation for a more in-depth look at the parameters for each function.
A full list of API requests can be found at https://sandmgw.apexxfintech.com/mgw/v2/api/doc#.
| PypiClean |
/torchal-0.0.2-py3-none-any.whl/al_utils/ActiveLearning.py | import numpy as np
import torch
from .Sampling import Sampling, CoreSetMIPSampling
import pycls.utils.logging as lu
import os
logger = lu.get_logger(__name__)
class ActiveLearning:
    """
    Implements standard active learning methods.

    Dispatches on ``cfg.ACTIVE_LEARNING.SAMPLING_FN`` to one of the sampling
    strategies implemented by :class:`Sampling` / :class:`CoreSetMIPSampling`,
    selecting ``cfg.ACTIVE_LEARNING.BUDGET_SIZE`` points from the unlabeled set.
    """

    def __init__(self, dataObj, cfg):
        self.dataObj = dataObj
        self.sampler = Sampling(dataObj=dataObj, cfg=cfg)
        self.cfg = cfg

    def sample_from_uSet(
        self, clf_model, lSet, uSet, trainDataset, supportingModels=None
    ):
        """
        Sample from uSet using args.sampling_method.

        INPUT
        ------
        clf_model: Reference of task classifier model class [Typically VGG]

        supportingModels: List of models which are used for sampling process.

        OUTPUT
        -------
        Returns activeSet, uSet

        NOTE: args is obtained in class property
        """
        assert (
            self.cfg.ACTIVE_LEARNING.BUDGET_SIZE > 0
        ), "Expected a positive budgetSize"
        assert self.cfg.ACTIVE_LEARNING.BUDGET_SIZE < len(
            uSet
        ), "BudgetSet cannot exceed length of unlabelled set. Length of unlabelled set: {} and budgetSize: {}".format(
            len(uSet), self.cfg.ACTIVE_LEARNING.BUDGET_SIZE
        )

        if self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "random":
            activeSet, uSet = self.sampler.random(
                uSet=uSet, budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE
            )

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "uncertainty":
            # Uncertainty sampling needs deterministic forward passes, so the
            # model is put in eval mode and restored afterwards.
            oldmode = clf_model.training
            clf_model.eval()
            activeSet, uSet = self.sampler.uncertainty(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                lSet=lSet,
                uSet=uSet,
                model=clf_model,
                dataset=trainDataset,
            )
            clf_model.train(oldmode)

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "uncertainty_mix":
            oldmode = clf_model.training
            clf_model.eval()
            activeSet, uSet = self.sampler.uncertainty_mix(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                lSet=lSet,
                uSet=uSet,
                model=clf_model,
                dataset=trainDataset,
            )
            clf_model.train(oldmode)
            # torch.cuda.empty_cache()

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "uncertainty_uniform_discretize":
            # if self.cfg.MODEL.TYPE == "vgg": clf_model.penultimate_active=False
            # Save and restore both train mode and the penultimate-feature flag.
            old_train_mode = clf_model.training
            old_penultimate_mode = clf_model.penultimate_active
            clf_model.eval()
            activeSet, uSet = self.sampler.uncertainty_uniform_discretize(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                lSet=lSet,
                uSet=uSet,
                model=clf_model,
                dataset=trainDataset,
            )
            clf_model.train(old_train_mode)
            clf_model.penultimate_active = old_penultimate_mode
            # if self.cfg.MODEL.TYPE == "vgg": clf_model.penultimate_active=True

        elif (
            self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "centre_of_gravity"
            or self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "cog"
        ):
            # Needs penultimate-layer features, hence penultimate_active=True.
            wastrain = clf_model.training
            clf_model.eval()
            waslatent = clf_model.penultimate_active
            clf_model.penultimate_active = True
            activeSet, uSet = self.sampler.centre_of_gravity(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                lSet=lSet,
                uSet=uSet,
                model=clf_model,
                dataset=trainDataset,
                istopK=True,
            )
            clf_model.train(wastrain)
            clf_model.penultimate_active = waslatent

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "coreset":
            # print("Is MIP Optimization: {}".format(self.args.isMIP))
            waslatent = clf_model.penultimate_active
            wastrain = clf_model.training
            clf_model.penultimate_active = True
            if self.cfg.TRAIN.DATASET == "IMAGENET":
                clf_model.cuda(0)
            clf_model.eval()
            coreSetSampler = CoreSetMIPSampling(cfg=self.cfg, dataObj=self.dataObj)
            activeSet, uSet = coreSetSampler.query(
                lSet=lSet, uSet=uSet, clf_model=clf_model, dataset=trainDataset
            )
            clf_model.penultimate_active = waslatent
            clf_model.train(wastrain)

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN.lower() == "dbal":
            activeSet, uSet = self.sampler.dbal(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                uSet=uSet,
                clf_model=clf_model,
                dataset=trainDataset,
            )

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN.lower() == "bald":
            activeSet, uSet = self.sampler.bald(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                uSet=uSet,
                clf_model=clf_model,
                dataset=trainDataset,
            )

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "ensemble_dbal":
            activeSet, uSet = self.sampler.ensemble_dbal(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                uSet=uSet,
                clf_models=supportingModels,
                dataset=trainDataset,
            )

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "ensemble_bald":
            activeSet, uSet = self.sampler.ensemble_bald(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                uSet=uSet,
                clf_models=supportingModels,
                dataset=trainDataset,
            )

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "ensemble_var_R":
            activeSet, uSet = self.sampler.ensemble_var_R(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                uSet=uSet,
                clf_models=supportingModels,
                dataset=trainDataset,
            )

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "core_gcn":
            oldmode = clf_model.training
            clf_model.eval()
            activeSet, uSet = self.sampler.core_gcn(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                lSet=lSet,
                uSet=uSet,
                model=clf_model,
                dataset=trainDataset,
            )
            clf_model.train(oldmode)

        elif self.cfg.ACTIVE_LEARNING.SAMPLING_FN == "tod":
            oldmode = clf_model.training
            clf_model.eval()
            activeSet, uSet = self.sampler.tod(
                budgetSize=self.cfg.ACTIVE_LEARNING.BUDGET_SIZE,
                lSet=lSet,
                uSet=uSet,
                model=clf_model,
                dataset=trainDataset,
            )
            clf_model.train(oldmode)

        else:
            # BUGFIX: previously the message was only print()ed and a bare
            # NotImplementedError was raised; attach the message to the
            # exception so callers and logs see which name was unknown.
            raise NotImplementedError(
                f"{self.cfg.ACTIVE_LEARNING.SAMPLING_FN} is either not implemented or there is some spelling mistake."
            )

        return activeSet, uSet
/airgen-0.0.1-py3-none-any.whl/third_party/msgpackrpc/tornado/platform/twisted.py | from __future__ import absolute_import, division, print_function
import datetime
import functools
import numbers
import socket
import sys
import twisted.internet.abstract # type: ignore
from twisted.internet.defer import Deferred # type: ignore
from twisted.internet.posixbase import PosixReactorBase # type: ignore
from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore
from twisted.python import failure, log # type: ignore
from twisted.internet import error # type: ignore
import twisted.names.cache # type: ignore
import twisted.names.client # type: ignore
import twisted.names.hosts # type: ignore
import twisted.names.resolve # type: ignore
from zope.interface import implementer # type: ignore
from msgpackrpc.tornado.concurrent import Future
from msgpackrpc.tornado.escape import utf8
from msgpackrpc.tornado import gen
import msgpackrpc.tornado.ioloop
from msgpackrpc.tornado.log import app_log
from msgpackrpc.tornado.netutil import Resolver
from msgpackrpc.tornado.stack_context import NullContext, wrap
from msgpackrpc.tornado.ioloop import IOLoop
from msgpackrpc.tornado.util import timedelta_to_seconds
@implementer(IDelayedCall)
class TornadoDelayedCall(object):
    """DelayedCall object for Tornado.

    Wraps a callback scheduled on the Tornado IOLoop so it satisfies
    Twisted's ``IDelayedCall`` interface; ``TornadoReactor.callLater``
    creates and tracks these.
    """
    def __init__(self, reactor, seconds, f, *args, **kw):
        self._reactor = reactor
        self._func = functools.partial(f, *args, **kw)
        # Absolute deadline on the reactor's clock.
        self._time = self._reactor.seconds() + seconds
        self._timeout = self._reactor._io_loop.add_timeout(self._time,
                                                           self._called)
        self._active = True

    def _called(self):
        """IOLoop timeout handler: deregister, then run the user callback."""
        self._active = False
        self._reactor._removeDelayedCall(self)
        try:
            self._func()
        except:
            # Log instead of propagating so one bad callback cannot kill
            # the loop's timeout handling.
            app_log.error("_called caught exception", exc_info=True)

    def getTime(self):
        """Return the absolute time at which this call will fire."""
        return self._time

    def cancel(self):
        """Cancel the pending call and forget it in the reactor."""
        self._active = False
        self._reactor._io_loop.remove_timeout(self._timeout)
        self._reactor._removeDelayedCall(self)

    def delay(self, seconds):
        """Push the scheduled time back by ``seconds`` (relative shift)."""
        self._reactor._io_loop.remove_timeout(self._timeout)
        self._time += seconds
        self._timeout = self._reactor._io_loop.add_timeout(self._time,
                                                           self._called)

    def reset(self, seconds):
        """Re-schedule ``seconds`` from now (absolute re-arm)."""
        self._reactor._io_loop.remove_timeout(self._timeout)
        self._time = self._reactor.seconds() + seconds
        self._timeout = self._reactor._io_loop.add_timeout(self._time,
                                                           self._called)

    def active(self):
        """True while the call is still pending (not fired or cancelled)."""
        return self._active
@implementer(IReactorTime, IReactorFDSet)
class TornadoReactor(PosixReactorBase):
    """Twisted reactor built on the Tornado IOLoop.

    `TornadoReactor` implements the Twisted reactor interface on top of
    the Tornado IOLoop. To use it, simply call `install` at the beginning
    of the application::

        import tornado.platform.twisted
        tornado.platform.twisted.install()
        from twisted.internet import reactor

    When the app is ready to start, call ``IOLoop.current().start()``
    instead of ``reactor.run()``.

    It is also possible to create a non-global reactor by calling
    ``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if
    the `.IOLoop` and reactor are to be short-lived (such as those used in
    unit tests), additional cleanup may be required. Specifically, it is
    recommended to call::

        reactor.fireSystemEvent('shutdown')
        reactor.disconnectAll()

    before closing the `.IOLoop`.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    def __init__(self, io_loop=None):
        if not io_loop:
            io_loop = msgpackrpc.tornado.ioloop.IOLoop.current()
        self._io_loop = io_loop
        self._readers = {}  # map of reader objects to fd
        self._writers = {}  # map of writer objects to fd
        self._fds = {}  # a map of fd to a (reader, writer) tuple
        self._delayedCalls = {}
        PosixReactorBase.__init__(self)
        self.addSystemEventTrigger('during', 'shutdown', self.crash)

        # IOLoop.start() bypasses some of the reactor initialization.
        # Fire off the necessary events if they weren't already triggered
        # by reactor.run().
        def start_if_necessary():
            if not self._started:
                self.fireSystemEvent('startup')
        self._io_loop.add_callback(start_if_necessary)

    # IReactorTime
    def seconds(self):
        """Return the reactor's notion of "now" (the IOLoop clock)."""
        return self._io_loop.time()

    def callLater(self, seconds, f, *args, **kw):
        """Schedule ``f(*args, **kw)`` after ``seconds``; return the call."""
        dc = TornadoDelayedCall(self, seconds, f, *args, **kw)
        self._delayedCalls[dc] = True
        return dc

    def getDelayedCalls(self):
        """Return all delayed calls that have not yet fired or been cancelled."""
        return [x for x in self._delayedCalls if x._active]

    def _removeDelayedCall(self, dc):
        # Called by TornadoDelayedCall when a call fires or is cancelled.
        if dc in self._delayedCalls:
            del self._delayedCalls[dc]

    # IReactorThreads
    def callFromThread(self, f, *args, **kw):
        """Thread-safe scheduling of ``f`` onto the reactor's IOLoop."""
        assert callable(f), "%s is not callable" % f
        with NullContext():
            # This NullContext is mainly for an edge case when running
            # TwistedIOLoop on top of a TornadoReactor.
            # TwistedIOLoop.add_callback uses reactor.callFromThread and
            # should not pick up additional StackContexts along the way.
            self._io_loop.add_callback(f, *args, **kw)

    # We don't need the waker code from the super class, Tornado uses
    # its own waker.
    def installWaker(self):
        pass

    def wakeUp(self):
        pass

    # IReactorFDSet
    def _invoke_callback(self, fd, events):
        """IOLoop handler: translate IOLoop events into Twisted
        doRead/doWrite calls, deregistering descriptors on error/EOF."""
        if fd not in self._fds:
            return
        (reader, writer) = self._fds[fd]
        if reader:
            err = None
            if reader.fileno() == -1:
                err = error.ConnectionLost()
            elif events & IOLoop.READ:
                err = log.callWithLogger(reader, reader.doRead)
            if err is None and events & IOLoop.ERROR:
                err = error.ConnectionLost()
            if err is not None:
                self.removeReader(reader)
                reader.readConnectionLost(failure.Failure(err))
        if writer:
            err = None
            if writer.fileno() == -1:
                err = error.ConnectionLost()
            elif events & IOLoop.WRITE:
                err = log.callWithLogger(writer, writer.doWrite)
            if err is None and events & IOLoop.ERROR:
                err = error.ConnectionLost()
            if err is not None:
                self.removeWriter(writer)
                writer.writeConnectionLost(failure.Failure(err))

    def addReader(self, reader):
        """Register ``reader`` for read events on its file descriptor."""
        if reader in self._readers:
            # Don't add the reader if it's already there
            return
        fd = reader.fileno()
        self._readers[reader] = fd
        if fd in self._fds:
            (_, writer) = self._fds[fd]
            self._fds[fd] = (reader, writer)
            if writer:
                # We already registered this fd for write events,
                # update it for read events as well.
                self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
        else:
            with NullContext():
                self._fds[fd] = (reader, None)
                self._io_loop.add_handler(fd, self._invoke_callback,
                                          IOLoop.READ)

    def addWriter(self, writer):
        """Register ``writer`` for write events on its file descriptor."""
        if writer in self._writers:
            return
        fd = writer.fileno()
        self._writers[writer] = fd
        if fd in self._fds:
            (reader, _) = self._fds[fd]
            self._fds[fd] = (reader, writer)
            if reader:
                # We already registered this fd for read events,
                # update it for write events as well.
                self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
        else:
            with NullContext():
                self._fds[fd] = (None, writer)
                self._io_loop.add_handler(fd, self._invoke_callback,
                                          IOLoop.WRITE)

    def removeReader(self, reader):
        """Stop watching ``reader`` for read events."""
        if reader in self._readers:
            fd = self._readers.pop(reader)
            (_, writer) = self._fds[fd]
            if writer:
                # We have a writer so we need to update the IOLoop for
                # write events only.
                self._fds[fd] = (None, writer)
                self._io_loop.update_handler(fd, IOLoop.WRITE)
            else:
                # Since we have no writer registered, we remove the
                # entry from _fds and unregister the handler from the
                # IOLoop
                del self._fds[fd]
                self._io_loop.remove_handler(fd)

    def removeWriter(self, writer):
        """Stop watching ``writer`` for write events."""
        if writer in self._writers:
            fd = self._writers.pop(writer)
            (reader, _) = self._fds[fd]
            if reader:
                # We have a reader so we need to update the IOLoop for
                # read events only.
                self._fds[fd] = (reader, None)
                self._io_loop.update_handler(fd, IOLoop.READ)
            else:
                # Since we have no reader registered, we remove the
                # entry from the _fds and unregister the handler from
                # the IOLoop.
                del self._fds[fd]
                self._io_loop.remove_handler(fd)

    def removeAll(self):
        """Deregister every reader and writer; return the removed objects."""
        return self._removeAll(self._readers, self._writers)

    def getReaders(self):
        return self._readers.keys()

    def getWriters(self):
        return self._writers.keys()

    # The following functions are mainly used in twisted-style test cases;
    # it is expected that most users of the TornadoReactor will call
    # IOLoop.start() instead of Reactor.run().
    def stop(self):
        PosixReactorBase.stop(self)
        fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown")
        self._io_loop.add_callback(fire_shutdown)

    def crash(self):
        PosixReactorBase.crash(self)
        self._io_loop.stop()

    def doIteration(self, delay):
        # Not used: this reactor overrides mainLoop instead (see below).
        raise NotImplementedError("doIteration")

    def mainLoop(self):
        # Since this class is intended to be used in applications
        # where the top-level event loop is ``io_loop.start()`` rather
        # than ``reactor.run()``, it is implemented a little
        # differently than other Twisted reactors. We override
        # ``mainLoop`` instead of ``doIteration`` and must implement
        # timed call functionality on top of `.IOLoop.add_timeout`
        # rather than using the implementation in
        # ``PosixReactorBase``.
        self._io_loop.start()
class _TestReactor(TornadoReactor):
    """Subclass of TornadoReactor for use in unittests.

    This can't go in the test.py file because of import-order dependencies
    with the Twisted reactor test builder.
    """
    def __init__(self):
        # always use a new ioloop
        super(_TestReactor, self).__init__(IOLoop())

    def listenTCP(self, port, factory, backlog=50, interface=''):
        # default to localhost to avoid firewall prompts on the mac
        if not interface:
            interface = '127.0.0.1'
        return super(_TestReactor, self).listenTCP(
            port, factory, backlog=backlog, interface=interface)

    def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
        # Same localhost default as listenTCP, for the same reason.
        if not interface:
            interface = '127.0.0.1'
        return super(_TestReactor, self).listenUDP(
            port, protocol, interface=interface, maxPacketSize=maxPacketSize)
def install(io_loop=None):
    """Install this package as the default Twisted reactor.

    ``install()`` must be called very early in the startup process,
    before most other twisted-related imports. Conversely, because it
    initializes the `.IOLoop`, it cannot be called before
    `.fork_processes` or multi-process `~.TCPServer.start`. These
    conflicting requirements make it difficult to use `.TornadoReactor`
    in multi-process mode, and an external process manager such as
    ``supervisord`` is recommended instead.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    # Fall back to the calling thread's current IOLoop when none is given.
    io_loop = io_loop or msgpackrpc.tornado.ioloop.IOLoop.current()
    reactor = TornadoReactor(io_loop)
    # Imported lazily so that merely importing this module does not pull in
    # twisted.internet.main before the caller is ready.
    from twisted.internet.main import installReactor  # type: ignore
    installReactor(reactor)
    return reactor
@implementer(IReadDescriptor, IWriteDescriptor)
class _FD(object):
    """Adapter presenting one monitored file descriptor to Twisted.

    Twisted's reactor calls ``doRead``/``doWrite``/``connectionLost`` on
    registered descriptor objects; this class forwards those notifications
    to a Tornado-style ``handler(fileobj, events)`` callback.
    """

    def __init__(self, fd, fileobj, handler):
        self.fd = fd
        self.fileobj = fileobj
        self.handler = handler
        # Registration state maintained by TwistedIOLoop.
        self.reading = False
        self.writing = False
        self.lost = False

    def fileno(self):
        """Return the raw descriptor number Twisted should watch."""
        return self.fd

    def doRead(self):
        """Forward a readable event unless the descriptor was lost."""
        if self.lost:
            return
        self.handler(self.fileobj, msgpackrpc.tornado.ioloop.IOLoop.READ)

    def doWrite(self):
        """Forward a writable event unless the descriptor was lost."""
        if self.lost:
            return
        self.handler(self.fileobj, msgpackrpc.tornado.ioloop.IOLoop.WRITE)

    def connectionLost(self, reason):
        """Report an error event once, then mark the descriptor as lost."""
        if self.lost:
            return
        self.handler(self.fileobj, msgpackrpc.tornado.ioloop.IOLoop.ERROR)
        self.lost = True

    def logPrefix(self):
        """Prefix used by Twisted's logging for this descriptor."""
        return ''
class TwistedIOLoop(msgpackrpc.tornado.ioloop.IOLoop):
    """IOLoop implementation that runs on Twisted.

    `TwistedIOLoop` implements the Tornado IOLoop interface on top of
    the Twisted reactor. Recommended usage::

        from tornado.platform.twisted import TwistedIOLoop
        from twisted.internet import reactor
        TwistedIOLoop().install()
        # Set up your tornado application as usual using `IOLoop.instance`
        reactor.run()

    Uses the global Twisted reactor by default.  To create multiple
    ``TwistedIOLoops`` in the same process, you must pass a unique reactor
    when constructing each one.

    Not compatible with `tornado.process.Subprocess.set_exit_callback`
    because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
    with each other.

    See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
    installing alternative IOLoops.
    """
    def initialize(self, reactor=None, **kwargs):
        super(TwistedIOLoop, self).initialize(**kwargs)
        if reactor is None:
            import twisted.internet.reactor  # type: ignore
            reactor = twisted.internet.reactor
        self.reactor = reactor
        # Maps raw fd -> _FD wrapper (see _FD above).
        self.fds = {}

    def close(self, all_fds=False):
        """Tear down the loop: drop all watchers and cancel pending timers."""
        fds = self.fds
        self.reactor.removeAll()
        for c in self.reactor.getDelayedCalls():
            c.cancel()
        if all_fds:
            for fd in fds.values():
                self.close_fd(fd.fileobj)

    def add_handler(self, fd, handler, events):
        """Register ``handler`` for ``events`` on ``fd`` via the reactor."""
        if fd in self.fds:
            raise ValueError('fd %s added twice' % fd)
        fd, fileobj = self.split_fd(fd)
        self.fds[fd] = _FD(fd, fileobj, wrap(handler))
        if events & msgpackrpc.tornado.ioloop.IOLoop.READ:
            self.fds[fd].reading = True
            self.reactor.addReader(self.fds[fd])
        if events & msgpackrpc.tornado.ioloop.IOLoop.WRITE:
            self.fds[fd].writing = True
            self.reactor.addWriter(self.fds[fd])

    def update_handler(self, fd, events):
        """Reconcile the reactor's read/write registration with ``events``."""
        fd, fileobj = self.split_fd(fd)
        if events & msgpackrpc.tornado.ioloop.IOLoop.READ:
            if not self.fds[fd].reading:
                self.fds[fd].reading = True
                self.reactor.addReader(self.fds[fd])
        else:
            if self.fds[fd].reading:
                self.fds[fd].reading = False
                self.reactor.removeReader(self.fds[fd])
        if events & msgpackrpc.tornado.ioloop.IOLoop.WRITE:
            if not self.fds[fd].writing:
                self.fds[fd].writing = True
                self.reactor.addWriter(self.fds[fd])
        else:
            if self.fds[fd].writing:
                self.fds[fd].writing = False
                self.reactor.removeWriter(self.fds[fd])

    def remove_handler(self, fd):
        """Deregister ``fd`` and mark its wrapper lost (silences callbacks)."""
        fd, fileobj = self.split_fd(fd)
        if fd not in self.fds:
            return
        self.fds[fd].lost = True
        if self.fds[fd].reading:
            self.reactor.removeReader(self.fds[fd])
        if self.fds[fd].writing:
            self.reactor.removeWriter(self.fds[fd])
        del self.fds[fd]

    def start(self):
        """Run the Twisted reactor as this IOLoop, restoring the previously
        current IOLoop on exit."""
        old_current = IOLoop.current(instance=False)
        try:
            self._setup_logging()
            self.make_current()
            self.reactor.run()
        finally:
            if old_current is None:
                IOLoop.clear_current()
            else:
                old_current.make_current()

    def stop(self):
        self.reactor.crash()

    def add_timeout(self, deadline, callback, *args, **kwargs):
        # This method could be simplified (since tornado 4.0) by
        # overriding call_at instead of add_timeout, but we leave it
        # for now as a test of backwards-compatibility.
        if isinstance(deadline, numbers.Real):
            delay = max(deadline - self.time(), 0)
        elif isinstance(deadline, datetime.timedelta):
            delay = timedelta_to_seconds(deadline)
        else:
            raise TypeError("Unsupported deadline %r")
        return self.reactor.callLater(
            delay, self._run_callback,
            functools.partial(wrap(callback), *args, **kwargs))

    def remove_timeout(self, timeout):
        if timeout.active():
            timeout.cancel()

    def add_callback(self, callback, *args, **kwargs):
        # callFromThread is thread-safe, matching add_callback's contract.
        self.reactor.callFromThread(
            self._run_callback,
            functools.partial(wrap(callback), *args, **kwargs))

    def add_callback_from_signal(self, callback, *args, **kwargs):
        self.add_callback(callback, *args, **kwargs)
class TwistedResolver(Resolver):
    """Twisted-based asynchronous resolver.

    This is a non-blocking and non-threaded resolver.  It is
    recommended only when threads cannot be used, since it has
    limitations compared to the standard ``getaddrinfo``-based
    `~tornado.netutil.Resolver` and
    `~tornado.netutil.ThreadedResolver`.  Specifically, it returns at
    most one result, and arguments other than ``host`` and ``family``
    are ignored.  It may fail to resolve when ``family`` is not
    ``socket.AF_UNSPEC``.

    Requires Twisted 12.1 or newer.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    def initialize(self, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        # partial copy of twisted.names.client.createResolver, which doesn't
        # allow for a reactor to be passed in.
        self.reactor = msgpackrpc.tornado.platform.twisted.TornadoReactor(io_loop)
        # Resolution order: /etc/hosts, then the in-memory cache, then DNS.
        host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
        cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
        real_resolver = twisted.names.client.Resolver('/etc/resolv.conf',
                                                      reactor=self.reactor)
        self.resolver = twisted.names.resolve.ResolverChain(
            [host_resolver, cache_resolver, real_resolver])

    @gen.coroutine
    def resolve(self, host, port, family=0):
        """Resolve ``host`` and return ``[(family, (address, port))]``."""
        # getHostByName doesn't accept IP addresses, so if the input
        # looks like an IP address just return it immediately.
        if twisted.internet.abstract.isIPAddress(host):
            resolved = host
            resolved_family = socket.AF_INET
        elif twisted.internet.abstract.isIPv6Address(host):
            resolved = host
            resolved_family = socket.AF_INET6
        else:
            deferred = self.resolver.getHostByName(utf8(host))
            resolved = yield gen.Task(deferred.addBoth)
            if isinstance(resolved, failure.Failure):
                # Surface DNS failures as IOError for Tornado callers.
                try:
                    resolved.raiseException()
                except twisted.names.error.DomainError as e:
                    raise IOError(e)
            elif twisted.internet.abstract.isIPAddress(resolved):
                resolved_family = socket.AF_INET
            elif twisted.internet.abstract.isIPv6Address(resolved):
                resolved_family = socket.AF_INET6
            else:
                resolved_family = socket.AF_UNSPEC
        if family != socket.AF_UNSPEC and family != resolved_family:
            raise Exception('Requested socket family %d but got %d' %
                            (family, resolved_family))
        result = [
            (resolved_family, (resolved, port)),
        ]
        raise gen.Return(result)
# Teach tornado.gen to accept Twisted Deferreds as yieldables (available when
# the running Tornado version exposes the pluggable convert_yielded registry).
if hasattr(gen.convert_yielded, 'register'):
    @gen.convert_yielded.register(Deferred)  # type: ignore
    def _(d):
        # Bridge a Deferred into a Tornado Future: success resolves the
        # Future, failure re-raises so the exc_info can be captured.
        f = Future()

        def errback(failure):
            try:
                failure.raiseException()
                # Should never happen, but just in case
                raise Exception("errback called without error")
            except:
                f.set_exc_info(sys.exc_info())
        d.addCallbacks(f.set_result, errback)
        return f
/alipay-sdk-python-pycryptodome-3.3.202.tar.gz/alipay-sdk-python-pycryptodome-3.3.202/alipay/aop/api/request/AlipayEcoRenthouseRoomStateSyncRequest.py | import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayEcoRenthouseRoomStateSyncModel import AlipayEcoRenthouseRoomStateSyncModel
class AlipayEcoRenthouseRoomStateSyncRequest(object):
    """Generated SDK request wrapper for the
    ``alipay.eco.renthouse.room.state.sync`` API.

    Holds the request payload (``biz_model``/``biz_content``) plus common
    gateway parameters, and serializes them via :meth:`get_params`.  The
    trivial property accessors below follow the generated-code pattern used
    across this SDK.
    """

    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False

    @property
    def biz_model(self):
        return self._biz_model

    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value

    @property
    def biz_content(self):
        return self._biz_content

    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict to convert.
        if isinstance(value, AlipayEcoRenthouseRoomStateSyncModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayEcoRenthouseRoomStateSyncModel.from_alipay_dict(value)

    @property
    def version(self):
        return self._version

    @version.setter
    def version(self, value):
        self._version = value

    @property
    def terminal_type(self):
        return self._terminal_type

    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value

    @property
    def terminal_info(self):
        return self._terminal_info

    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value

    @property
    def prod_code(self):
        return self._prod_code

    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value

    @property
    def notify_url(self):
        return self._notify_url

    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value

    @property
    def return_url(self):
        return self._return_url

    @return_url.setter
    def return_url(self, value):
        self._return_url = value

    @property
    def udf_params(self):
        return self._udf_params

    @udf_params.setter
    def udf_params(self, value):
        # NOTE: non-dict values are silently ignored (generated-code behavior).
        if not isinstance(value, dict):
            return
        self._udf_params = value

    @property
    def need_encrypt(self):
        return self._need_encrypt

    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value

    def add_other_text_param(self, key, value):
        """Attach an extra text parameter to be merged into get_params()."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value

    def get_params(self):
        """Build the flat parameter dict sent to the gateway.

        ``biz_model`` takes the canonical P_BIZ_CONTENT slot; ``biz_content``
        (model or raw value) is written under the literal 'biz_content' key.
        Unset optional fields are omitted; udf_params are merged last.
        """
        params = dict()
        params[P_METHOD] = 'alipay.eco.renthouse.room.state.sync'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params

    def get_multipart_params(self):
        """This request carries no file uploads, so no multipart params."""
        multipart_params = dict()
        return multipart_params
/apache_superset_iteco-2.1.1.4-py3-none-any.whl/superset/dashboards/api.py | import functools
import json
import logging
from datetime import datetime
from io import BytesIO
from typing import Any, Callable, cast, Optional
from zipfile import is_zipfile, ZipFile
from flask import make_response, redirect, request, Response, send_file, url_for
from flask_appbuilder import permission_name
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.hooks import before_request
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import gettext, ngettext
from marshmallow import ValidationError
from werkzeug.wrappers import Response as WerkzeugResponse
from werkzeug.wsgi import FileWrapper
from superset import is_feature_enabled, thumbnail_cache
from superset.charts.schemas import ChartEntityResponseSchema
from superset.commands.importers.exceptions import NoValidFilesFoundError
from superset.commands.importers.v1.utils import get_contents_from_bundle
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.dashboards.commands.bulk_delete import BulkDeleteDashboardCommand
from superset.dashboards.commands.create import CreateDashboardCommand
from superset.dashboards.commands.delete import DeleteDashboardCommand
from superset.dashboards.commands.exceptions import (
DashboardAccessDeniedError,
DashboardBulkDeleteFailedError,
DashboardCreateFailedError,
DashboardDeleteFailedError,
DashboardForbiddenError,
DashboardInvalidError,
DashboardNotFoundError,
DashboardUpdateFailedError,
)
from superset.dashboards.commands.export import ExportDashboardsCommand
from superset.dashboards.commands.importers.dispatcher import ImportDashboardsCommand
from superset.dashboards.commands.update import UpdateDashboardCommand
from superset.dashboards.dao import DashboardDAO
from superset.dashboards.filters import (
DashboardAccessFilter,
DashboardCertifiedFilter,
DashboardCreatedByMeFilter,
DashboardFavoriteFilter,
DashboardHasCreatedByFilter,
DashboardTitleOrSlugFilter,
FilterRelatedRoles,
)
from superset.dashboards.schemas import (
DashboardDatasetSchema,
DashboardGetResponseSchema,
DashboardPostSchema,
DashboardPutSchema,
EmbeddedDashboardConfigSchema,
EmbeddedDashboardResponseSchema,
get_delete_ids_schema,
get_export_ids_schema,
get_fav_star_ids_schema,
GetFavStarIdsSchema,
openapi_spec_methods_override,
thumbnail_query_schema,
)
from superset.embedded.dao import EmbeddedDAO
from superset.extensions import event_logger
from superset.models.dashboard import Dashboard
from superset.models.embedded_dashboard import EmbeddedDashboard
from superset.tasks.thumbnails import cache_dashboard_thumbnail
from superset.tasks.utils import get_current_user
from superset.utils.cache import etag_cache
from superset.utils.screenshots import DashboardScreenshot
from superset.utils.urls import get_url_path
from superset.views.base import generate_download_headers
from superset.views.base_api import (
BaseSupersetModelRestApi,
RelatedFieldFilter,
requires_form_data,
requires_json,
statsd_metrics,
)
from superset.views.filters import (
BaseFilterRelatedRoles,
BaseFilterRelatedUsers,
FilterRelatedOwners,
)
logger = logging.getLogger(__name__)
def with_dashboard(
    f: Callable[[BaseSupersetModelRestApi, Dashboard], Response]
) -> Callable[[BaseSupersetModelRestApi, str], Response]:
    """Decorator resolving an ``<id_or_slug>`` route argument to a Dashboard.

    The wrapped endpoint receives the loaded Dashboard instead of the raw
    id-or-slug string. Lookup or access failures short-circuit the endpoint
    with a 404 or 403 response respectively, without calling the route.
    """

    @functools.wraps(f)
    def wrapper(self: BaseSupersetModelRestApi, id_or_slug: str) -> Response:
        try:
            dashboard = DashboardDAO.get_by_id_or_slug(id_or_slug)
            return f(self, dashboard)
        except DashboardAccessDeniedError:
            return self.response_403()
        except DashboardNotFoundError:
            return self.response_404()

    return wrapper
class DashboardRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(Dashboard)
    @before_request(only=["thumbnail"])
    def ensure_thumbnails_enabled(self) -> Optional[Response]:
        # Runs only before the `thumbnail` endpoint; hides the route entirely
        # (404) when the THUMBNAILS feature flag is disabled.
        if not is_feature_enabled("THUMBNAILS"):
            return self.response_404()
        # Returning None lets the request proceed to the endpoint.
        return None
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.EXPORT,
RouteMethod.IMPORT,
RouteMethod.RELATED,
"bulk_delete", # not using RouteMethod since locally defined
"favorite_status",
"get_charts",
"get_datasets",
"get_embedded",
"set_embedded",
"delete_embedded",
"thumbnail",
}
resource_name = "dashboard"
allow_browser_login = True
class_permission_name = "Dashboard"
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
list_columns = [
"id",
"published",
"status",
"slug",
"url",
"css",
"position_json",
"json_metadata",
"thumbnail_url",
"certified_by",
"certification_details",
"changed_by.first_name",
"changed_by.last_name",
"changed_by.username",
"changed_by.id",
"changed_by_name",
"changed_by_url",
"changed_on_utc",
"changed_on_delta_humanized",
"created_on_delta_humanized",
"created_by.first_name",
"created_by.id",
"created_by.last_name",
"dashboard_title",
"owners.id",
"owners.username",
"owners.first_name",
"owners.last_name",
"owners.email",
"roles.id",
"roles.name",
"is_managed_externally",
]
list_select_columns = list_columns + ["changed_on", "created_on", "changed_by_fk"]
order_columns = [
"changed_by.first_name",
"changed_on_delta_humanized",
"created_by.first_name",
"dashboard_title",
"published",
"changed_on",
]
add_columns = [
"certified_by",
"certification_details",
"dashboard_title",
"slug",
"owners",
"roles",
"position_json",
"css",
"json_metadata",
"published",
]
edit_columns = add_columns
search_columns = (
"created_by",
"changed_by",
"dashboard_title",
"id",
"owners",
"published",
"roles",
"slug",
)
search_filters = {
"dashboard_title": [DashboardTitleOrSlugFilter],
"id": [DashboardFavoriteFilter, DashboardCertifiedFilter],
"created_by": [DashboardCreatedByMeFilter, DashboardHasCreatedByFilter],
}
base_order = ("changed_on", "desc")
add_model_schema = DashboardPostSchema()
edit_model_schema = DashboardPutSchema()
chart_entity_response_schema = ChartEntityResponseSchema()
dashboard_get_response_schema = DashboardGetResponseSchema()
dashboard_dataset_schema = DashboardDatasetSchema()
embedded_response_schema = EmbeddedDashboardResponseSchema()
embedded_config_schema = EmbeddedDashboardConfigSchema()
base_filters = [
["id", DashboardAccessFilter, lambda: []],
]
order_rel_fields = {
"slices": ("slice_name", "asc"),
"owners": ("first_name", "asc"),
"roles": ("name", "asc"),
}
base_related_field_filters = {
"owners": [["id", BaseFilterRelatedUsers, lambda: []]],
"created_by": [["id", BaseFilterRelatedUsers, lambda: []]],
"roles": [["id", BaseFilterRelatedRoles, lambda: []]],
}
related_field_filters = {
"owners": RelatedFieldFilter("first_name", FilterRelatedOwners),
"roles": RelatedFieldFilter("name", FilterRelatedRoles),
"created_by": RelatedFieldFilter("first_name", FilterRelatedOwners),
}
allowed_rel_fields = {"owners", "roles", "created_by"}
openapi_spec_tag = "Dashboards"
""" Override the name set for this collection of endpoints """
openapi_spec_component_schemas = (
ChartEntityResponseSchema,
DashboardGetResponseSchema,
DashboardDatasetSchema,
GetFavStarIdsSchema,
EmbeddedDashboardResponseSchema,
)
apispec_parameter_schemas = {
"get_delete_ids_schema": get_delete_ids_schema,
"get_export_ids_schema": get_export_ids_schema,
"thumbnail_query_schema": thumbnail_query_schema,
"get_fav_star_ids_schema": get_fav_star_ids_schema,
}
openapi_spec_methods = openapi_spec_methods_override
""" Overrides GET methods OpenApi descriptions """
def __repr__(self) -> str:
"""Deterministic string representation of the API instance for etag_cache."""
return "Superset.dashboards.api.DashboardRestApi@v{}{}".format(
self.appbuilder.app.config["VERSION_STRING"],
self.appbuilder.app.config["VERSION_SHA"],
)
@expose("/<id_or_slug>", methods=["GET"])
@protect()
@etag_cache(
get_last_modified=lambda _self, id_or_slug: DashboardDAO.get_dashboard_changed_on( # pylint: disable=line-too-long,useless-suppression
id_or_slug
),
max_age=0,
raise_for_access=lambda _self, id_or_slug: DashboardDAO.get_by_id_or_slug(
id_or_slug
),
skip=lambda _self, id_or_slug: not is_feature_enabled("DASHBOARD_CACHE"),
)
@safe
@statsd_metrics
@with_dashboard
@event_logger.log_this_with_extra_payload
# pylint: disable=arguments-differ
def get(
self,
dash: Dashboard,
add_extra_log_payload: Callable[..., None] = lambda **kwargs: None,
) -> Response:
"""Gets a dashboard
---
get:
description: >-
Get a dashboard
parameters:
- in: path
schema:
type: string
name: id_or_slug
description: Either the id of the dashboard, or its slug
responses:
200:
description: Dashboard
content:
application/json:
schema:
type: object
properties:
result:
$ref: '#/components/schemas/DashboardGetResponseSchema'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
"""
result = self.dashboard_get_response_schema.dump(dash)
add_extra_log_payload(
dashboard_id=dash.id, action=f"{self.__class__.__name__}.get"
)
return self.response(200, result=result)
@expose("/<id_or_slug>/datasets", methods=["GET"])
@protect()
@etag_cache(
get_last_modified=lambda _self, id_or_slug: DashboardDAO.get_dashboard_and_datasets_changed_on( # pylint: disable=line-too-long,useless-suppression
id_or_slug
),
max_age=0,
raise_for_access=lambda _self, id_or_slug: DashboardDAO.get_by_id_or_slug(
id_or_slug
),
skip=lambda _self, id_or_slug: not is_feature_enabled("DASHBOARD_CACHE"),
)
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get_datasets",
log_to_statsd=False,
)
def get_datasets(self, id_or_slug: str) -> Response:
"""Gets a dashboard's datasets
---
get:
description: >-
Returns a list of a dashboard's datasets. Each dataset includes only
the information necessary to render the dashboard's charts.
parameters:
- in: path
schema:
type: string
name: id_or_slug
description: Either the id of the dashboard, or its slug
responses:
200:
description: Dashboard dataset definitions
content:
application/json:
schema:
type: object
properties:
result:
type: array
items:
$ref: '#/components/schemas/DashboardDatasetSchema'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
"""
try:
datasets = DashboardDAO.get_datasets_for_dashboard(id_or_slug)
result = [
self.dashboard_dataset_schema.dump(dataset) for dataset in datasets
]
return self.response(200, result=result)
except (TypeError, ValueError) as err:
return self.response_400(
message=gettext(
"Dataset schema is invalid, caused by: %(error)s", error=str(err)
)
)
except DashboardAccessDeniedError:
return self.response_403()
except DashboardNotFoundError:
return self.response_404()
@expose("/<id_or_slug>/charts", methods=["GET"])
@protect()
@etag_cache(
get_last_modified=lambda _self, id_or_slug: DashboardDAO.get_dashboard_and_slices_changed_on( # pylint: disable=line-too-long,useless-suppression
id_or_slug
),
max_age=0,
raise_for_access=lambda _self, id_or_slug: DashboardDAO.get_by_id_or_slug(
id_or_slug
),
skip=lambda _self, id_or_slug: not is_feature_enabled("DASHBOARD_CACHE"),
)
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get_charts",
log_to_statsd=False,
)
def get_charts(self, id_or_slug: str) -> Response:
"""Gets the chart definitions for a given dashboard
---
get:
description: >-
Get the chart definitions for a given dashboard
parameters:
- in: path
schema:
type: string
name: id_or_slug
responses:
200:
description: Dashboard chart definitions
content:
application/json:
schema:
type: object
properties:
result:
type: array
items:
$ref: '#/components/schemas/ChartEntityResponseSchema'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
"""
try:
charts = DashboardDAO.get_charts_for_dashboard(id_or_slug)
result = [self.chart_entity_response_schema.dump(chart) for chart in charts]
if is_feature_enabled("REMOVE_SLICE_LEVEL_LABEL_COLORS"):
# dashboard metadata has dashboard-level label_colors,
# so remove slice-level label_colors from its form_data
for chart in result:
form_data = chart.get("form_data")
form_data.pop("label_colors", None)
return self.response(200, result=result)
except DashboardAccessDeniedError:
return self.response_403()
except DashboardNotFoundError:
return self.response_404()
@expose("/", methods=["POST"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
log_to_statsd=False,
)
@requires_json
def post(self) -> Response:
"""Creates a new Dashboard
---
post:
description: >-
Create a new Dashboard.
requestBody:
description: Dashboard schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
responses:
201:
description: Dashboard added
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
try:
item = self.add_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
new_model = CreateDashboardCommand(item).run()
return self.response(201, id=new_model.id, result=item)
except DashboardInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DashboardCreateFailedError as ex:
logger.error(
"Error creating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<pk>", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
log_to_statsd=False,
)
@requires_json
def put(self, pk: int) -> Response:
"""Changes a Dashboard
---
put:
description: >-
Changes a Dashboard.
parameters:
- in: path
schema:
type: integer
name: pk
requestBody:
description: Dashboard schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
responses:
200:
description: Dashboard changed
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
last_modified_time:
type: number
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
item = self.edit_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
changed_model = UpdateDashboardCommand(pk, item).run()
last_modified_time = changed_model.changed_on.replace(
microsecond=0
).timestamp()
response = self.response(
200,
id=changed_model.id,
result=item,
last_modified_time=last_modified_time,
)
except DashboardNotFoundError:
response = self.response_404()
except DashboardForbiddenError:
response = self.response_403()
except DashboardInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DashboardUpdateFailedError as ex:
logger.error(
"Error updating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
response = self.response_422(message=str(ex))
return response
@expose("/<pk>", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.delete",
log_to_statsd=False,
)
def delete(self, pk: int) -> Response:
"""Deletes a Dashboard
---
delete:
description: >-
Deletes a Dashboard.
parameters:
- in: path
schema:
type: integer
name: pk
responses:
200:
description: Dashboard deleted
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
DeleteDashboardCommand(pk).run()
return self.response(200, message="OK")
except DashboardNotFoundError:
return self.response_404()
except DashboardForbiddenError:
return self.response_403()
except DashboardDeleteFailedError as ex:
logger.error(
"Error deleting model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@rison(get_delete_ids_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.bulk_delete",
log_to_statsd=False,
)
def bulk_delete(self, **kwargs: Any) -> Response:
"""Delete bulk Dashboards
---
delete:
description: >-
Deletes multiple Dashboards in a bulk operation.
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_delete_ids_schema'
responses:
200:
description: Dashboard bulk delete
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
item_ids = kwargs["rison"]
try:
BulkDeleteDashboardCommand(item_ids).run()
return self.response(
200,
message=ngettext(
"Deleted %(num)d dashboard",
"Deleted %(num)d dashboards",
num=len(item_ids),
),
)
except DashboardNotFoundError:
return self.response_404()
except DashboardForbiddenError:
return self.response_403()
except DashboardBulkDeleteFailedError as ex:
return self.response_422(message=str(ex))
    @expose("/export/", methods=["GET"])
    @protect()
    @safe
    @statsd_metrics
    @rison(get_export_ids_schema)
    @event_logger.log_this_with_context(
        action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.export",
        log_to_statsd=False,
    )  # pylint: disable=too-many-locals
    def export(self, **kwargs: Any) -> Response:
        """Export dashboards
        ---
        get:
          description: >-
            Exports multiple Dashboards and downloads them as YAML files.
          parameters:
          - in: query
            name: q
            content:
              application/json:
                schema:
                  $ref: '#/components/schemas/get_export_ids_schema'
          responses:
            200:
              description: Dashboard export
              content:
                text/plain:
                  schema:
                    type: string
            400:
              $ref: '#/components/responses/400'
            401:
              $ref: '#/components/responses/401'
            404:
              $ref: '#/components/responses/404'
            422:
              $ref: '#/components/responses/422'
            500:
              $ref: '#/components/responses/500'
        """
        requested_ids = kwargs["rison"]
        # Optional token from the caller; echoed back as a cookie when the
        # response is ready (presumably so the UI can detect download start).
        token = request.args.get("token")

        if is_feature_enabled("VERSIONED_EXPORT"):
            # Versioned export: bundle one file per asset into a timestamped ZIP.
            timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
            root = f"dashboard_export_{timestamp}"
            filename = f"{root}.zip"

            buf = BytesIO()
            with ZipFile(buf, "w") as bundle:
                try:
                    for file_name, file_content in ExportDashboardsCommand(
                        requested_ids
                    ).run():
                        with bundle.open(f"{root}/{file_name}", "w") as fp:
                            fp.write(file_content.encode())
                except DashboardNotFoundError:
                    return self.response_404()
            buf.seek(0)

            # NOTE(review): `attachment_filename` was renamed `download_name`
            # in Flask 2.0 and removed in 2.2 — confirm the pinned Flask
            # version before upgrading.
            response = send_file(
                buf,
                mimetype="application/zip",
                as_attachment=True,
                attachment_filename=filename,
            )
            if token:
                response.set_cookie(token, "done", max_age=600)
            return response

        # Legacy (non-versioned) export: a single JSON document, restricted to
        # the dashboards visible through the API's base filters.
        query = self.datamodel.session.query(Dashboard).filter(
            Dashboard.id.in_(requested_ids)
        )
        query = self._base_filters.apply_all(query)
        ids = [item.id for item in query.all()]
        if not ids:
            return self.response_404()
        export = Dashboard.export_dashboards(ids)
        resp = make_response(export, 200)
        resp.headers["Content-Disposition"] = generate_download_headers("json")[
            "Content-Disposition"
        ]
        if token:
            resp.set_cookie(token, "done", max_age=600)
        return resp
@expose("/<pk>/thumbnail/<digest>/", methods=["GET"])
@protect()
@safe
@rison(thumbnail_query_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.thumbnail",
log_to_statsd=False,
)
def thumbnail(self, pk: int, digest: str, **kwargs: Any) -> WerkzeugResponse:
"""Get Dashboard thumbnail
---
get:
description: >-
Compute async or get already computed dashboard thumbnail from cache.
parameters:
- in: path
schema:
type: integer
name: pk
- in: path
name: digest
description: A hex digest that makes this dashboard unique
schema:
type: string
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/thumbnail_query_schema'
responses:
200:
description: Dashboard thumbnail image
content:
image/*:
schema:
type: string
format: binary
202:
description: Thumbnail does not exist on cache, fired async to compute
content:
application/json:
schema:
type: object
properties:
message:
type: string
302:
description: Redirects to the current digest
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
dashboard = cast(Dashboard, self.datamodel.get(pk, self._base_filters))
if not dashboard:
return self.response_404()
dashboard_url = get_url_path(
"Superset.dashboard", dashboard_id_or_slug=dashboard.id
)
# If force, request a screenshot from the workers
current_user = get_current_user()
if kwargs["rison"].get("force", False):
cache_dashboard_thumbnail.delay(
current_user=current_user,
dashboard_id=dashboard.id,
force=True,
)
return self.response(202, message="OK Async")
# fetch the dashboard screenshot using the current user and cache if set
screenshot = DashboardScreenshot(
dashboard_url, dashboard.digest
).get_from_cache(cache=thumbnail_cache)
# If the screenshot does not exist, request one from the workers
if not screenshot:
self.incr_stats("async", self.thumbnail.__name__)
cache_dashboard_thumbnail.delay(
current_user=current_user,
dashboard_id=dashboard.id,
force=True,
)
return self.response(202, message="OK Async")
# If digests
if dashboard.digest != digest:
self.incr_stats("redirect", self.thumbnail.__name__)
return redirect(
url_for(
f"{self.__class__.__name__}.thumbnail",
pk=pk,
digest=dashboard.digest,
)
)
self.incr_stats("from_cache", self.thumbnail.__name__)
return Response(
FileWrapper(screenshot), mimetype="image/png", direct_passthrough=True
)
@expose("/favorite_status/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_fav_star_ids_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
f".favorite_status",
log_to_statsd=False,
)
def favorite_status(self, **kwargs: Any) -> Response:
"""Favorite Stars for Dashboards
---
get:
description: >-
Check favorited dashboards for current user
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_fav_star_ids_schema'
responses:
200:
description:
content:
application/json:
schema:
$ref: "#/components/schemas/GetFavStarIdsSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
requested_ids = kwargs["rison"]
dashboards = DashboardDAO.find_by_ids(requested_ids)
if not dashboards:
return self.response_404()
favorited_dashboard_ids = DashboardDAO.favorited_ids(dashboards)
res = [
{"id": request_id, "value": request_id in favorited_dashboard_ids}
for request_id in requested_ids
]
return self.response(200, result=res)
@expose("/import/", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.import_",
log_to_statsd=False,
)
@requires_form_data
def import_(self) -> Response:
"""Import dashboard(s) with associated charts/datasets/databases
---
post:
requestBody:
required: true
content:
multipart/form-data:
schema:
type: object
properties:
formData:
description: upload file (ZIP or JSON)
type: string
format: binary
passwords:
description: >-
JSON map of passwords for each featured database in the
ZIP file. If the ZIP includes a database config in the path
`databases/MyDatabase.yaml`, the password should be provided
in the following format:
`{"databases/MyDatabase.yaml": "my_password"}`.
type: string
overwrite:
description: overwrite existing dashboards?
type: boolean
responses:
200:
description: Dashboard import result
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
upload = request.files.get("formData")
if not upload:
return self.response_400()
if is_zipfile(upload):
with ZipFile(upload) as bundle:
contents = get_contents_from_bundle(bundle)
else:
upload.seek(0)
contents = {upload.filename: upload.read()}
if not contents:
raise NoValidFilesFoundError()
passwords = (
json.loads(request.form["passwords"])
if "passwords" in request.form
else None
)
overwrite = request.form.get("overwrite") == "true"
command = ImportDashboardsCommand(
contents, passwords=passwords, overwrite=overwrite
)
command.run()
return self.response(200, message="OK")
@expose("/<id_or_slug>/embedded", methods=["GET"])
@protect()
@safe
@permission_name("read")
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get_embedded",
log_to_statsd=False,
)
@with_dashboard
def get_embedded(self, dashboard: Dashboard) -> Response:
"""Response
Returns the dashboard's embedded configuration
---
get:
description: >-
Returns the dashboard's embedded configuration
parameters:
- in: path
schema:
type: string
name: id_or_slug
description: The dashboard id or slug
responses:
200:
description: Result contains the embedded dashboard config
content:
application/json:
schema:
type: object
properties:
result:
$ref: '#/components/schemas/EmbeddedDashboardResponseSchema'
401:
$ref: '#/components/responses/401'
500:
$ref: '#/components/responses/500'
"""
if not dashboard.embedded:
return self.response(404)
embedded: EmbeddedDashboard = dashboard.embedded[0]
result = self.embedded_response_schema.dump(embedded)
return self.response(200, result=result)
@expose("/<id_or_slug>/embedded", methods=["POST", "PUT"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.set_embedded",
log_to_statsd=False,
)
@with_dashboard
def set_embedded(self, dashboard: Dashboard) -> Response:
"""Response
Sets a dashboard's embedded configuration.
---
post:
description: >-
Sets a dashboard's embedded configuration.
parameters:
- in: path
schema:
type: string
name: id_or_slug
description: The dashboard id or slug
requestBody:
description: The embedded configuration to set
required: true
content:
application/json:
schema: EmbeddedDashboardConfigSchema
responses:
200:
description: Successfully set the configuration
content:
application/json:
schema:
type: object
properties:
result:
$ref: '#/components/schemas/EmbeddedDashboardResponseSchema'
401:
$ref: '#/components/responses/401'
500:
$ref: '#/components/responses/500'
put:
description: >-
Sets a dashboard's embedded configuration.
parameters:
- in: path
schema:
type: string
name: id_or_slug
description: The dashboard id or slug
requestBody:
description: The embedded configuration to set
required: true
content:
application/json:
schema: EmbeddedDashboardConfigSchema
responses:
200:
description: Successfully set the configuration
content:
application/json:
schema:
type: object
properties:
result:
$ref: '#/components/schemas/EmbeddedDashboardResponseSchema'
401:
$ref: '#/components/responses/401'
500:
$ref: '#/components/responses/500'
"""
try:
body = self.embedded_config_schema.load(request.json)
embedded = EmbeddedDAO.upsert(dashboard, body["allowed_domains"])
result = self.embedded_response_schema.dump(embedded)
return self.response(200, result=result)
except ValidationError as error:
return self.response_400(message=error.messages)
    @expose("/<id_or_slug>/embedded", methods=["DELETE"])
    @protect()
    @safe
    @permission_name("set_embedded")
    @statsd_metrics
    @event_logger.log_this_with_context(
        action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.delete_embedded",
        log_to_statsd=False,
    )
    @with_dashboard
    def delete_embedded(self, dashboard: Dashboard) -> Response:
        """Response
        Removes a dashboard's embedded configuration.
        ---
        delete:
          description: >-
            Removes a dashboard's embedded configuration.
          parameters:
          - in: path
            schema:
              type: string
            name: id_or_slug
            description: The dashboard id or slug
          responses:
            200:
              description: Successfully removed the configuration
              content:
                application/json:
                  schema:
                    type: object
                    properties:
                      message:
                        type: string
            401:
              $ref: '#/components/responses/401'
            500:
              $ref: '#/components/responses/500'
        """
        # NOTE(review): EmbeddedDashboard rows are removed via
        # DashboardDAO.delete — presumably the base DAO's delete() is
        # model-agnostic; confirm, or consider EmbeddedDAO for clarity.
        for embedded in dashboard.embedded:
            DashboardDAO.delete(embedded)
        return self.response(200, message="OK")
/tvb-rest-client-2.8.1.tar.gz/tvb-rest-client-2.8.1/tvb/core/services/operation_service.py | import json
import os
import sys
import uuid
import zipfile
from inspect import isclass
from tvb.basic.exceptions import TVBException
from tvb.basic.logger.builder import get_logger
from tvb.basic.profile import TvbProfile
from tvb.config import MEASURE_METRICS_MODULE, MEASURE_METRICS_CLASS, MEASURE_METRICS_MODEL_CLASS, ALGORITHMS
from tvb.core.adapters.abcadapter import ABCAdapter, AdapterLaunchModeEnum
from tvb.core.adapters.exceptions import LaunchException
from tvb.core.entities.generic_attributes import GenericAttributes
from tvb.core.entities.load import get_class_by_name
from tvb.core.entities.model.model_burst import PARAM_RANGE_PREFIX, RANGE_PARAMETER_1, RANGE_PARAMETER_2, \
BurstConfiguration
from tvb.core.entities.model.model_datatype import DataTypeGroup
from tvb.core.entities.model.model_operation import STATUS_FINISHED, STATUS_ERROR, Operation
from tvb.core.entities.storage import dao, transactional
from tvb.core.neocom import h5
from tvb.core.neotraits.h5 import ViewModelH5
from tvb.core.services.backend_client_factory import BackendClientFactory
from tvb.core.services.burst_service import BurstService
from tvb.core.services.exceptions import OperationException
from tvb.core.services.project_service import ProjectService
from tvb.datatypes.time_series import TimeSeries
from tvb.storage.storage_interface import StorageInterface
# Re-exported here for backward compatibility with callers that import the
# range-parameter names from this module instead of model_burst.
RANGE_PARAMETER_1 = RANGE_PARAMETER_1
RANGE_PARAMETER_2 = RANGE_PARAMETER_2
# Module-level registry of pending burst work per operation group
# (presumably keyed by group id -- TODO confirm against writers/readers).
GROUP_BURST_PENDING = {}
class OperationService:
    """
    Class responsible for preparing an operation launch.
    It will prepare parameters, and decide if the operation is to be executed
    immediately, or to be sent on the cluster.
    """
    ATT_UID = "uid"
    def __init__(self):
        # One logger per concrete class' module; a shared storage facade for file work.
        self.logger = get_logger(self.__class__.__module__)
        self.storage_interface = StorageInterface()
    ##########################################################################################
    ######## Methods related to launching operations start here ##############################
    ##########################################################################################
    def fits_max_operation_size(self, adapter_instance, view_model, project_id, range_length=1):
        # True when the projected disk usage (per-run estimate * number of range
        # runs) stays under the project's configured cap; no cap accepts everything.
        project = dao.get_project_by_id(project_id)
        if project.max_operation_size is None:
            return True
        adapter_instance.configure(view_model)
        # NOTE(review): despite the local name, this is the *disk* size estimate.
        adapter_required_memory = adapter_instance.get_required_disk_size(view_model)
        return adapter_required_memory * range_length < project.max_operation_size
    def initiate_operation(self, current_user, project, adapter_instance, visible=True, model_view=None):
        """
        Gets the parameters of the computation from the previous inputs form,
        and launches a computation (on the cluster or locally).
        Invoke custom method on an Adapter Instance. Make sure when the
        operation has finished that the correct results are stored into DB.
        """
        if not isinstance(adapter_instance, ABCAdapter):
            self.logger.warning("Inconsistent Adapter Class:" + str(adapter_instance.__class__))
            raise LaunchException("Developer Exception!!")
        algo = adapter_instance.stored_adapter
        operation = self.prepare_operation(current_user.id, project, algo, visible, model_view)
        # Synchronous in-memory adapters run here; everything else is handed to the backend.
        if adapter_instance.launch_mode == AdapterLaunchModeEnum.SYNC_SAME_MEM:
            return self.initiate_prelaunch(operation, adapter_instance)
        else:
            return self._send_to_cluster(operation, adapter_instance, current_user.username)
    @staticmethod
    def prepare_metadata(algo_category, burst=None, current_ga=GenericAttributes()):
        """
        Gather generic_metadata from submitted fields and current to be execute algorithm.
        Will populate STATE, GROUP, etc in generic_metadata
        """
        # NOTE(review): GenericAttributes() as a default argument is a shared
        # mutable default; safe only if fill_from/readers never mutate it -- confirm.
        generic_metadata = GenericAttributes()
        generic_metadata.state = algo_category.defaultdatastate
        generic_metadata.parent_burst = burst
        generic_metadata.fill_from(current_ga)
        return generic_metadata
    @staticmethod
    def _read_set(values):
        """ Parse a committed UI possible list of values, into a set converted into string."""
        if isinstance(values, list):
            set_values = []
            values_str = ""
            for val in values:
                if val not in set_values:
                    set_values.append(val)
                    values_str = values_str + " " + str(val)
            values = values_str
        return str(values).strip()
    def group_operation_launch(self, user_id, project, algorithm_id, category_id):
        """
        Create and prepare the launch of a group of operations.
        """
        algorithm = dao.get_algorithm_by_id(algorithm_id)
        # NOTE(review): prepare_operation returns a single Operation elsewhere in
        # this class; the tuple unpacking here looks stale -- confirm call contract.
        ops, _ = self.prepare_operation(user_id, project, algorithm)
        for operation in ops:
            self.launch_operation(operation.id, True)
    def _prepare_metric_operation(self, sim_operation):
        # type: (Operation) -> Operation
        # Builds and stores a hidden follow-up operation that computes metrics on
        # the TimeSeries produced by a finished simulation operation.
        metric_algo = dao.get_algorithm_by_module(MEASURE_METRICS_MODULE, MEASURE_METRICS_CLASS)
        datatype_index = h5.REGISTRY.get_index_for_datatype(TimeSeries)
        time_series_index = dao.get_generic_entity(datatype_index, sim_operation.id, 'fk_from_operation')[0]
        ga = self.prepare_metadata(metric_algo.algorithm_category, time_series_index.fk_parent_burst)
        ga.visible = False
        view_model = get_class_by_name("{}.{}".format(MEASURE_METRICS_MODULE, MEASURE_METRICS_MODEL_CLASS))()
        view_model.time_series = time_series_index.gid
        view_model.algorithms = tuple(ALGORITHMS.keys())
        view_model.generic_attributes = ga
        parent_burst = dao.get_generic_entity(BurstConfiguration, time_series_index.fk_parent_burst, 'gid')[0]
        metric_op_group = dao.get_operationgroup_by_id(parent_burst.fk_metric_operation_group)
        metric_operation_group_id = parent_burst.fk_metric_operation_group
        range_values = sim_operation.range_values
        view_model.operation_group_gid = uuid.UUID(metric_op_group.gid)
        view_model.ranges = json.dumps(parent_burst.ranges)
        view_model.range_values = range_values
        view_model.is_metric_operation = True
        metric_operation = Operation(view_model.gid.hex, sim_operation.fk_launched_by, sim_operation.fk_launched_in,
                                     metric_algo.id, user_group=ga.operation_tag, op_group_id=metric_operation_group_id,
                                     range_values=range_values)
        metric_operation.visible = False
        metric_operation = dao.store_entity(metric_operation)
        # First metric operation of the group becomes the datatype group's origin.
        metrics_datatype_group = dao.get_generic_entity(DataTypeGroup, metric_operation_group_id,
                                                        'fk_operation_group')[0]
        if metrics_datatype_group.fk_from_operation is None:
            metrics_datatype_group.fk_from_operation = metric_operation.id
            dao.store_entity(metrics_datatype_group)
        self.store_view_model(metric_operation, sim_operation.project, view_model)
        return metric_operation
    @transactional
    def prepare_operation(self, user_id, project, algorithm, visible=True, view_model=None, ranges=None,
                          burst_gid=None, op_group_id=None):
        """
        Do all the necessary preparations for storing an operation. If it's the case of a
        range of values create an operation group and multiple operations for each possible
        instance from the range.
        """
        algo_category = dao.get_category_by_id(algorithm.fk_category)
        ga = self.prepare_metadata(algo_category, current_ga=view_model.generic_attributes, burst=burst_gid)
        ga.visible = visible
        view_model.generic_attributes = ga
        self.logger.debug("Saving Operation(userId=" + str(user_id) + ",projectId=" + str(project.id) +
                          ",algorithmId=" + str(algorithm.id) + ")")
        operation = Operation(view_model.gid.hex, user_id, project.id, algorithm.id, user_group=ga.operation_tag,
                              op_group_id=op_group_id, range_values=ranges)
        operation = dao.store_entity(operation)
        self.store_view_model(operation, project, view_model)
        return operation
    @staticmethod
    def store_view_model(operation, project, view_model):
        # Persist the view model H5 next to the operation and record its disk size.
        storage_path = StorageInterface().get_project_folder(project.name, str(operation.id))
        h5.store_view_model(view_model, storage_path)
        view_model_size_on_disk = StorageInterface.compute_recursive_h5_disk_usage(storage_path)
        operation.view_model_disk_size = view_model_size_on_disk
        dao.store_entity(operation)
    def initiate_prelaunch(self, operation, adapter_instance):
        """
        Public method.
        This should be the common point in calling an adapter- method.
        """
        result_msg = ""
        nr_datatypes = 0
        temp_files = []
        try:
            operation = dao.get_operation_by_id(operation.id)  # Load Lazy fields
            # Disk quota available to this user = global cap - running ops - stored data.
            disk_space_per_user = TvbProfile.current.MAX_DISK_SPACE
            pending_op_disk_space = dao.compute_disk_size_for_started_ops(operation.fk_launched_by)
            user_disk_space = dao.compute_user_generated_disk_size(operation.fk_launched_by)  # From kB to Bytes
            available_space = disk_space_per_user - pending_op_disk_space - user_disk_space
            view_model = adapter_instance.load_view_model(operation)
            try:
                # Collect uploaded temp files referenced by the form so they can be
                # cleaned up once the launch finishes (or fails).
                form = adapter_instance.get_form()
                form = form() if isclass(form) else form
                fields = form.get_upload_field_names()
                project = dao.get_project_by_id(operation.fk_launched_in)
                tmp_folder = self.storage_interface.get_temp_folder(project.name)
                for upload_field in fields:
                    if hasattr(view_model, upload_field):
                        file = getattr(view_model, upload_field)
                        if file.startswith(tmp_folder) or file.startswith(TvbProfile.current.TVB_TEMP_FOLDER):
                            temp_files.append(file)
            except AttributeError:
                # Skip if we don't have upload fields on current form
                pass
            result_msg, nr_datatypes = adapter_instance._prelaunch(operation, view_model, available_space)
            operation = dao.get_operation_by_id(operation.id)
            operation.mark_complete(STATUS_FINISHED)
            dao.store_entity(operation)
            self._update_vm_generic_operation_tag(view_model, operation)
            self._remove_files(temp_files)
        except zipfile.BadZipfile as excep:
            msg = "The uploaded file is not a valid ZIP!"
            self._handle_exception(excep, temp_files, msg, operation)
        except TVBException as excep:
            self._handle_exception(excep, temp_files, excep.message, operation)
        except MemoryError:
            msg = ("Could not execute operation because there is not enough free memory." +
                   " Please adjust operation parameters and re-launch it.")
            self._handle_exception(Exception(msg), temp_files, msg, operation)
        except Exception as excep1:
            msg = "Could not launch Operation with the given input data!"
            self._handle_exception(excep1, temp_files, msg, operation)
        # A finished simulation inside a group triggers the metrics follow-up operation.
        if operation.fk_operation_group and 'SimulatorAdapter' in operation.algorithm.classname and nr_datatypes == 1:
            next_op = self._prepare_metric_operation(operation)
            self.launch_operation(next_op.id)
        return result_msg
    def _send_to_cluster(self, operation, adapter_instance, current_username="unknown"):
        """ Initiate operation on cluster"""
        try:
            BackendClientFactory.execute(str(operation.id), current_username, adapter_instance)
        except TVBException as ex:
            self._handle_exception(ex, {}, ex.message, operation)
        except Exception as excep:
            self._handle_exception(excep, {}, "Could not start operation!", operation)
        return operation
    @staticmethod
    def _update_vm_generic_operation_tag(view_model, operation):
        # Mirror the operation's user_group tag back into the stored view-model H5,
        # if the file still exists on disk.
        project = dao.get_project_by_id(operation.fk_launched_in)
        h5_path = h5.path_for(operation.id, ViewModelH5, view_model.gid, project.name, type(view_model).__name__)
        if not os.path.exists(h5_path):
            return
        with ViewModelH5(h5_path, view_model) as vm_h5:
            vm_h5.operation_tag.store(operation.user_group)
    def launch_operation(self, operation_id, send_to_cluster=False, adapter_instance=None):
        """
        Method exposed for Burst-Workflow related calls.
        It is used for cascading operation in the same workflow.
        """
        if operation_id is not None:
            operation = dao.get_operation_by_id(operation_id)
            if adapter_instance is None:
                algorithm = operation.algorithm
                adapter_instance = ABCAdapter.build_adapter(algorithm)
            if send_to_cluster:
                self._send_to_cluster(operation, adapter_instance, operation.user.username)
            else:
                self.initiate_prelaunch(operation, adapter_instance)
    def _handle_exception(self, exception, temp_files, message, operation=None):
        """
        Common way to treat exceptions:
            - remove temporary files, if any
            - set status ERROR on current operation (if any)
            - log exception
        """
        self.logger.exception(message)
        if operation is not None:
            BurstService().persist_operation_state(operation, STATUS_ERROR, str(exception))
        self._remove_files(temp_files)
        exception.message = message
        raise exception.with_traceback(
            sys.exc_info()[2])  # when rethrowing in python this is required to preserve the stack trace
    def _remove_files(self, file_list):
        """
        Remove any files that exist in the file_dictionary.
        Currently used to delete temporary files created during an operation.
        """
        for pth in file_list:
            if pth is not None:
                pth = str(pth)
                try:
                    if os.path.exists(pth) and os.path.isfile(pth):
                        os.remove(pth)
                        # Drop the containing folder too once it becomes empty.
                        if len(os.listdir(os.path.dirname(pth))) == 0:
                            self.storage_interface.remove_folder(os.path.dirname(pth))
                        self.logger.debug("We no longer need file:" + pth + " => deleted")
                    else:
                        self.logger.warning("Trying to remove not existent file:" + pth)
                except OSError:
                    self.logger.exception("Could not cleanup file!")
    @staticmethod
    def _range_name(range_no):
        # Canonical form-field name for the given range number (e.g. "range_1").
        return PARAM_RANGE_PREFIX + str(range_no)
    def fire_operation(self, adapter_instance, current_user, project_id, visible=True, view_model=None):
        """
        Launch an operation, specified by AdapterInstance, for current_user and project with project_id.
        """
        operation_name = str(adapter_instance.__class__.__name__)
        try:
            self.logger.info("Starting operation " + operation_name)
            project = dao.get_project_by_id(project_id)
            result = self.initiate_operation(current_user, project, adapter_instance, visible,
                                             model_view=view_model)
            self.logger.info("Finished operation launch:" + operation_name)
            return result
        except TVBException as excep:
            self.logger.exception("Could not launch operation " + operation_name +
                                  " with the given set of input data, because: " + excep.message)
            raise OperationException(excep.message, excep)
        except Exception as excep:
            self.logger.exception("Could not launch operation " + operation_name + " with the given set of input data!")
            raise OperationException(str(excep))
    @staticmethod
    def load_operation(operation_id):
        """ Retrieve previously stored Operation from DB, and load operation.burst attribute"""
        operation = dao.get_operation_by_id(operation_id)
        operation.burst = dao.get_burst_for_operation_id(operation_id)
        return operation
    @staticmethod
    def stop_operation(operation_id, is_group=False, remove_after_stop=False):
        # type: (int, bool, bool) -> bool
        """
        Stop (also named Cancel) the operation given by operation_id,
        and potentially also remove it after (with all linked data).
        In case the Operation has a linked Burst, remove that too.
        :param operation_id: ID for Operation (or OperationGroup) to be canceled/removed
        :param is_group: When true stop all the operations from that group.
        :param remove_after_stop: if True, also remove the operation(s) after stopping
        :returns True if the stop step was successfully
        """
        result = False
        if is_group:
            # Fan out over the group's members; result is True if any member stopped.
            op_group = ProjectService.get_operation_group_by_id(operation_id)
            operations_in_group = ProjectService.get_operations_in_group(op_group)
            for operation in operations_in_group:
                result = OperationService.stop_operation(operation.id, False, remove_after_stop) or result
        elif dao.try_get_operation_by_id(operation_id) is not None:
            result = BackendClientFactory.stop_operation(operation_id)
            if remove_after_stop:
                burst_config = dao.get_burst_for_direct_operation_id(operation_id)
                ProjectService().remove_operation(operation_id)
                if burst_config is not None:
                    result = dao.remove_entity(BurstConfiguration, burst_config.id) or result
        return result
/Telethon-v1.24-1.24.8.tar.gz/Telethon-v1.24-1.24.8/telethon/tl/functions/langpack.py | from ...tl.tlobject import TLObject
from ...tl.tlobject import TLRequest
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
from datetime import datetime
class GetDifferenceRequest(TLRequest):
    CONSTRUCTOR_ID = 0xcd984aa5
    SUBCLASS_OF_ID = 0x52662d55
    def __init__(self, lang_pack: str, lang_code: str, from_version: int):
        """
        :returns LangPackDifference: Instance of LangPackDifference.
        """
        self.lang_pack = lang_pack
        self.lang_code = lang_code
        self.from_version = from_version
    def to_dict(self):
        # Serializable representation, assembled field by field.
        result = {'_': 'GetDifferenceRequest'}
        result['lang_pack'] = self.lang_pack
        result['lang_code'] = self.lang_code
        result['from_version'] = self.from_version
        return result
    def _bytes(self):
        # Little-endian constructor id followed by the serialized fields.
        parts = [struct.pack('<I', self.CONSTRUCTOR_ID)]
        parts.append(self.serialize_bytes(self.lang_pack))
        parts.append(self.serialize_bytes(self.lang_code))
        parts.append(struct.pack('<i', self.from_version))
        return b''.join(parts)
    @classmethod
    def from_reader(cls, reader):
        # Fields must be read in declaration order.
        lang_pack = reader.tgread_string()
        lang_code = reader.tgread_string()
        from_version = reader.read_int()
        return cls(lang_pack=lang_pack, lang_code=lang_code, from_version=from_version)
class GetLangPackRequest(TLRequest):
    CONSTRUCTOR_ID = 0xf2f2330a
    SUBCLASS_OF_ID = 0x52662d55
    def __init__(self, lang_pack: str, lang_code: str):
        """
        :returns LangPackDifference: Instance of LangPackDifference.
        """
        self.lang_pack = lang_pack
        self.lang_code = lang_code
    def to_dict(self):
        # Serializable representation, assembled field by field.
        result = {'_': 'GetLangPackRequest'}
        result['lang_pack'] = self.lang_pack
        result['lang_code'] = self.lang_code
        return result
    def _bytes(self):
        # Little-endian constructor id followed by the serialized fields.
        parts = [struct.pack('<I', self.CONSTRUCTOR_ID)]
        parts.append(self.serialize_bytes(self.lang_pack))
        parts.append(self.serialize_bytes(self.lang_code))
        return b''.join(parts)
    @classmethod
    def from_reader(cls, reader):
        lang_pack = reader.tgread_string()
        lang_code = reader.tgread_string()
        return cls(lang_pack=lang_pack, lang_code=lang_code)
class GetLanguageRequest(TLRequest):
    CONSTRUCTOR_ID = 0x6a596502
    SUBCLASS_OF_ID = 0xabac89b7
    def __init__(self, lang_pack: str, lang_code: str):
        """
        :returns LangPackLanguage: Instance of LangPackLanguage.
        """
        self.lang_pack = lang_pack
        self.lang_code = lang_code
    def to_dict(self):
        # Serializable representation, assembled field by field.
        result = {'_': 'GetLanguageRequest'}
        result['lang_pack'] = self.lang_pack
        result['lang_code'] = self.lang_code
        return result
    def _bytes(self):
        # Little-endian constructor id followed by the serialized fields.
        parts = [struct.pack('<I', self.CONSTRUCTOR_ID)]
        parts.append(self.serialize_bytes(self.lang_pack))
        parts.append(self.serialize_bytes(self.lang_code))
        return b''.join(parts)
    @classmethod
    def from_reader(cls, reader):
        lang_pack = reader.tgread_string()
        lang_code = reader.tgread_string()
        return cls(lang_pack=lang_pack, lang_code=lang_code)
class GetLanguagesRequest(TLRequest):
    CONSTRUCTOR_ID = 0x42c6978f
    SUBCLASS_OF_ID = 0x280912c9
    def __init__(self, lang_pack: str):
        """
        :returns Vector<LangPackLanguage>: This type has no constructors.
        """
        self.lang_pack = lang_pack
    def to_dict(self):
        # Serializable representation of the request.
        return dict(_='GetLanguagesRequest', lang_pack=self.lang_pack)
    def _bytes(self):
        # Little-endian constructor id followed by the serialized field.
        parts = [struct.pack('<I', self.CONSTRUCTOR_ID)]
        parts.append(self.serialize_bytes(self.lang_pack))
        return b''.join(parts)
    @classmethod
    def from_reader(cls, reader):
        lang_pack = reader.tgread_string()
        return cls(lang_pack=lang_pack)
class GetStringsRequest(TLRequest):
    CONSTRUCTOR_ID = 0xefea3803
    SUBCLASS_OF_ID = 0xc7b7353d
    # TL Vector constructor id, used when serializing the keys list.
    _VECTOR_ID = 0x1cb5c415
    def __init__(self, lang_pack: str, lang_code: str, keys: List[str]):
        """
        :returns Vector<LangPackString>: This type has no constructors.
        """
        self.lang_pack = lang_pack
        self.lang_code = lang_code
        self.keys = keys
    def to_dict(self):
        # Serializable representation; keys are shallow-copied.
        if self.keys is None:
            keys_copy = []
        else:
            keys_copy = self.keys[:]
        result = {'_': 'GetStringsRequest'}
        result['lang_pack'] = self.lang_pack
        result['lang_code'] = self.lang_code
        result['keys'] = keys_copy
        return result
    def _bytes(self):
        # Little-endian constructor id, the two strings, then the keys as a
        # TL Vector<string> (vector id + count + serialized items).
        parts = [struct.pack('<I', self.CONSTRUCTOR_ID)]
        parts.append(self.serialize_bytes(self.lang_pack))
        parts.append(self.serialize_bytes(self.lang_code))
        parts.append(struct.pack('<I', self._VECTOR_ID))
        parts.append(struct.pack('<i', len(self.keys)))
        for key in self.keys:
            parts.append(self.serialize_bytes(key))
        return b''.join(parts)
    @classmethod
    def from_reader(cls, reader):
        lang_pack = reader.tgread_string()
        lang_code = reader.tgread_string()
        reader.read_int()  # discard the vector constructor id
        keys = [reader.tgread_string() for _ in range(reader.read_int())]
        return cls(lang_pack=lang_pack, lang_code=lang_code, keys=keys)
/cs46_laky_trees-1.0.0.tar.gz/cs46_laky_trees-1.0.0/Trees/Heap.py | from Trees.BinaryTree import BinaryTree, Node
class Heap(BinaryTree):
    '''
    FIXME:
    Heap is currently not a subclass of BinaryTree.
    You should make the necessary changes in the class declaration line above
    and in the constructor below.
    '''
    def __init__(self, xs=None):
        '''
        FIXME:
        If xs is a list (i.e. xs is not None),
        then each element of xs needs to be inserted into the Heap.
        '''
        # Start empty; bulk-insert any provided initial values.
        self.root = None
        if xs:
            self.insert_list(xs)
    def __repr__(self):
        '''
        Notice that in the BinaryTree class,
        we defined a __str__ function,
        but not a __repr__ function.
        Recall that the __repr__ function should return a string that can be used to recreate a valid instance of the class.
        Thus, if you create a variable using the command Heap([1,2,3])
        it's __repr__ will return "Heap([1,2,3])"
        For the Heap, type(self).__name__ will be the string "Heap",
        but for the AVLTree, this expression will be "AVLTree".
        Using this expression ensures that all subclasses of Heap will have a correct implementation of __repr__,
        and that they won't have to reimplement it.
        '''
        return type(self).__name__+'('+str(self.to_list('inorder'))+')'
    def is_heap_satisfied(self):
        '''
        Whenever you implement a data structure,
        the first thing to do is to implement a function that checks whether
        the structure obeys all of its laws.
        This makes it possible to automatically test whether insert/delete functions
        are actually working.
        '''
        if self.root:
            return Heap._is_heap_satisfied(self.root)
        return True
    @staticmethod
    def _is_heap_satisfied(node):
        '''
        FIXME:
        Implement this method.
        The lecture videos have the exact code you need,
        except that their method is an instance method when it should have been a static method.
        '''
        # Min-heap invariant: every node is <= its children.
        # NOTE(review): the right-is-None branch does not recurse into the left
        # subtree; that is only safe if left is guaranteed to be a leaf there.
        if node is None or (node.left is None and node.right is None):
            return True
        elif node.right is None:
            return node.value <= node.left.value
        elif node.value <= node.left.value and node.value <= node.right.value:
            return Heap._is_heap_satisfied(node.left) and Heap._is_heap_satisfied(node.right)
        else:
            return False
    def insert(self, value):
        '''
        Inserts value into the heap.
        '''
        if self.root is None:
            self.root = Node(value)
            # NOTE(review): 'descendents' is only ever set here and never read
            # or updated elsewhere -- looks like a leftover.
            self.root.descendents = 1
        else:
            Heap._insert(value, self.root)
    @staticmethod
    def _insert(value, node):
        '''
        FIXME:
        Implement this function.
        '''
        # Thin wrapper around the recursive insert-and-sift-up helper.
        Heap._insert(value, node)
        Heap._helpinsert(value, node)
    @staticmethod
    def size(node):
        # Iterative count of all nodes in the subtree rooted at 'node'.
        if node is None:
            return 0
        stack=[]
        stack.append(node)
        size=1
        while stack:
            node=stack.pop()
            if node.left:
                size+=1
                stack.append(node.left)
            if node.right:
                size+=1
                stack.append(node.right)
        return size
    @staticmethod
    def _helpinsert(value, node):
        '''
        FIXME:
        Implement this function.
        '''
        # Insert into the smaller subtree (keeps the tree balanced-ish, though
        # not strictly complete), then sift the new value up by swapping with
        # the parent whenever the child is smaller.
        if node.left is None:
            new_node = Node(value)
            node.left = new_node
        elif node.right is None:
            new_node = Node(value)
            node.right = new_node
        else:
            left = Heap.size(node.left)
            right = Heap.size(node.right)
            new_node = node.left if left <= right else node.right
            new_node = Heap._helpinsert(value, new_node)
        if new_node.value < node.value:
            tmp = new_node.value
            new_node.value = node.value
            node.value = tmp
        return node
    def insert_list(self, xs):
        '''
        Given a list xs, insert each element of xs into self.
        FIXME:
        Implement this function.
        '''
        for x in xs:
            self.insert(x)
    def find_smallest(self):
        '''
        Returns the smallest value in the tree.
        FIXME:
        Implement this function.
        This function is not implemented in the lecture notes,
        but if you understand the structure of a Heap it should be easy to implement.
        HINT:
        Create a recursive staticmethod helper function,
        similar to how the insert and find functions have recursive helpers.
        '''
        # In a valid min-heap the minimum is at the root.
        # NOTE(review): returns None implicitly when the heap property is
        # violated (or the heap is empty) -- confirm that is intended.
        if Heap.is_heap_satisfied(self):
            return self.root.value
    @staticmethod
    def _find_smallest(node):
        # Unused by find_smallest above; returns the node's value (or None).
        if node is None:
            return
        else:
            return node.value
    def remove_min(self):
        '''
        Removes the minimum value from the Heap.
        If the heap is empty, it does nothing.
        FIXME:
        Implement this function.
        '''
        # Single-node (or empty) heap: just drop the root.
        if self.root is None or (self.root.left is None and self.root.right is None):
            self.root = None
            return self.root
        else:
            return Heap._remove(self.root)
    @staticmethod
    def _min(node):
        # Value of the "last" node: descend into the larger subtree to a leaf.
        # (Despite the name, this is not the minimum of the heap.)
        if node.right is None and node.left is None:
            val = node.value
            return val
        elif node.right is None:
            val = node.left.value
            return val
        else:
            left = Heap.size(node.left)
            right = Heap.size(node.right)
            if left > right:
                return Heap._min(node.left)
            else:
                return Heap._min(node.right)
    @staticmethod
    def _other(node):
        # Prune the child that was marked with the sentinel by _find_min.
        # NOTE(review): the literal string "other method" is used as an
        # in-band sentinel; a heap that legitimately stores this string will
        # be corrupted by remove_min -- fragile protocol.
        if node.left is None or node.right is None:
            pass
        elif node.left.value == "other method":
            node.left = None
        elif node.right.value == "other method":
            node.right = None
        else:
            left = Heap.size(node.left)
            right = Heap.size(node.right)
            if left > right:
                return Heap._other(node.left)
            else:
                return Heap._other(node.right)
    @staticmethod
    def _find_min(node):
        # Locate the same "last" node as _min and either mark it with the
        # sentinel (leaf with a sibling) or detach it (left-only leaf parent).
        if node.right is None and node.left is None:
            node.value = "other method"
            return node
        elif node.right is None:
            node.left = None
            return node
        else:
            left = Heap.size(node.left)
            right = Heap.size(node.right)
            if left > right:
                return Heap._find_min(node.left)
            else:
                return Heap._find_min(node.right)
    @staticmethod
    def _remove(node):
        # Classic remove-min: copy the last node's value over the root,
        # delete the last node, then sift the root value back down.
        # NOTE(review): returns the (mutated) root node, not the removed value.
        val = Heap._min(node)
        Heap._find_min(node)
        node.value = val
        Heap._other(node)
        Heap._search(node.value, node)
        return node
    @staticmethod
    def _search(value, node):
        # Sift-down: swap with the smaller child until the heap property holds.
        if Heap._is_heap_satisfied(node):
            return
        else:
            if node.right is None and node.left is None:
                return node
            elif node.right is None:
                if node.value <= node.left.value:
                    return node
                else:
                    tmp_node = node.value
                    node.value = node.left.value
                    node.left.value = tmp_node
            else:
                if node.left.value < node.right.value:
                    tmp_node = node.value
                    node.value = node.left.value
                    node.left.value = tmp_node
                    return Heap._search(value, node.left)
                else:
                    tmp_node = node.value
                    node.value = node.right.value
                    node.right.value = tmp_node
                    return Heap._search(value, node.right)
/sleipnir.transport-0.1.0.tar.gz/sleipnir.transport-0.1.0/src/sleipnir/transport/adapters/pyside.py | from __future__ import absolute_import
__author__ = "Carlos Martin <[email protected]>"
__license__ = "See LICENSE file for details"
# Import here any required modules.
from itertools import ifilter
__all__ = ['PySideReconnectionStrategy', 'PySideConnection']
# Pyside requirements
from PySide.QtCore import QObject, QSocketNotifier, QCoreApplication, QTimer
from PySide.QtNetwork import QNetworkConfigurationManager, QNetworkSession
# Project requirements
from sleipnir.core.decorators import cached
#Pika requirements
from pika.reconnection_strategies import ReconnectionStrategy
from pika.adapters.base_connection import BaseConnection, READ, WRITE
class PySideReconnectionStrategy(ReconnectionStrategy):
    """Pika reconnection strategy driven by Qt's network-session state."""
    can_reconnect = True
    def __init__(self):
        self.manager = QNetworkConfigurationManager()
        self.session = self._session()
        # React whenever the platform reports an online/offline transition.
        self.manager.onlineStateChanged.connect(self._connect)
    def _session(self):
        """Create a session bound to Qt's currently preferred configuration."""
        return QNetworkSession(self.manager.defaultConfiguration())
    def _connect(self, is_connected):
        # Only act on an explicit offline notification.
        if is_connected is not False:
            return
        # Rebuild the session and, where the platform allows it, bring the
        # network interface up ourselves (blocking until it is open).
        self.session = self._session()
        capabilities = self.manager.capabilities()
        if capabilities & QNetworkConfigurationManager.CanStartAndStopInterfaces:
            self.session.open()
            self.session.waitForOpened(-1)
    def on_connect_attempt(self, conn):
        self._connect(self.manager.isOnline())
    def on_connection_open(self, conn):
        # On forced-roaming platforms, reconnect whenever Qt switches configs.
        capabilities = self.manager.capabilities()
        if capabilities & QNetworkConfigurationManager.ForcedRoaming:
            self.session.newConfigurationActivated.connect(conn.force_reconnect)
    def on_connection_closed(self, conn):
        if not self.is_active:
            conn._reconnect()
class PySideTimer(object):
    """Bookkeeping wrapper around a Qt timer registered on a timer pool.

    The pool (an IOLoop/QObject) stores instances in ``pool.timers`` keyed by
    the Qt timer id returned by ``startTimer``; ``__call__`` is invoked when
    the corresponding timer fires.
    """
    def __init__(self, container, callback, single_shot):
        # 'container' is kept for signature compatibility; actual registration
        # happens later through register(pool, deadline).
        self.callback = callback
        self.single_shot = single_shot
        self.first_run = False
    def register(self, pool, deadline):
        """Start a Qt timer on *pool* and index this wrapper under its id."""
        timeout_id = pool.startTimer(deadline)
        pool.timers[timeout_id] = self
        return timeout_id
    def unregister(self, pool, timeout_id):
        """Stop the Qt timer and drop this wrapper from the pool's registry."""
        pool.killTimer(timeout_id)
        # Fix: the original deleted from the pool object itself
        # (``del pool[timeout_id]`` -> TypeError); the registry is pool.timers.
        del pool.timers[timeout_id]
    def __call__(self, pool, timeout_id):
        # Fire the callback; one-shot timers clean themselves up afterwards.
        self.callback()
        if self.single_shot:
            self.unregister(pool, timeout_id)
class PySideConnectionPoller(QObject):
    """Bridges a pika connection's socket to Qt socket notifiers.

    ``poll`` creates read/write/error QSocketNotifier instances for the
    connection's file descriptor and forwards their activation signals to the
    connection's read/write/disconnect handlers; ``unpoll`` drops them.
    """
    def __init__(self, connection):
        # Fix: a QObject subclass must initialise its base class before any
        # Qt machinery is used, otherwise PySide raises at runtime.
        super(PySideConnectionPoller, self).__init__()
        # Set container.  NOTE: this attribute shadows QObject.parent().
        self.parent = connection
    def __iter__(self):
        # Iterate over the data-carrying notifiers (error watcher excluded).
        return iter((self.reader, self.writer,))
    def _connect(self, notifier_type, callback):
        # Build a disabled notifier wired to *callback*; enabled on demand.
        notifier = QSocketNotifier(self.parent.fileno, notifier_type)
        notifier.activated.connect(callback)
        notifier.setEnabled(False)
        return notifier
    def _read(self, _):
        self.parent._handle_read()
        self.parent._manage_event_state()
    def _write(self, _):
        self.parent._handle_write()
        self.parent._manage_event_state()
    def _error(self, _):
        self.parent._handle_disconnect()
    def poll(self):
        # Create Notifiers
        self.reader = self._connect(QSocketNotifier.Read, self._read)
        self.writer = self._connect(QSocketNotifier.Write, self._write)
        # Create Error watcher; always armed.
        self.errors = self._connect(QSocketNotifier.Exception, self._error)
        self.errors.setEnabled(True)
        # Enable reader/writer according to the connection's current interest.
        self.parent.ioloop.update_handler(None, self.parent.event_state)
    def unpoll(self):
        # Drop the notifiers; Qt disconnects them when they are collected.
        self.reader = self.writer = self.errors = None
class PySideConnection(BaseConnection):
    # pika connection adapter that plugs the AMQP socket into the Qt event loop.
    def __iter__(self):
        # Yields the (reader, writer) socket notifiers of the poller.
        return iter(self.notifiers)
    def _adapter_connect(self):
        # Connect (blockingly!) to the server
        BaseConnection._adapter_connect(self)
        # Start with write interest so buffered handshake frames get flushed.
        self.event_state |= WRITE
        # Setup the IOLoop
        self.ioloop = IOLoop(self.notifiers)
        # Let everyone know we're connected
        self._on_connected()
    def _flush_outbound(self):
        # Re-evaluate read/write interest; the Qt notifiers do the actual I/O.
        self._manage_event_state()
    @property
    def fileno(self):
        # File descriptor of the underlying socket, consumed by the notifiers.
        return self.socket.fileno()
    @property
    @cached
    def notifiers(self):
        # Built lazily, once per connection (memoized by the project's @cached).
        return PySideConnectionPoller(self)
class IOLoop(QObject):
    """Minimal pika-style ioloop backed by the Qt event loop.

    Timers are native Qt timers (``startTimer``/``timerEvent``) tracked in
    ``self.timers`` as {timer_id: PySideTimer}; socket readiness is delegated
    to the poller's QSocketNotifier objects.
    """
    def __init__(self, poller):
        # Fix: QObject base must be initialised for timerEvent dispatch to work.
        super(IOLoop, self).__init__()
        self.poller = poller
        self.timers = {}
        # Whether start() entered the Qt event loop itself (so stop() knows
        # whether to quit it).  Fix: previously unset until start() ran.
        self.exec_ = False
    def timerEvent(self, event):
        # Fix: the original referenced an undefined 'timeout_id' (NameError).
        timer_id = event.timerId()
        self.timers[timer_id](self, timer_id)
    def add_timeout(self, deadline, callback, oneshot=False):
        """Schedule *callback*; returns the Qt timer id used as handle."""
        import time  # local import: 'time' is not imported at module level
        # NOTE(review): 'deadline' is treated as an absolute epoch time and the
        # resulting delta (seconds) is handed to QObject.startTimer, which
        # expects milliseconds -- confirm units against callers before changing.
        deadline = deadline - time.time()
        return PySideTimer(self, callback, oneshot).register(self, deadline)
    def add_soft_timeout(self, min_time, max_time, callback, oneshot=False):
        raise NotImplementedError
    def remove_timeout(self, handler):
        """Cancel the timer identified by *handler* (the id add_timeout returned)."""
        # Fix: the original referenced an undefined 'timeout_id' (NameError).
        self.killTimer(handler)
        self.timers.pop(handler, None)
    def stop(self):
        # Cancel every outstanding timer.  Fix: the original unpacked pairs
        # while iterating the dict's keys (ValueError) and mutated during
        # iteration; work from a snapshot instead.
        for timer_id in list(self.timers):
            self.killTimer(timer_id)
        self.timers.clear()
        QTimer.singleShot(0, self.poller.unpoll)
        self.exec_ and QCoreApplication.instance().quit()
    def start(self, exec_=True):
        self.exec_ = exec_
        QTimer.singleShot(0, self.poller.poll)
        self.exec_ and QCoreApplication.instance().exec_()
    def remove_handler(self, fdn=None):
        # Disable both socket notifiers (read and write).
        for notifier in self.poller:
            notifier.setEnabled(False)
    def update_handler(self, fdn, event_state):
        self.remove_handler()
        # Re-enable notifiers according to the requested interest mask.
        if event_state & READ:
            self.poller.reader.setEnabled(True)
        if event_state & WRITE:
            self.poller.writer.setEnabled(True)
/snapper-ont-0.4.5.tar.gz/snapper-ont-0.4.5/snapper/src/methods.py | import numpy as np
from itertools import combinations, product
import re
from snapper.src.seq_processing import gen_variants, letter_codes_rev, letter_anticodes
from scipy.stats import chi2_contingency, mode
from tqdm import tqdm
from multiprocessing import Process, Manager
# The four unambiguous nucleotide letters.
regular_letters = ['A','G','C','T']
# IUPAC degenerate-base codes, each standing for a set of nucleotides.
non_regular_letters = ['M', 'R', 'W', 'S', 'Y','K', 'V', 'H', 'D','B']
def filter_pos_variants_l3(pos_variants):
    """Filter position variants of length 3.

    Sorts each variant, drops variants spanning 6 or more positions, and
    de-duplicates while preserving first-seen order.

    :param pos_variants: iterable of position index collections (length 3)
    :returns: list of sorted position tuples
    """
    filtered_pos_variants = []
    for pos_variant in pos_variants:
        ordered = tuple(sorted(pos_variant))
        # Positions must fit inside a window narrower than 6.
        if ordered[-1] - ordered[0] >= 6:
            continue
        # Fix: de-duplicate on the *sorted* tuple.  The original tested the
        # unsorted input against the sorted stored entries, so a permutation
        # of an already-seen variant slipped through as a duplicate.
        if ordered not in filtered_pos_variants:
            filtered_pos_variants.append(ordered)
    return filtered_pos_variants
def filter_pos_variants(pos_variants):
    """Filter combinatorial position variants.

    Length-3 variants are delegated to their stricter custom filter.  All
    other lengths: sort each variant, drop those spanning 6 or more
    positions, and de-duplicate (first occurrence wins), returning tuples.

    :param pos_variants: non-empty iterable of position index collections
    :returns: list of sorted position tuples
    """
    # custom filtering for pos_variants with length of 3
    if len(pos_variants[0]) == 3:
        return filter_pos_variants_l3(pos_variants)
    # Collapsed the original two-pass loop (and its commented-out dead code)
    # into a single sort/span-check/de-dup pass with identical results.
    filtered_pos_variants = []
    for pos_variant in pos_variants:
        ordered = tuple(sorted(pos_variant))
        if ordered[-1] - ordered[0] >= 6:
            continue
        if ordered not in filtered_pos_variants:
            filtered_pos_variants.append(ordered)
    return filtered_pos_variants
def filter_motifs(motif_variants):
    """Keep only motifs that contain at least one 'C' or at least one 'A'."""
    return [motif for motif in motif_variants if 'C' in motif or 'A' in motif]
def extract_template_subset(pos_variant, motif_variant, seq_array):
    """Return the rows of seq_array matching motif_variant at the given columns.

    A '.' in motif_variant is a wildcard: that position is not constrained.
    """
    matched = seq_array
    for letter, column in zip(motif_variant, pos_variant):
        if letter == '.':
            continue
        matched = matched[matched[:, column] == letter]
    return matched
def extract_template_count(pos_variant, motif_variant, seq_array):
    """Count rows of seq_array whose letters equal motif_variant at pos_variant."""
    remaining = seq_array
    for column, letter in zip(pos_variant, motif_variant):
        remaining = remaining[remaining[:, column] == letter]
    return len(remaining)
def gen_regexp_template(motif_variant, pos_variant, length=6):
    """Build a dotted regexp template of the given length.

    Positions are shifted so the first position in pos_variant maps to index 0.
    """
    offset = pos_variant[0]
    chars = ['.'] * length
    for pos, letter in zip(pos_variant, motif_variant):
        chars[pos - offset] = letter
    return ''.join(chars)
def normalized_variation(array):
    """Coefficient of variation: standard deviation scaled by the mean."""
    mean_value = np.mean(array)
    return np.std(array) / mean_value
def local_filter_seqs(seqs, pos_variant, motif_variant):
    """Drop sequences whose letters at pos_variant match the motif template.

    '.' wildcards in the motif are mapped to 'N' before expanding the
    template into its concrete variants.
    """
    template = ''.join(motif_variant).replace('.', 'N')
    template_subvariants = gen_variants(template)
    kept = []
    for seq in seqs:
        extracted = ''.join(seq[pos] for pos in pos_variant)
        if extracted not in template_subvariants:
            kept.append(seq)
    return kept
def modify_seq(seq, pos, target_letter):
    """Return a copy of seq with the character at pos replaced by target_letter."""
    chars = list(seq)
    chars[pos] = target_letter
    return ''.join(chars)
def generate_reference_freqs_parallel(seq_array, batch, dict_per_length):
    """Worker: count matches for each (positions, motif) task in *batch*.

    Results are written into the shared dict keyed by (motif, positions).
    """
    for positions, motif in batch:
        count = extract_template_count(positions, motif, seq_array)
        dict_per_length[(motif, positions)] = count
def generate_reference_freqs(reference, length, threads, lengths=(4,5,6)):
    """Index motif occurrence counts over all unique windows of a reference sequence.

    Builds an array of all unique ``length``-sized substrings of ``reference``
    and, for each anchor length in ``lengths``, counts how many windows match
    every (motif, positions) template. Counting is distributed over
    ``threads + 1`` worker processes writing into a shared Manager dict.

    Fixes over the previous version:
    - the full (positions x motifs) task list is built once per anchor length
      instead of being re-materialized for every worker batch;
    - the dead ``try/except IndexError`` is removed (slicing never raises
      IndexError), and the last worker now explicitly takes the remainder of
      the task list, so no tasks are silently dropped when the remainder
      exceeds the batch size.

    Args:
        reference: Reference sequence (string) to index.
        length: Window size used to cut the reference into subsequences.
        threads: Number of full-sized worker batches; one extra worker handles
            the remainder.
        lengths: Anchor motif lengths to index.

    Returns:
        Tuple of (``{anchor_length: {(motif, positions): count}}``,
        number of unique reference windows).
    """
    variants_counter = {}
    seqs = list(set(
        reference[i:i+length] for i in range(len(reference) - length)
    ))
    seq_array = np.array([list(s) for s in seqs])
    print(len(seq_array))
    for LENGTH in lengths:
        print('Reference indexing with length of {}...'.format(LENGTH))
        manager = Manager()
        dict_per_length = manager.dict()
        pos_variants = filter_pos_variants(list(combinations(range(0, length), r=LENGTH)))
        motif_variants = filter_motifs(list(product(regular_letters, repeat=LENGTH)))
        # Build the task list once; previously it was rebuilt for every batch.
        tasks = list(product(pos_variants, motif_variants))
        batch_len = len(tasks) // threads
        processes = []  # all worker processes
        for i in range(threads + 1):
            if i == threads:
                # Last worker takes whatever remains so no task is dropped.
                batch = tasks[i * batch_len:]
            else:
                batch = tasks[i * batch_len:(i + 1) * batch_len]
            p = Process(target=generate_reference_freqs_parallel,
                        args=(seq_array, batch, dict_per_length,))
            processes.append(p)
            p.start()
        # Wait for all workers before reading the shared dict.
        for p in processes:
            p.join()
        variants_counter[LENGTH] = dict(dict_per_length)
    return variants_counter, len(seq_array)
def add_N(motif):
    """Pad ``motif`` with an 'N' on each side that does not already start/end with one."""
    prefix = '' if motif[0] == 'N' else 'N'
    suffix = '' if motif[-1] == 'N' else 'N'
    return prefix + motif + suffix
def is_superset(motif1, motif2, edgelength=2):
    """Return True if every variant of ``motif2`` occurs inside some variant of ``motif1``.

    Both motifs are first N-padded. When ``motif2`` is longer than ``motif1``,
    ``motif1`` is extended by ``edgelength`` wildcards on each side so a longer
    submotif can still be matched against it.
    """
    motif1 = add_N(motif1)
    motif2 = add_N(motif2)
    if len(motif2) <= len(motif1):
        extended_motif1 = motif1
    else:
        pad = 'N' * edgelength
        extended_motif1 = pad + motif1 + pad
    variants1 = gen_variants(extended_motif1)
    for variant2 in gen_variants(motif2):
        # Every variant of motif2 must be a substring of at least one variant of motif1.
        if not any(variant2 in variant1 for variant1 in variants1):
            return False
    return True
def get_alternate_variants(motif_variant, lenmotif=11, range_of_filtering=5):
    """Generate shifted placements of a motif within the ``lenmotif``-wide window.

    ``motif_variant`` is a (score, letters, positions) triple. Leading and
    trailing 'N' letters are trimmed first, then the motif is re-anchored at
    every start position within ``range_of_filtering`` of its original span;
    placements that would run past the window are not produced.
    """
    seq_variant, pos_variant = motif_variant[1], motif_variant[2]
    # Trim uninformative 'N' letters from both ends, keeping positions in sync.
    while seq_variant[0] == 'N':
        seq_variant = seq_variant[1:]
        pos_variant = pos_variant[1:]
    while seq_variant[-1] == 'N':
        seq_variant = seq_variant[:-1]
        pos_variant = pos_variant[:-1]
    start = max(0, pos_variant[0] - range_of_filtering)
    stop = min(lenmotif, pos_variant[-1] + range_of_filtering)
    alternate_variants = []
    for anchor in range(start, stop):
        offset = anchor - pos_variant[0]
        shifted = tuple(p + offset for p in pos_variant)
        if shifted[-1] >= lenmotif:
            # Further shifts would only move the motif further out of the window.
            break
        alternate_variants.append((motif_variant[0], seq_variant, shifted))
    return alternate_variants
def is_subset(motif1, motif2, edgelength=2):
    """Return True if ``motif1`` is contained within ``motif2`` (inverse of is_superset)."""
    return is_superset(motif2, motif1, edgelength=edgelength)
def variant_counts_parallel(seq_array, ref_motifs_counter, N_REF, batch, LENGTH, total_variants_counter_list):
    """Worker process: score every (positions, motif) template in ``batch``.

    For each template, compares its occurrence count in the variant set
    (``seq_array``) against its pre-computed reference count with a chi-square
    test on a 2x2 contingency table, and appends
    ``(chi2_statistic, motif, positions)`` records to the shared
    ``total_variants_counter_list`` (a multiprocessing.Manager list).
    Templates absent from the reference index, or absent from both sets,
    get a score of 0.
    """
    variants_counter_list = []
    N_VARIANT = len(seq_array)
    for pos_variant, motif_variant in batch:
        try:
            reference_count = ref_motifs_counter[LENGTH][(motif_variant, pos_variant)]
        except KeyError:
            # Template was filtered out of the reference index -> score 0.
            variants_counter_list.append((0, motif_variant, pos_variant))
        else:
            variant_count = extract_template_count(pos_variant, motif_variant, seq_array)
            if variant_count == 0 and reference_count == 0:
                # chi2_contingency would fail on an all-zero row; score 0 instead.
                variants_counter_list.append((0, motif_variant, pos_variant))
            else:
                # 2x2 table: template present/absent in variant set vs reference set.
                chi2_result = chi2_contingency(
                    [
                        [variant_count, N_VARIANT-variant_count],
                        [reference_count, N_REF-reference_count],
                    ]
                )
                # chi2_log_pval = -np.log10(chi2_result[1])
                chi2_statistic = chi2_result[0]
                variants_counter_list.append((chi2_statistic, motif_variant, pos_variant))
    # Single += so results reach the Manager proxy list in one extend per worker.
    total_variants_counter_list+=variants_counter_list
def collect_variant_counts(seq_array, ref_motifs_counter, N_REF, threads, lengths=(4,5,6), lenmotif=11):
    """Score all anchor-motif templates against the reference, in parallel.

    For every anchor length in ``lengths``, builds the (positions x motifs)
    task list, splits it across ``threads + 1`` worker processes running
    :func:`variant_counts_parallel`, and merges the resulting
    ``(chi2_statistic, motif, positions)`` records, sorted by descending score.

    Fix over the previous version: the ``try/except IndexError`` around batch
    slicing was dead code (slicing never raises IndexError), so when the task
    count was not evenly divisible the tail of the task list could be silently
    dropped. The last worker now explicitly takes the remainder.

    Args:
        seq_array: Array of variant sequences (one row per sequence).
        ref_motifs_counter: Per-length reference counts from generate_reference_freqs.
        N_REF: Number of reference windows.
        threads: Number of full-sized worker batches; one extra worker handles
            the remainder.
        lengths: Anchor motif lengths to evaluate.
        lenmotif: Width of the motif window.

    Returns:
        List of (chi2_statistic, motif, positions) sorted in descending order.
    """
    merged_variants_counter_list = []
    for LENGTH in lengths:
        print('\tOBSERVING ANCHOR MOTIFS WITH LENGTH OF', LENGTH)
        pos_variants = filter_pos_variants(list(combinations(range(0, lenmotif), r=LENGTH)))
        motif_variants = filter_motifs(list(product(regular_letters, repeat=LENGTH)))
        args_list = list(product(pos_variants, motif_variants))
        batch_len = len(args_list) // threads
        total_variants_counter_list = Manager().list()  # shared output list
        processes = []  # all worker processes
        for i in range(threads + 1):
            if i == threads:
                # Last worker takes whatever remains so no task is dropped.
                batch = args_list[i * batch_len:]
            else:
                batch = args_list[i * batch_len:(i + 1) * batch_len]
            p = Process(target=variant_counts_parallel,
                        args=(seq_array, ref_motifs_counter, N_REF, batch, LENGTH, total_variants_counter_list))
            processes.append(p)
            p.start()
        # Wait for all workers before reading the shared list.
        for p in processes:
            p.join()
        merged_variants_counter_list += list(total_variants_counter_list)
    merged_variants_counter_list.sort(reverse=True)
    return merged_variants_counter_list
def get_significant_letters(sub_seq_array, top_variant, pos, reference, threshold_ratio):
    """Determine which letters are significantly over-represented at ``pos`` of a motif.

    Counts each letter's frequency at column ``pos`` within the motif-matching
    subset (``sub_seq_array``) and, by substituting the letter into the motif's
    regexp template, within the ``reference`` string. Starting from the most
    frequent letter (accepted a priori), less frequent letters are accepted
    until the reference-normalized frequency ratio to the top letter exceeds
    ``threshold_ratio``.

    Returns:
        Sorted tuple of accepted letters.
    """
    print('\tLocal motif adjustment...')
    reference_letter_freqs = {'A':0, 'G':0, 'T':0, 'C':0}
    variant_subset_letter_freqs = {'A':0, 'G':0, 'T':0, 'C':0}
    ref_vs_variant_ratios = {'A':0, 'G':0, 'T':0, 'C':0}
    variant_length = (top_variant[2][-1] - top_variant[2][0] + 1)
    re_variant = gen_regexp_template(top_variant[1], top_variant[2], length=variant_length)
    # Column of letters at the inspected position, one per matching sequence.
    pos_letters = sub_seq_array[:,pos]
    for letter in reference_letter_freqs:
        # Force `letter` at the inspected position of the regexp template,
        # then count its occurrences in the reference.
        re_variant_mod = modify_seq(re_variant, pos-top_variant[2][0], letter)
        # NOTE(review): re.findall counts non-overlapping matches only -- confirm intended.
        ref_letter_count = len(re.findall(re_variant_mod, reference))
        variant_subset_letter_count = len(pos_letters[pos_letters == letter])
        reference_letter_freqs[letter] += ref_letter_count
        variant_subset_letter_freqs[letter] += variant_subset_letter_count
    list_variant_letter_freqs = [
        (variant_subset_letter_freqs[k], k) for k in variant_subset_letter_freqs
    ]
    list_variant_letter_freqs.sort(reverse=True)
    # consider the first letter to be presented apriori
    the_first_letter = list_variant_letter_freqs[0][1]
    ref_vs_variant_ratios[the_first_letter] = 1
    significant_letters = set([the_first_letter])
    for record in list_variant_letter_freqs[1:]:
        try:
            ref_letter_ratio = reference_letter_freqs[the_first_letter]/reference_letter_freqs[record[1]]
        except ZeroDivisionError:
            ref_letter_ratio = np.inf
        try:
            variant_subset_letter_ratio = variant_subset_letter_freqs[the_first_letter]/variant_subset_letter_freqs[record[1]]
        except ZeroDivisionError:
            variant_subset_letter_ratio = np.inf
        ref_vs_variant_ratio = variant_subset_letter_ratio/ref_letter_ratio
        ref_vs_variant_ratios[record[1]] = round(ref_vs_variant_ratio, 4)
        # Letters are processed most- to least-frequent, so once one letter
        # fails the ratio test every rarer letter is skipped as well.
        if ref_vs_variant_ratio > threshold_ratio:
            break
        significant_letters.add(record[1])
    return tuple(sorted(list(significant_letters)))
def adjust_letter(seq_array, top_variant, pos, reference, threshold_ratio=5):
    """Resolve the degenerate-letter code for position ``pos`` of a motif.

    Extracts the subset of sequences matching ``top_variant``, finds the
    significantly enriched letters at ``pos``, and maps the resulting letter
    set through ``letter_codes_rev``.
    """
    subset = extract_template_subset(top_variant[2], top_variant[1], seq_array)
    letters = get_significant_letters(subset, top_variant, pos, reference,
                                      threshold_ratio=threshold_ratio)
    return letter_codes_rev[letters]
def change_subset_motif(supermotif, submotif, edgelength=2):
    """Replace ``submotif`` with its superset motif, re-anchored at matching positions.

    Both motifs are (score, letters, positions) triples. The most common
    alignment offset between the motifs' variants decides where the supermotif
    is placed; the resulting positions are clipped to the hard-coded motif
    window of 11 columns (indices 0..10).
    """
    extended_supermotif = 'N'*edgelength + ''.join(supermotif[1]) + 'N'*edgelength
    super_variants = gen_variants(extended_supermotif)
    sub_variants = gen_variants(''.join(submotif[1]))
    # Collect every offset at which a submotif variant sits inside a supermotif variant.
    shifts = []
    for subvariant in sub_variants:
        for supervariant in super_variants:
            if subvariant in supervariant:
                shift = edgelength - supervariant.find(subvariant)
                shifts.append(shift)
    # NOTE(review): `.mode[0]` relies on scipy.stats.mode returning an array;
    # recent scipy versions return a scalar here -- confirm the pinned scipy version.
    shift = mode(shifts).mode[0]
    left_pos = max(0, submotif[2][0] + shift)
    right_pos = min(11, submotif[2][0] + shift + len(supermotif[2]))
    # check left edge case
    if shift < 0:
        adjusted_subvariant = (
            submotif[0],
            supermotif[1][-shift:],
            tuple(range(submotif[2][0], submotif[2][0] + len(supermotif[1][-shift:])))
        )
    # check right edge case
    elif submotif[1][-1] in regular_letters and submotif[2][-1] == 10 and supermotif[1][-1] == 'N':
        adjusted_subvariant = (
            submotif[0],
            supermotif[1][:-1],
            tuple(range(left_pos, 11))
        )
    # common case
    else:
        adjusted_subvariant = (
            submotif[0],
            supermotif[1],
            tuple(range(left_pos, right_pos))
        )
    # just a patch, must be formalized!!
    # Rebuild as a mutable list and trim from the right until letters and
    # positions agree in length and fit inside the 11-column window.
    if len(adjusted_subvariant[1]) != len(adjusted_subvariant[2]):
        adjusted_subvariant = [
            submotif[0],
            supermotif[1],
            tuple(range(left_pos, left_pos + len(supermotif[1])))
        ]
        while adjusted_subvariant[2][-1] > 10:
            adjusted_subvariant[1] = adjusted_subvariant[1][:-1]
            adjusted_subvariant[2] = adjusted_subvariant[2][:-1]
    return tuple(adjusted_subvariant)
def extend_template(top_variant, maxlength=11):
    """Widen a motif by one wildcard column on each side that is not at a window border.

    Returns a (score, letters_tuple, positions_list) triple whose letters are
    re-rendered through the regexp template so gaps become '.' wildcards.
    """
    letters = list(top_variant[1])
    positions = list(top_variant[2])
    if top_variant[2][0] != 0:
        # Room on the left: prepend a wildcard column.
        positions.insert(0, positions[0] - 1)
        letters.insert(0, '.')
    if top_variant[2][-1] != maxlength - 1:
        # Room on the right: append a wildcard column.
        positions.append(positions[-1] + 1)
        letters.append('.')
    span = positions[-1] - positions[0] + 1
    re_variant = gen_regexp_template(letters, positions, length=span)
    return (
        top_variant[0],
        tuple(re_variant),
        list(range(positions[0], positions[-1] + 1))
    )
def save_results(motifs, out_fasta):
    """Write motifs to ``out_fasta`` in FASTA format, one record per motif.

    Each motif is a (confidence, letters) pair; the confidence goes into the
    FASTA header and the joined letters form the sequence line.
    """
    with open(out_fasta, 'w') as handle:
        for number, motif in enumerate(motifs, start=1):
            handle.write('>MOTIF_{} conflevel={}\n{}\n'.format(
                number, motif[0], ''.join(motif[1])))
def save_k_mers(motifs, out_fasta):
    """Write k-mer strings to ``out_fasta`` in FASTA format, numbering each record."""
    with open(out_fasta, 'w') as handle:
        for number, kmer in enumerate(motifs, start=1):
            handle.write('>MOTIF_{}\n{}\n'.format(number, kmer))
/etna_ts-1.3.1-py3-none-any.whl/etna/transforms/change_points_trend.py | from copy import deepcopy
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
import numpy as np
import pandas as pd
from ruptures.base import BaseEstimator
from ruptures.costs import CostLinear
from sklearn.base import RegressorMixin
from etna.transforms.base import PerSegmentWrapper
from etna.transforms.base import Transform
TTimestampInterval = Tuple[pd.Timestamp, pd.Timestamp]
TDetrendModel = Type[RegressorMixin]
class _OneSegmentChangePointsTrendTransform(Transform):
    """_OneSegmentChangePointsTransform subtracts multiple linear trend from series.

    Change points are detected with ``change_point_model``; a separate copy of
    ``detrend_model`` is fitted on every interval of stable trend.
    """
    def __init__(
        self,
        in_column: str,
        change_point_model: BaseEstimator,
        detrend_model: TDetrendModel,
        **change_point_model_predict_params,
    ):
        """Init _OneSegmentChangePointsTrendTransform.
        Parameters
        ----------
        in_column:
            name of column to apply transform to
        change_point_model:
            model to get trend change points
        detrend_model:
            model to get trend in data
        change_point_model_predict_params:
            params for change_point_model predict method
        """
        self.in_column = in_column
        self.out_columns = in_column
        self.change_point_model = change_point_model
        self.detrend_model = detrend_model
        # One detrend model per stable-trend interval, filled in fit().
        self.per_interval_models: Optional[Dict[TTimestampInterval, TDetrendModel]] = None
        self.intervals: Optional[List[TTimestampInterval]] = None
        self.change_point_model_predict_params = change_point_model_predict_params
    def _prepare_signal(self, series: pd.Series) -> np.ndarray:
        """Prepare series for change point model."""
        signal = series.to_numpy()
        if isinstance(self.change_point_model.cost, CostLinear):
            # CostLinear expects a 2-D (n_samples, n_features) signal.
            signal = signal.reshape((-1, 1))
        return signal
    def _get_change_points(self, series: pd.Series) -> List[pd.Timestamp]:
        """Fit change point model with series data and predict trends change points."""
        signal = self._prepare_signal(series=series)
        timestamp = series.index
        self.change_point_model.fit(signal=signal)
        # last point in change points is the first index after the series
        change_points_indices = self.change_point_model.predict(**self.change_point_model_predict_params)[:-1]
        change_points = [timestamp[idx] for idx in change_points_indices]
        return change_points
    @staticmethod
    def _build_trend_intervals(change_points: List[pd.Timestamp]) -> List[TTimestampInterval]:
        """Create list of stable trend intervals from list of change points.

        The intervals cover the whole timeline: the first starts at
        ``pd.Timestamp.min`` and the last ends at ``pd.Timestamp.max``.
        """
        change_points = sorted(change_points)
        left_border = pd.Timestamp.min
        intervals = []
        for point in change_points:
            right_border = point
            intervals.append((left_border, right_border))
            left_border = right_border
        intervals.append((left_border, pd.Timestamp.max))
        return intervals
    def _init_detrend_models(
        self, intervals: List[TTimestampInterval]
    ) -> Dict[Tuple[pd.Timestamp, pd.Timestamp], TDetrendModel]:
        """Create copy of detrend model for each timestamp interval."""
        per_interval_models = {interval: deepcopy(self.detrend_model) for interval in intervals}
        return per_interval_models
    def _get_timestamps(self, series: pd.Series) -> np.ndarray:
        """Convert ETNA timestamp-index to a list of timestamps to fit regression models."""
        timestamps = series.index
        # Regressors are fitted on POSIX timestamps as a single numeric feature.
        timestamps = np.array([[ts.timestamp()] for ts in timestamps])
        return timestamps
    def _fit_per_interval_model(self, series: pd.Series):
        """Fit per-interval models with corresponding data from series."""
        for interval in self.intervals:
            tmp_series = series[interval[0] : interval[1]]
            x = self._get_timestamps(series=tmp_series)
            y = tmp_series.values
            self.per_interval_models[interval].fit(x, y)
    def _predict_per_interval_model(self, series: pd.Series) -> pd.Series:
        """Apply per-interval detrending to series."""
        # Explicit float dtype: an empty Series without dtype defaults to
        # object dtype and triggers a pandas DeprecationWarning.
        trend_series = pd.Series(index=series.index, dtype=float)
        for interval in self.intervals:
            tmp_series = series[interval[0] : interval[1]]
            if tmp_series.empty:
                continue
            x = self._get_timestamps(series=tmp_series)
            trend = self.per_interval_models[interval].predict(x)
            trend_series[tmp_series.index] = trend
        return trend_series
    def fit(self, df: pd.DataFrame) -> "_OneSegmentChangePointsTrendTransform":
        """Fit OneSegmentChangePointsTransform: find trend change points in df, fit detrend models with data from intervals of stable trend.
        Parameters
        ----------
        df:
            one segment dataframe indexed with timestamp
        Returns
        -------
        self
        """
        # Skip leading NaNs: fit only from the first valid observation onwards.
        series = df.loc[df[self.in_column].first_valid_index() :, self.in_column]
        change_points = self._get_change_points(series=series)
        self.intervals = self._build_trend_intervals(change_points=change_points)
        self.per_interval_models = self._init_detrend_models(intervals=self.intervals)
        self._fit_per_interval_model(series=series)
        return self
    def transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Split df to intervals of stable trend and subtract trend from each one.
        Parameters
        ----------
        df:
            one segment dataframe to subtract trend
        Returns
        -------
        detrended df: pd.DataFrame
            df with detrended in_column series
        """
        # Suppress pandas SettingWithCopyWarning for the in-place column update.
        df._is_copy = False
        series = df.loc[df[self.in_column].first_valid_index() :, self.in_column]
        trend_series = self._predict_per_interval_model(series=series)
        df.loc[:, self.in_column] -= trend_series
        return df
    def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
        """Split df to intervals of stable trend according to previous change point detection and add trend to each one.
        Parameters
        ----------
        df:
            one segment dataframe to turn trend back
        Returns
        -------
        df: pd.DataFrame
            df with restored trend in in_column
        """
        # Suppress pandas SettingWithCopyWarning for the in-place column update.
        df._is_copy = False
        series = df.loc[df[self.in_column].first_valid_index() :, self.in_column]
        trend_series = self._predict_per_interval_model(series=series)
        df.loc[:, self.in_column] += trend_series
        return df
class ChangePointsTrendTransform(PerSegmentWrapper):
    """Subtract piecewise linear trends (one per change-point interval) from every segment.

    Wraps :class:`_OneSegmentChangePointsTrendTransform` so the detrending is
    applied independently to each segment of the dataset.
    """
    def __init__(
        self,
        in_column: str,
        change_point_model: BaseEstimator,
        detrend_model: TDetrendModel,
        **change_point_model_predict_params,
    ):
        """Init ChangePointsTrendTransform.
        Parameters
        ----------
        in_column:
            name of the column the transform is applied to
        change_point_model:
            model that detects trend change points
        detrend_model:
            regression model used to fit the trend on each stable interval
        change_point_model_predict_params:
            keyword arguments forwarded to ``change_point_model.predict``
        """
        self.in_column = in_column
        self.change_point_model = change_point_model
        self.detrend_model = detrend_model
        self.change_point_model_predict_params = change_point_model_predict_params
        per_segment_transform = _OneSegmentChangePointsTrendTransform(
            in_column=self.in_column,
            change_point_model=self.change_point_model,
            detrend_model=self.detrend_model,
            **self.change_point_model_predict_params,
        )
        super().__init__(transform=per_segment_transform)
/MyClickUp-0.1.5.tar.gz/MyClickUp-0.1.5/README.md | =================
MyClickUp
=================
2022/january/1 - Jose Cordeiro
The MyClickUp class allows you to encapsulate the integration with the ClickUp API.
To connect:
myClickUp = MyClickUp(token="18942493_f3779a347ec29bbd3f5e9d9c9e151bfc63462695")
**getTeam()**: Returns a dict with the data of users registered in ClickUp. See ClickUp API documentation for details of the structure of this data.
team = myClickUp.getTeam()
**getSpace(spaceId)**: Returns a dict with data from a Space. See ClickUp API documentation for details of the structure of this data.
space = myClickUp.getSpace(spaceId="1236")
**getSpaceFolders(spaceId)**: Returns a list with the Folders of a Space. See ClickUp API documentation for details of the structure of this data.
folders = myClickUp.getSpaceFolders(spaceId="1236")
**getFolder(folderId)**: Returns a dict with data from a Folder. See ClickUp API documentation for details of the structure of this data.
folder = myClickUp.getFolder(folderId="1236")
**getFolderByName(folderName, spaceId)**: Returns a dict with the data of the Folder matching the given name in a Space. See ClickUp API documentation for details of the structure of this data.
folder = myClickUp.getFolderByName(folderName="Folder ABC", spaceId="1236")
**getFolderLists(folderId)**: Returns a list with the Lists of a Folder. See ClickUp API documentation for details of the structure of this data.
lists = myClickUp.getFolderLists(folderId="1236")
**getList(listId)**: Returns a dict with data from a List. See ClickUp API documentation for details of the structure of this data.
task = myClickUp.getList(listId="1236")
**getListsByName(listName, folderId)**: Returns a dict with data from a List. See ClickUp API documentation for details of the structure of this data.
task = myClickUp.getListsByName(listName="List A", folderId="1236")
**getListTasks(listId)**: Returns a list with the Tasks of a List. See ClickUp API documentation for details of the structure of this data.
tasks = myClickUp.getListTasks(listId="1236")
**getTask(taskId, flagSubtasks)**: Returns a dict with data from a Task. See ClickUp API documentation for details of the structure of this data.
task = myClickUp.getTask(taskId="1236", flagSubtasks=True)
**getTasksByName(taskName, listId)**: Returns a dict with data from a Task. See ClickUp API documentation for details of the structure of this data.
field = myClickUp.getTasksByName(taskName="Test", listId="1236")
**getListCustomFields(listId)**: Returns a list with the Custom Fields of a List. See ClickUp API documentation for details of the structure of this data.
fields = myClickUp.getListCustomFields(listId="1236")
**getListCustomFieldByName(listId, fieldName)**: Returns a dict with the details of a Custom Fields of a List. See ClickUp API documentation for details of the structure of this data.
field = myClickUp.getListCustomFieldByName(listId="1236", fieldName="CATEGORY")
**getCustomFieldOption(listId, fieldName, optionName)**: Returns a list with the options of a Custom Fields of a List. See ClickUp API documentation for details of the structure of this data.
options = myClickUp.getCustomFieldOption(listId="1236", fieldName="CATEGORY", optionName="abc")
**getUser(userId)**: Returns a dict with a Users's data. See ClickUp API documentation for details of the structure of this data.
user = myClickUp.getUser(userId=1236)
**createFolder(folderJson, spaceId)**: Creates a Folder in a given Space, the Folder details are specified in the dict "folderJson", according to the ClickUp API . See ClickUp API documentation for details of the structure of this data.
**createList(listJson, folderId)**: Creates a List in a given Folder, the List details are specified in the dict "listJson", according to the ClickUp API. See ClickUp API documentation for details of the structure of this data.
myList = {
"name": "BLA BLA BLA",
"content": "BLA BLA BLA",
"due_date": 1567780450202,
"due_date_time": False,
"priority": 1,
"assignee": 183,
"status": "red"
}
resp = myClickUp.createList(listJson=myList, folderId="115031109")
**createTask(taskJson, listId, parentTaskId)**: Creates a Task in a given Folder, the Task details are specified in the dict "taskJson", according to the ClickUp API. See ClickUp API documentation for details of the structure of this data.
task = {
"name": activityName,
"description": descr,
"assignees": [3247672, 18904985],
"status": "To do",
"priority": 3, # normal
"due_date": dueDate,
"due_date_time": False,
"time_estimate": timeEstimate,
"start_date": None,
"start_date_time": False,
"notify_all": True,
"links_to": None
}
newTask = myClickUp.createTask(taskJson=task, listId=listId)
**updateFolder(folderJson, folderId)**: Updates the Folder properties, the Folder details are specified in the dict "folderJson", according to the ClickUp API. See ClickUp API documentation for details of the structure of this data.
**updateList(listJson, listId)**: Updates the List properties, the List details are specified in the dict "listJson", according to the ClickUp API. See ClickUp API documentation for details of the structure of this data.
**updateTask(taskJson, taskId)**: Updates the Task properties, the Task details are specified in the dict "taskJson", according to the ClickUp API. See ClickUp API documentation for details of the structure of this data.
**deleteFolder(folderId)**: Delete a Folder. See ClickUp API documentation for details of the structure of this data.
myClickUp.deleteFolder(folderId="1249")
**deleteList(listId)**: Delete a List. See ClickUp API documentation for details of the structure of this data.
myClickUp.deleteList(listId="1249")
**deleteTask(taskId)**: Delete a Task. See ClickUp API documentation for details of the structure of this data.
myClickUp.deleteTask(taskId="1249")
| PypiClean |
/zhihu_oauth-0.0.42.tar.gz/zhihu_oauth-0.0.42/docs/for-user/search.rst | Search - 搜索
-------------
知乎的搜索功能通过 :any:`ZhihuClient.search` 方法提供。
目前知乎提供了 6 个搜索方式, :any:`SearchType` 枚举常量表示这六种方式,作为参数传递给 :any:`ZhihuClient.search` 方法。
方式与枚举常量对应关系如下:
.. automodule:: zhihu_oauth.zhcls.search
:members: SearchType
搜索的常见用法:
.. code-block:: Python
client.search('程序', SearchType.COLUMN)
client.search('7sDream', SearchType.PEOPLE)
除了 ``SearchType.GENERAL`` 方式,其他方式的搜索都会返回 :any:`SearchResult` 对象的迭代器。
可用属性如下:
.. autoclass:: zhihu_oauth.zhcls.search.SearchResult
:members:
:undoc-members:
:special-members: __init__
所以一般这样用:
.. code-block:: Python
for result in client.search('程序', SearchType.COLUMN):
column = result.obj
print(column.title, column.author.name)
# do something with `column`
结果: ::
程序员实验室 Wayne Shi
程序员达达 达达
程序人生 hi大头鬼hi
程序员的自我修养 luckystar
反转程序猿 大房
程序员作战手册 Jim Jin
红客联盟 小食妹
非著名程序员 loonggg
其他类型的搜索的用法也类似,就不赘述了。
而 ``SearchType.GENERAL`` 方式的搜索也是迭代器,但可能返回 :any:`SearchResult` 和 :any:`SearchResultSection` 对象。
:any:`SearchResultSection` 对象除了自身有一些属性(见下)之外,本身也是个 :any:`SearchResult` 的迭代器:
.. autoclass:: zhihu_oauth.zhcls.search.SearchResultSection
:members:
:special-members: __init__
这样用起来就有点麻烦,你得判断迭代器返回的是那种对象,大概就要这样写:
.. code-block:: Python
for result in client.search("panda", search_type=SearchType.GENERAL):
if isinstance(result, SearchResultSection):
print(result.type, "search result list:")
for r in result:
# do something with r
print(r.obj)
else:
# result is SearchResult object
r = result
# do something with r
print(r.highlight_title, r.highlight_desc)
print(r.obj)
print('-' * 20)
结果如下: ::
topic search result list:
<zhihu_oauth.zhcls.topic.Topic object at 0x7f19e9c1ce48>
--------------------
column search result list:
<zhihu_oauth.zhcls.column.Column object at 0x7f19e9c1ce48>
--------------------
people search result list:
<zhihu_oauth.zhcls.people.People object at 0x7f19e9c1ce48>
<zhihu_oauth.zhcls.people.People object at 0x7f19e9c1ceb8>
<zhihu_oauth.zhcls.people.People object at 0x7f19e9c1ce80>
--------------------
你有哪些收藏来反复看的<em>大熊猫</em>(<em>panda</em>)的图片? <em>熊猫</em><em>panda</em>的尾巴是白色的白色的白色的,重说三,看到好多<em>熊猫</em>玩偶都把<em>熊猫</em>尾巴做成黑色的,就连功夫<em>熊猫</em>里阿宝的尾巴都是黑色的,我觉得有必要科普一下哦,对了,图片来自ipanda,
<zhihu_oauth.zhcls.answer.Answer object at 0x7f19e9c1cef0>
--------------------
如何评价<em>熊猫</em>tv狼人杀新节目<em>panda</em>kill? 10月22日局更新.就第一集而言个人分析仅供参考.首先十二位玩家一一点评.1号鼠大王:比上一季进步了,当民的时候站边,发言都阳光了很多,没有被抗推就是不错的进步,但是当狼的时候依然会紧张状态不稳,第三
<zhihu_oauth.zhcls.answer.Answer object at 0x7f19e9c1cef0>
--------------------
# ... 未完 ...
由于这样写不是很方便,所以提供了 :any:`ZhihuClient.search_unfold` 方法,他会自动将 :any:`SearchResultSection` 展开,生成 :any:`SearchResult` 型的对象,用法:
.. code-block:: Python
for result in client.search_unfold("panda"):
# result is SearchResult object
r = result
print(r.highlight_title, r.highlight_desc)
print(r.obj)
print('-' * 20)
结果: ::
<zhihu_oauth.zhcls.topic.Topic object at 0x7f6ffa42bf60>
--------------------
我吃掉了一辆奔驰
<zhihu_oauth.zhcls.column.Column object at 0x7f6ffa42bf60>
--------------------
<zhihu_oauth.zhcls.people.People object at 0x7f6ffa42bf60>
--------------------
<zhihu_oauth.zhcls.people.People object at 0x7f6ffa42bf60>
--------------------
<zhihu_oauth.zhcls.people.People object at 0x7f6ffa42bf60>
--------------------
你有哪些收藏来反复看的<em>大熊猫</em>(<em>panda</em>)的图片? <em>熊猫</em><em>panda</em>的尾巴是白色的白色的白色的,重说三,看到好多<em>熊猫</em>玩偶都把<em>熊猫</em>尾巴做成黑色的,就连功夫<em>熊猫</em>里阿宝的尾巴都是黑色的,我觉得有必要科普一下哦,对了,图片来自ipanda,
<zhihu_oauth.zhcls.answer.Answer object at 0x7f6ffa42bf60>
--------------------
如何评价<em>熊猫</em>tv狼人杀新节目<em>panda</em>kill? 10月22日局更新.就第一集而言个人分析仅供参考.首先十二位玩家一一点评.1号鼠大王:比上一季进步了,当民的时候站边,发言都阳光了很多,没有被抗推就是不错的进步,但是当狼的时候依然会紧张状态不稳,第三
<zhihu_oauth.zhcls.answer.Answer object at 0x7f6ffa42bef0>
--------------------
如何评价11.5 <em>panda</em>kill 各位的表现? 其实这一期我感觉没有分析的必要,因为这一期总体上就是上一集坏现象进一步恶化后形成的的"进阶版大乱斗",重复的话我觉得没必要再说了,这里随手放个上一期回答的链接~如何评价10.29 pandakill
<zhihu_oauth.zhcls.answer.Answer object at 0x7f6ffa42bf28>
--------------------
# ... 未完 ...

最前面那些空行是因为 `highlight_title` 和 `highlight_desc` 属性都是空。
推荐在综合搜索时使用 :any:`ZhihuClient.search_unfold` 方法,注意,此方法不支持设置搜索类型,也就是说只支持综合搜索。
| PypiClean |
/eyepie-0.11.3.tar.gz/eyepie-0.11.3/src/eyepy/core/utils.py | from __future__ import annotations
import numpy as np
import numpy.typing as npt
from skimage.util import img_as_float32
from skimage.util import img_as_ubyte
from eyepy.core.filter import filter_by_height_enface
from .annotations import EyeVolumeLayerAnnotation
NDArrayFloat = npt.NDArray[np.float_]
NDArrayBool = npt.NDArray[np.bool_]
NDArrayInt = npt.NDArray[np.int_]
class DynamicDefaultDict(dict):
    """A defaultdict whose factory receives the missing key.

    Unlike ``collections.defaultdict``, the factory is called as
    ``factory(key)`` so the default value can depend on the key itself.
    """

    def __init__(self, factory):
        # Callable invoked as factory(key) whenever a key is absent.
        self.factory = factory

    def __missing__(self, key):
        value = self.factory(key)
        self[key] = value
        return value
def vol_intensity_transform(data: NDArrayFloat) -> NDArrayInt:
    """Convert raw Heyex VOL float intensities to display values.

    Thin, public alias for :func:`from_vol_intensity`; the transform yields a
    contrast similar to the one used in Heyex.

    Args:
        data: Raw VOL intensity data.

    Returns:
        uint8 intensity data.
    """
    return from_vol_intensity(data)
def from_vol_intensity(data: NDArrayFloat) -> NDArrayInt:
selection_0 = data == np.finfo(np.float32).max
selection_data = data <= 1
new = np.log(data[selection_data] + 2.44e-04)
new = (new + 8.3) / 8.285
data[selection_data] = new
data[selection_0] = 0
data = np.clip(data, 0, 1)
return img_as_ubyte(data)
# Function expects numpy array of uint8 type hint
def to_vol_intensity(data: np.ndarray) -> NDArrayFloat:
data = img_as_float32(data)
data = data * 8.285 - 8.3
data = np.exp(data) - 2.44e-04
return data
def default_intensity_transform(data: np.ndarray) -> np.ndarray:
    """Identity intensity transform.

    Used when no vendor-specific contrast adjustment is required; the input
    is returned unchanged.

    Args:
        data: Input data

    Returns:
        The input data, unmodified.
    """
    return data
# Registry mapping a format key to the intensity transform applied on load:
# "vol" for Heyex VOL exports, "default" leaves intensities untouched.
intensity_transforms = {
    'default': default_intensity_transform,
    'vol': vol_intensity_transform,
}
def ideal_rpe(rpe_height: NDArrayFloat, bm_height: NDArrayFloat,
              volume_shape: tuple[int, int, int]) -> NDArrayFloat:
    """Estimate the drusen-free ("ideal") RPE from the observed RPE and BM.

    The BM is virtually flattened onto the horizontal center line of each
    B-scan and the RPE is shifted along with it. The dominant flattened RPE
    height (histogram peak and its immediate neighbours) is taken as the
    ideal RPE level, which is then shifted back into the original space.

    Args:
        rpe_height: RPE height as offset from the lower border of the B-scan.
        bm_height: BM height as offset from the lower border of the B-scan.
        volume_shape: (number of B-scans, height, width) of the OCT volume.

    Returns:
        Ideal RPE height as offset from the lower border of the B-scan.
    """
    n_bscans, height, width = volume_shape
    # Shift that moves the BM onto the horizontal center line of each B-scan.
    shift = np.full((n_bscans, width), height - height / 2, dtype='int') - bm_height
    flattened_rpe = rpe_height + shift
    # Ignore positions where the RPE is not annotated.
    finite = flattened_rpe[~np.isnan(flattened_rpe)]
    # One histogram bin per pixel row of a B-scan.
    hist, edges = np.histogram(finite.flatten(), bins=np.arange(height))
    # The ideal level is the mean over the dominant bin and its neighbours.
    peak = np.argmax(hist)
    lower_edge = edges[peak - 1]
    upper_edge = edges[peak + 2]
    level = np.mean(finite[np.logical_and(finite <= upper_edge,
                                          finite >= lower_edge)])
    flat_ideal = np.full_like(flattened_rpe, level)
    # Undo the flattening shift to return to image space.
    return np.reshape(flat_ideal, (n_bscans, width)) - shift
def drusen(rpe_height: NDArrayFloat,
           bm_height: NDArrayFloat,
           volume_shape: tuple[int, int, int],
           minimum_height: int = 2) -> NDArrayBool:
    """Compute a drusen voxel mask from the RPE and BM layer segmentations.

    The ideal (drusen-free) RPE is estimated from a histogram of RPE heights
    relative to the BM; drusen are the voxels between the observed RPE and
    this ideal RPE. Components thinner than ``minimum_height`` are removed.

    Args:
        rpe_height: RPE height as offset from the lower border of the B-scan
            (plain array or EyeVolumeLayerAnnotation).
        bm_height: BM height as offset from the lower border of the B-scan
            (plain array or EyeVolumeLayerAnnotation).
        volume_shape: (number of B-scans, height, width) of the OCT volume.
        minimum_height: Minimum drusen height in pixels.

    Returns:
        Boolean array of ``volume_shape``; True marks a drusen voxel.
    """
    # Accept either layer annotations or plain height arrays.
    if isinstance(rpe_height, EyeVolumeLayerAnnotation):
        rpe_height = np.copy(rpe_height.data)
    if isinstance(bm_height, EyeVolumeLayerAnnotation):
        bm_height = np.copy(bm_height.data)

    irpe = ideal_rpe(rpe_height, bm_height, volume_shape)
    drusen_map = np.zeros(volume_shape, dtype=bool)

    # Columns where either layer is missing produce no drusen.
    missing = np.isnan(rpe_height + irpe)

    # Integer row indices; +1 excludes the RPE itself from the drusen area.
    rpe_rows = np.rint(rpe_height + 1)
    rpe_rows[missing] = 0
    rpe_rows = rpe_rows.astype(int)
    irpe_rows = np.rint(irpe)
    irpe_rows[missing] = 0
    irpe_rows = irpe_rows.astype(int)

    for scan in range(drusen_map.shape[0]):
        for col in range(drusen_map.shape[2]):
            if missing[scan, col]:
                continue
            drusen_map[scan, rpe_rows[scan, col]:irpe_rows[scan, col], col] = 1

    return filter_by_height_enface(drusen_map, minimum_height)
/Aries-storage-0.1.330.tar.gz/Aries-storage-0.1.330/Aries/storage/cloud.py | import os
import datetime
import logging
import threading
from io import FileIO
from abc import ABC
from .base import StorageObject, StoragePrefixBase, StorageIOSeekable
logger = logging.getLogger(__name__)
class BucketStorageObject(StorageObject):
    """Represents a cloud storage object associated with a bucket.
    This object may not correspond to an actual object in the bucket, e.g. a folder in Google or S3 bucket.
    """
    # Caches clients for each scheme
    cache_dict = dict()
    # Expiration time for each client
    cache_expire = dict()
    # Ensure that only one thread can initialize the client at one time
    # Multiple threads initializing the s3 client at the same time may cause a KeyError: 'credential_provider'
    # https://github.com/boto/boto3/issues/1592
    client_lock = threading.Lock()
    # The number of seconds before the client expires.
    CACHE_EXPIRE_SEC = 1200
    def __init__(self, uri):
        """Initialize with a storage URI, e.g. ``gs://bucket/key`` or ``s3://bucket/key``."""
        StorageObject.__init__(self, uri)
        # Lazily-initialized handles; see the client/bucket properties.
        self._client = None
        self._bucket = None
        self._blob = None
    @classmethod
    def get_cached(cls, obj_id, init_method):
        """Gets an unexpired object by obj_id from cache, creates one using init_method() if needed.

        Expired entries are replaced in place; entries are never evicted, so
        the cache grows with the number of distinct obj_ids.
        """
        cached_obj = cls.cache_dict.get(obj_id)
        now = datetime.datetime.now()
        if cached_obj:
            client_expire = cls.cache_expire.get(obj_id)
            # Use the cached client if it is not expired.
            if client_expire and client_expire > now:
                return cached_obj
        obj = init_method()
        cls.cache_dict[obj_id] = obj
        cls.cache_expire[obj_id] = now + datetime.timedelta(seconds=cls.CACHE_EXPIRE_SEC)
        return obj
    def get_client(self):
        """Return the (cached) storage client for this object's scheme."""
        obj_id = self.scheme
        with self.client_lock:
            return self.get_cached(obj_id, self.init_client)
    def get_bucket(self):
        """Return the (cached) bucket handle for this object's bucket.

        NOTE(review): unlike get_client(), this does not hold client_lock --
        confirm that bucket initialization is thread-safe for all backends.
        """
        obj_id = "%s://%s" % (self.scheme, self.bucket_name)
        return self.get_cached(obj_id, self.init_bucket)
    @property
    def bucket_name(self):
        """The name of the Cloud Storage bucket as a string."""
        return self.hostname
    @property
    def client(self):
        """Lazily-initialized storage client (shared per scheme via the cache)."""
        if not self._client:
            self._client = self.get_client()
        return self._client
    @property
    def bucket(self):
        """Lazily-initialized bucket handle (shared per bucket via the cache)."""
        if not self._bucket:
            self._bucket = self.get_bucket()
        return self._bucket
    def is_file(self):
        """Determine if the object is a file.
        This will return False if the object does not exist or the object is a folder.
        """
        # Paths ending with "/" denote folders/prefixes, never files.
        if self.path.endswith("/"):
            return False
        if not self.exists():
            return False
        return True
    def init_client(self):
        """Create a new storage client. Subclasses must implement this."""
        raise NotImplementedError()
    def init_bucket(self):
        """Create a new bucket handle. Subclasses must implement this."""
        raise NotImplementedError()
    def exists(self):
        """Check whether the object exists in storage. Subclasses must implement this."""
        raise NotImplementedError()
class CloudStoragePrefix(StoragePrefixBase, ABC):
    # Abstract base: concrete backends (S3, GCS, ...) yield their own blob types.
    def blobs(self, delimiter=""):
        """All blobs with the same prefix as this object
        The type of blobs depends on the actual implementation of the blobs() method.
        The delimiter causes a list operation to roll up all the keys that share a common prefix into a single result.
        See Also: https://docs.aws.amazon.com/AmazonS3/latest/dev/ListingKeysHierarchy.html
        """
        raise NotImplementedError()
class CloudStorageIO(StorageIOSeekable):
    """Seekable file-like object backed by a cloud storage blob.

    Reads are served straight from the remote object via read_bytes() when no
    local copy exists.  Writes (and appends) are buffered through a temporary
    local file, which is uploaded back to the cloud when the stream is closed.
    Backend-specific behavior is provided by the abstract hooks at the bottom
    of the class (exists, get_size, delete, upload, download, read_bytes).
    """

    def __init__(self, uri):
        """Initialize the stream for the cloud object identified by ``uri``."""
        StorageIOSeekable.__init__(self, uri)
        # Path of the temp local file
        self.temp_path = None
        # Stores the temp local FileIO object
        self.__file_io = None
        # Cache the size information
        # TODO: use cached property
        self.__size = None

    @property
    def size(self):
        """Size of the object in bytes.

        Uses the local temp copy when one exists; otherwise asks the backend
        via get_size() and caches the answer.
        """
        if not self.__size:
            if self.__file_io:
                # BUG FIX: fileno is a method and must be called --
                # os.fstat() requires the integer file descriptor; passing
                # the bound method raised a TypeError.
                return os.fstat(self.__file_io.fileno()).st_size
            self.__size = self.get_size()
        return self.__size

    def seek(self, pos, whence=0):
        """Move the stream position, delegating to the local copy if present."""
        if self.__file_io:
            self._offset = self.__file_io.seek(pos, whence)
            return self._offset
        return self._seek(pos, whence)

    def tell(self):
        """Return the current stream position."""
        if self.__file_io:
            self._offset = self.__file_io.tell()
        return self._offset

    def local(self):
        """Creates a local copy of the file.

        Returns:
            CloudStorageIO: self, with ``self.__file_io`` opened on the copy.
        """
        if not self.__file_io:
            file_obj = self.create_temp_file()
            # Download file if appending or updating
            if self.exists() and ('a' in self.mode or '+' in self.mode):
                self.download(file_obj)
            # Close the temp file and open it with FileIO
            file_obj.close()
            # FileIO only accepts r/w/+/a/x mode characters (no 'b'/'t').
            mode = "".join([c for c in self.mode if c in "rw+ax"])
            self.__file_io = FileIO(file_obj.name, mode)
            self.temp_path = file_obj.name
        return self

    def read(self, size=None):
        """Reads up to ``size`` bytes from the current position.

        Args:
            size: Number of bytes to read; None reads to the end of the file.

        Returns: Bytes containing the contents of the file.

        Raises:
            FileNotFoundError: if the remote object does not exist.
        """
        start = self.tell()
        if self.__file_io:
            self.__file_io.seek(start)
            b = self.__file_io.read(size)
        else:
            if not self.exists():
                raise FileNotFoundError("File %s does not exists." % self.uri)
            file_size = self.size
            # TODO: size unknown?
            if not file_size:
                return b""
            if start >= file_size:
                return b""
            # read_bytes() takes an inclusive [start, end] byte range.
            end = file_size - 1
            if size:
                end = start + size - 1
                if end > file_size - 1:
                    end = file_size - 1
            b = self.read_bytes(start, end)
        self._offset += len(b)
        return b

    def write(self, b):
        """Writes data into the file.

        Args:
            b: Bytes data

        Returns: The number of bytes written into the file.

        Raises:
            ValueError: if the stream is already closed.
        """
        if self.closed:
            raise ValueError("write to closed file %s" % self.uri)
        # Create a temp local file; all writes are buffered locally and
        # uploaded on close().
        self.local()
        # Write data from buffer to file
        self.__file_io.seek(self.tell())
        size = self.__file_io.write(b)
        self._offset += size
        return size

    def __rm_temp(self):
        """Delete the local temp copy, if any, and clear ``temp_path``."""
        if self.temp_path and os.path.exists(self.temp_path):
            os.unlink(self.temp_path)
        logger.debug("Deleted temp file %s of %s" % (self.temp_path, self.uri))
        self.temp_path = None
        return

    def open(self, mode='r', *args, **kwargs):
        """Opens the file for writing
        """
        if not self._closed:
            self.close()
        super().open(mode)
        self._closed = False
        # Reset offset position when open
        self.seek(0)
        if 'a' in self.mode:
            # Move to the end of the file if open in appending mode.
            self.seek(0, 2)
        elif 'w' in self.mode:
            # Create empty local file
            self.local()
        return self

    def close(self):
        """Flush and close the file.
        This method has no effect if the file is already closed.
        """
        if self._closed:
            return
        if self.__file_io:
            if not self.__file_io.closed:
                self.__file_io.close()
            self.__file_io = None
        # A temp path implies local modifications: push them to the cloud.
        if self.temp_path:
            logger.debug("Uploading file to %s" % self.uri)
            with open(self.temp_path, 'rb') as f:
                self.upload(f)
        # Remove __temp_file if it exists.
        self.__rm_temp()
        # Set _closed attribute
        self._closed = True

    # --- Backend hooks; implemented by storage-specific subclasses. ---
    @property
    def updated_time(self):
        raise NotImplementedError()

    def exists(self):
        raise NotImplementedError()

    def get_size(self):
        raise NotImplementedError()

    def delete(self):
        raise NotImplementedError()

    def upload(self, from_file_obj):
        raise NotImplementedError()

    def download(self, to_file_obj):
        """Downloads the data to a file object
        Caution: This method does not call flush()
        """
        raise NotImplementedError()

    def read_bytes(self, start, end):
        """Reads bytes from position start to position end, inclusive
        """
        raise NotImplementedError()
/trixie-0.1.2.tar.gz/trixie-0.1.2/homeassistant/components/media_player/openhome.py | import logging
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_ON,
SUPPORT_TURN_OFF, SUPPORT_VOLUME_SET, SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP, SUPPORT_STOP, SUPPORT_PLAY, SUPPORT_SELECT_SOURCE,
MediaPlayerDevice)
from homeassistant.const import (
STATE_IDLE, STATE_PAUSED, STATE_PLAYING, STATE_OFF)
REQUIREMENTS = ['openhomedevice==0.4.2']

# Baseline feature set every Openhome device supports; source-dependent
# features (play/pause/skip) are OR-ed in during OpenhomeDevice.update().
SUPPORT_OPENHOME = SUPPORT_SELECT_SOURCE | \
    SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | \
    SUPPORT_TURN_OFF | SUPPORT_TURN_ON

_LOGGER = logging.getLogger(__name__)

# Devices discovered so far; used by setup_platform() to skip duplicates.
DEVICES = []
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Openhome platform."""
    from openhomedevice.Device import Device

    # This platform is discovery-only; nothing to do without SSDP info.
    if not discovery_info:
        return True

    name = discovery_info.get('name')
    description = discovery_info.get('ssdp_description')

    _LOGGER.info("Openhome device found: %s", name)
    device = Device(description)

    # if device has already been discovered
    if device.Uuid() in [x.unique_id for x in DEVICES]:
        return True

    device = OpenhomeDevice(hass, device)

    # Second argument True requests an immediate update() before adding.
    add_devices([device], True)
    DEVICES.append(device)

    return True
class OpenhomeDevice(MediaPlayerDevice):
    """Representation of an Openhome device."""

    def __init__(self, hass, device):
        """Initialise the Openhome device."""
        self.hass = hass
        self._device = device
        self._track_information = {}
        self._in_standby = None
        self._transport_state = None
        self._volume_level = None
        self._volume_muted = None
        self._supported_features = SUPPORT_OPENHOME
        self._source_names = list()
        self._source_index = {}
        self._source = {}
        self._name = None
        # Assume playing until the first update() reports the real state.
        self._state = STATE_PLAYING

    def update(self):
        """Update state of device."""
        self._in_standby = self._device.IsInStandby()
        self._transport_state = self._device.TransportState()
        self._track_information = self._device.TrackInfo()
        self._volume_level = self._device.VolumeLevel()
        self._volume_muted = self._device.IsMuted()
        self._source = self._device.Source()
        self._name = self._device.Room().decode('utf-8')
        # Reset to the baseline and re-derive source-dependent features.
        self._supported_features = SUPPORT_OPENHOME
        source_index = {}
        source_names = list()

        for source in self._device.Sources():
            source_names.append(source["name"])
            source_index[source["name"]] = source["index"]

        self._source_index = source_index
        self._source_names = source_names

        if self._source["type"] == "Radio":
            self._supported_features |= SUPPORT_STOP | SUPPORT_PLAY
        if self._source["type"] in ("Playlist", "Cloud"):
            self._supported_features |= SUPPORT_PREVIOUS_TRACK | \
                SUPPORT_NEXT_TRACK | SUPPORT_PAUSE | SUPPORT_PLAY

        # Map the Openhome transport state onto Home Assistant states.
        if self._in_standby:
            self._state = STATE_OFF
        elif self._transport_state == 'Paused':
            self._state = STATE_PAUSED
        elif self._transport_state in ('Playing', 'Buffering'):
            self._state = STATE_PLAYING
        elif self._transport_state == 'Stopped':
            self._state = STATE_IDLE
        else:
            # Device is playing an external source with no transport controls
            self._state = STATE_PLAYING

    def turn_on(self):
        """Bring device out of standby."""
        self._device.SetStandby(False)

    def turn_off(self):
        """Put device in standby."""
        self._device.SetStandby(True)

    def media_pause(self):
        """Send pause command."""
        self._device.Pause()

    def media_stop(self):
        """Send stop command."""
        self._device.Stop()

    def media_play(self):
        """Send play command."""
        self._device.Play()

    def media_next_track(self):
        """Send next track command."""
        self._device.Skip(1)

    def media_previous_track(self):
        """Send previous track command."""
        self._device.Skip(-1)

    def select_source(self, source):
        """Select input source."""
        self._device.SetSource(self._source_index[source])

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def supported_features(self):
        """Flag of features commands that are supported."""
        return self._supported_features

    @property
    def should_poll(self):
        """Return the polling state."""
        return True

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._device.Uuid()

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def source_list(self):
        """List of available input sources."""
        return self._source_names

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        return self._track_information.get('albumArtwork')

    @property
    def media_artist(self):
        """Artist of current playing media, music track only."""
        artists = self._track_information.get('artist')
        if artists:
            return artists[0]

    @property
    def media_album_name(self):
        """Album name of current playing media, music track only."""
        return self._track_information.get('albumTitle')

    @property
    def media_title(self):
        """Title of current playing media."""
        return self._track_information.get('title')

    @property
    def source(self):
        """Name of the current input source."""
        return self._source.get('name')

    @property
    def volume_level(self):
        """Volume level of the media player (0..1)."""
        # Device reports 0..100; Home Assistant expects 0..1.
        return self._volume_level / 100.0

    @property
    def is_volume_muted(self):
        """Return true if volume is muted."""
        return self._volume_muted

    def volume_up(self):
        """Volume up media player."""
        self._device.IncreaseVolume()

    def volume_down(self):
        """Volume down media player."""
        self._device.DecreaseVolume()

    def set_volume_level(self, volume):
        """Set volume level, range 0..1."""
        self._device.SetVolumeLevel(int(volume * 100))

    def mute_volume(self, mute):
        """Mute (true) or unmute (false) media player."""
        self._device.SetMute(mute)
/fake-bge-module-latest-20230415.tar.gz/fake-bge-module-latest-20230415/gpu/state.py | import sys
import typing
def blend_get():
    '''Return the current blending equation.'''
    # API stub: the real implementation lives inside Blender's gpu module.
    pass
def blend_set(mode: str):
    '''Define the fixed pipeline blending equation.

    :param mode: Blend mode name; one of ``NONE`` (no blending), ``ALPHA``,
        ``ALPHA_PREMULT``, ``ADDITIVE``, ``ADDITIVE_PREMULT``, ``MULTIPLY``,
        ``SUBTRACT`` or ``INVERT``.
    :type mode: str
    '''
    pass
def clip_distances_set(distances_enabled: int):
    '''Set the number of ``gl_ClipDistance`` planes used for clip geometry.

    :param distances_enabled: Number of clip distances enabled.
    :type distances_enabled: int
    '''
    pass
def color_mask_set(r: bool, g: bool, b: bool, a: bool):
    '''Enable or disable writing of frame buffer color components.

    :param r: Write the red component.
    :type r: bool
    :param g: Write the green component.
    :type g: bool
    :param b: Write the blue component.
    :type b: bool
    :param a: Write the alpha component.
    :type a: bool
    '''
    pass
def depth_mask_get():
    '''Return the writing status of the depth component.'''
    pass
def depth_mask_set(value):
    '''Enable or disable writing to the depth component.

    :param value: True to enable depth writes.
    :type value: bool
    '''
    # Original docstring documented a non-existent ``near`` parameter.
    pass
def depth_test_get():
    '''Return the current depth-test equation.'''
    pass
def depth_test_set(mode: str):
    '''Define the depth-test equation.

    :param mode: The depth test equation name. Possible values are ``NONE``,
        ``ALWAYS``, ``LESS``, ``LESS_EQUAL``, ``EQUAL``, ``GREATER`` and
        ``GREATER_EQUAL``.
    :type mode: str
    '''
    pass
def face_culling_set(culling):
    '''Specify whether none, front-facing or back-facing facets can be culled.

    :param culling: ``NONE``, ``FRONT`` or ``BACK``.
    :type culling: str
    '''
    # Original docstring documented the parameter under the wrong name (mode).
    pass
def framebuffer_active_get(enable):
    '''Return the active frame-buffer in context.'''
    # NOTE(review): ``enable`` is accepted but undocumented upstream; its
    # purpose cannot be determined from this stub -- confirm against Blender.
    pass
def front_facing_set(invert):
    '''Specify the orientation of front-facing polygons.

    :param invert: True to invert the front-face winding.
    :type invert: bool
    '''
    pass
def line_width_get():
    '''Return the current width of rasterized lines.'''
    pass
def line_width_set(width):
    '''Specify the width of rasterized lines.

    :param width: Line width in pixels.
    :type width: float
    '''
    pass
def point_size_set(size):
    '''Specify the diameter of rasterized points.

    :param size: Point diameter in pixels.
    :type size: float
    '''
    pass
def program_point_size_set(enable: bool):
    '''If enabled, the derived point size is taken from the (potentially
    clipped) shader builtin ``gl_PointSize``.

    :param enable: True for shader builtin gl_PointSize.
    :type enable: bool
    '''
    pass
def scissor_get() -> tuple:
    '''Retrieve the scissors of the active framebuffer.

    Note: Only valid between 'scissor_set' and a framebuffer rebind.

    :rtype: tuple
    :return: The scissor of the active framebuffer as a tuple
        (x, y, xsize, ysize). x, y: lower left corner of the scissor
        rectangle, in pixels. xsize, ysize: width and height of the
        scissor rectangle.
    '''
    # Annotation corrected: the documented return value is a 4-tuple, not int.
    pass
def scissor_set(x: int, y: int, xsize: int, ysize: int):
    '''Specify the scissor area of the active framebuffer.

    Note: The scissor state is not saved upon framebuffer rebind.

    :param x: Lower left corner of the scissor rectangle, in pixels.
    :type x: int
    :param y: Lower left corner of the scissor rectangle, in pixels.
    :type y: int
    :param xsize: Width of the scissor rectangle.
    :type xsize: int
    :param ysize: Height of the scissor rectangle.
    :type ysize: int
    '''
    pass
def scissor_test_set(enable: bool):
    '''Enable/disable scissor testing on the active framebuffer.

    :param enable: True - enable scissor testing. False - disable scissor
        testing.
    :type enable: bool
    '''
    pass
def viewport_get():
    '''Return the viewport of the active framebuffer.'''
    pass
def viewport_set(x: int, y: int, xsize: int, ysize: int):
    '''Specify the viewport of the active framebuffer.

    Note: The viewport state is not saved upon framebuffer rebind.

    :param x: Lower left corner of the viewport rectangle, in pixels.
    :type x: int
    :param y: Lower left corner of the viewport rectangle, in pixels.
    :type y: int
    :param xsize: Width of the viewport.
    :type xsize: int
    :param ysize: Height of the viewport.
    :type ysize: int
    '''
    pass
/rlane_libcurses-1.0.5-py3-none-any.whl/libcurses/mouseevent.py |
import copy
import curses
class MouseEvent:
    """Something done by a mouse; results of `curses.getmouse()`."""

    # pylint: disable=too-many-instance-attributes

    # Most recent fully-decoded event.  Motion-only reports carry no button
    # information, so they inherit button/modifier state from this event.
    _last_mouse = None

    def __init__(self):
        """Initialize `MouseEvent` with current mouse info."""
        # https://docs.python.org/3/library/curses.html#curses.getmouse
        _, x, y, _, bstate = curses.getmouse()
        self.x = x
        self.y = y
        self.bstate = bstate

        # Pure motion report while dragging: reuse the previous event's
        # button/modifier state, flag as moving, and finish early.
        if bstate & curses.REPORT_MOUSE_POSITION != 0 and self._last_mouse:
            self.button = self._last_mouse.button
            self.nclicks = self._last_mouse.nclicks
            self.is_pressed = True
            self.is_released = False
            self.is_alt = self._last_mouse.is_alt
            self.is_ctrl = self._last_mouse.is_ctrl
            self.is_shift = self._last_mouse.is_shift
            self.is_moving = True
            return

        # Decode which physical button the bstate bits belong to.
        if bstate & (
            curses.BUTTON1_CLICKED
            | curses.BUTTON1_DOUBLE_CLICKED
            | curses.BUTTON1_TRIPLE_CLICKED
            | curses.BUTTON1_PRESSED
            | curses.BUTTON1_RELEASED
        ):
            self.button = 1  # left
        elif bstate & (
            curses.BUTTON2_CLICKED
            | curses.BUTTON2_DOUBLE_CLICKED
            | curses.BUTTON2_TRIPLE_CLICKED
            | curses.BUTTON2_PRESSED
            | curses.BUTTON2_RELEASED
        ):
            self.button = 2  # middle
        elif bstate & (
            curses.BUTTON3_CLICKED
            | curses.BUTTON3_DOUBLE_CLICKED
            | curses.BUTTON3_TRIPLE_CLICKED
            | curses.BUTTON3_PRESSED
            | curses.BUTTON3_RELEASED
        ):
            self.button = 3  # right
        elif bstate & (
            curses.BUTTON4_CLICKED
            | curses.BUTTON4_DOUBLE_CLICKED
            | curses.BUTTON4_TRIPLE_CLICKED
            | curses.BUTTON4_PRESSED
            | curses.BUTTON4_RELEASED
        ):
            self.button = 4  # wheelup / forward
        else:
            self.button = 5  # wheeldown / backward

        # Decode the action: press, release, or a 1/2/3-click.
        self.nclicks = 0
        self.is_pressed = False
        self.is_released = False
        if bstate & (
            curses.BUTTON1_PRESSED
            | curses.BUTTON2_PRESSED
            | curses.BUTTON3_PRESSED
            | curses.REPORT_MOUSE_POSITION
        ):
            self.is_pressed = True
        elif bstate & (
            curses.BUTTON1_RELEASED | curses.BUTTON2_RELEASED | curses.BUTTON3_RELEASED
        ):
            self.is_released = True
        elif bstate & (curses.BUTTON1_CLICKED | curses.BUTTON2_CLICKED | curses.BUTTON3_CLICKED):
            self.nclicks = 1
        elif bstate & (
            curses.BUTTON1_DOUBLE_CLICKED
            | curses.BUTTON2_DOUBLE_CLICKED
            | curses.BUTTON3_DOUBLE_CLICKED
        ):
            self.nclicks = 2
        elif bstate & (
            curses.BUTTON1_TRIPLE_CLICKED
            | curses.BUTTON2_TRIPLE_CLICKED
            | curses.BUTTON3_TRIPLE_CLICKED
        ):
            self.nclicks = 3

        # Modifier keys and motion flag.
        self.is_alt = bstate & curses.BUTTON_ALT != 0
        self.is_ctrl = bstate & curses.BUTTON_CTRL != 0
        self.is_shift = bstate & curses.BUTTON_SHIFT != 0
        self.is_moving = bstate & curses.REPORT_MOUSE_POSITION != 0
        # Remember this event so later motion reports can inherit from it.
        self.__class__._last_mouse = copy.copy(self)

    def __str__(self):
        # Compact human-readable form, e.g. "Ctrl+M1*2" for a ctrl-double-click.
        parts = []
        if self.is_alt:
            parts.append("Alt")
        if self.is_ctrl:
            parts.append("Ctrl")
        if self.is_shift:
            parts.append("Shift")
        parts.append("M" + str(self.button))
        string = "+".join(parts)
        if self.nclicks > 1:
            string += f"*{self.nclicks}"
        return string

    def __repr__(self):
        return (
            self.__class__.__name__
            + "("
            + ", ".join(
                [
                    f"name='{self!s}'",
                    f"y={self.y}",
                    f"x={self.x}",
                    f"bstate={self.bstate:#x}",
                    # f'button={self.button}',
                    f"nclicks={self.nclicks}",
                    # f'is_alt={self.is_alt}',
                    # f'is_ctrl={self.is_ctrl}',
                    # f'is_shift={self.is_shift}',
                    f"is_pressed={self.is_pressed}",
                    f"is_released={self.is_released}",
                    f"is_moving={self.is_moving}",
                ]
            )
            + ")"
        )
/zencore-etcd3-0.12.0.20220817.tar.gz/zencore-etcd3-0.12.0.20220817/etcd3/transactions.py | import etcd3.etcdrpc as etcdrpc
import etcd3.utils as utils
# Maps etcd Compare result enums to human-readable operator symbols;
# used only by BaseCompare.__repr__.
_OPERATORS = {
    etcdrpc.Compare.EQUAL: "==",
    etcdrpc.Compare.NOT_EQUAL: "!=",
    etcdrpc.Compare.LESS: "<",
    etcdrpc.Compare.GREATER: ">"
}
class BaseCompare(object):
    """Base class for etcd transaction compare predicates.

    Comparison operators are overloaded to *record* the operator and operand
    (returning ``self``) instead of comparing, so predicates can be written
    fluently, e.g. ``Value('key') == b'v'``.  As a consequence, normal
    equality semantics do not apply to instances of this class.
    """

    def __init__(self, key, range_end=None):
        self.key = key
        self.range_end = range_end
        self.value = None  # operand captured by an operator overload
        self.op = None  # one of the etcdrpc.Compare result enums

    # TODO check other is of correct type for compare
    # Version, Mod and Create can only be ints
    def __eq__(self, other):
        self.value = other
        self.op = etcdrpc.Compare.EQUAL
        return self

    def __ne__(self, other):
        self.value = other
        self.op = etcdrpc.Compare.NOT_EQUAL
        return self

    def __lt__(self, other):
        self.value = other
        self.op = etcdrpc.Compare.LESS
        return self

    def __gt__(self, other):
        self.value = other
        self.op = etcdrpc.Compare.GREATER
        return self

    def __repr__(self):
        if self.range_end is None:
            keys = self.key
        else:
            keys = "[{}, {})".format(self.key, self.range_end)
        return "{}: {} {} '{}'".format(self.__class__, keys,
                                       _OPERATORS.get(self.op),
                                       self.value)

    def build_message(self):
        """Build the etcdrpc.Compare protobuf for this predicate.

        Raises ValueError if no comparison operator was applied yet.
        """
        compare = etcdrpc.Compare()
        compare.key = utils.to_bytes(self.key)
        if self.range_end is not None:
            compare.range_end = utils.to_bytes(self.range_end)

        if self.op is None:
            raise ValueError('op must be one of =, !=, < or >')

        compare.result = self.op

        # Subclass hook: fills in the comparison target and operand field.
        self.build_compare(compare)
        return compare
class Value(BaseCompare):
    """Compare against the value stored at the key."""

    def build_compare(self, compare):
        compare.target = etcdrpc.Compare.VALUE
        compare.value = utils.to_bytes(self.value)
class Version(BaseCompare):
    """Compare against the version of the key (integer operand)."""

    def build_compare(self, compare):
        compare.target = etcdrpc.Compare.VERSION
        compare.version = int(self.value)
class Create(BaseCompare):
    """Compare against the creation revision of the key (integer operand)."""

    def build_compare(self, compare):
        compare.target = etcdrpc.Compare.CREATE
        compare.create_revision = int(self.value)
class Mod(BaseCompare):
    """Compare against the last-modification revision of the key (integer operand)."""

    def build_compare(self, compare):
        compare.target = etcdrpc.Compare.MOD
        compare.mod_revision = int(self.value)
class Put(object):
    """Transaction operation storing ``value`` under ``key``.

    ``lease`` optionally attaches a lease to the key; when ``prev_kv`` is
    True, the response includes the previous key-value pair.
    """

    def __init__(self, key, value, lease=None, prev_kv=False):
        self.key, self.value = key, value
        self.lease, self.prev_kv = lease, prev_kv
class Get(object):
    """Transaction operation reading ``key`` (or the range [key, range_end))."""

    def __init__(self, key, range_end=None):
        self.key, self.range_end = key, range_end
class Delete(object):
    """Transaction operation deleting ``key`` (or the range [key, range_end)).

    When ``prev_kv`` is True, the response includes the deleted key-value pairs.
    """

    def __init__(self, key, range_end=None, prev_kv=False):
        self.key, self.range_end = key, range_end
        self.prev_kv = prev_kv
class Txn(object):
    """Nested transaction: ``compare`` conditions with success/failure branches."""

    def __init__(self, compare, success=None, failure=None):
        self.compare = compare
        self.success, self.failure = success, failure
/collective.mathjax-1.1.1.tar.gz/collective.mathjax-1.1.1/collective/mathjax/resources/MathJax/jax/input/MathML/entities/r.js | (function(a){MathJax.Hub.Insert(a.Parse.Entity,{RBarr:"\u2910",REG:"\u00AE",Racute:"\u0154",Rang:"\u27EB",Rarrtl:"\u2916",Rcaron:"\u0158",Rcedil:"\u0156",Rcy:"\u0420",ReverseElement:"\u220B",ReverseUpEquilibrium:"\u296F",Rho:"\u03A1",RightArrowBar:"\u21E5",RightDoubleBracket:"\u27E7",RightDownTeeVector:"\u295D",RightDownVectorBar:"\u2955",RightTeeVector:"\u295B",RightTriangleBar:"\u29D0",RightUpDownVector:"\u294F",RightUpTeeVector:"\u295C",RightUpVectorBar:"\u2954",RightVectorBar:"\u2953",RoundImplies:"\u2970",RuleDelayed:"\u29F4",rAarr:"\u21DB",rArr:"\u21D2",rAtail:"\u291C",rBarr:"\u290F",rHar:"\u2964",race:"\u223D\u0331",racute:"\u0155",radic:"\u221A",raemptyv:"\u29B3",rang:"\u27E9",rangd:"\u2992",range:"\u29A5",rangle:"\u27E9",raquo:"\u00BB",rarr:"\u2192",rarrap:"\u2975",rarrb:"\u21E5",rarrbfs:"\u2920",rarrc:"\u2933",rarrfs:"\u291E",rarrhk:"\u21AA",rarrlp:"\u21AC",rarrpl:"\u2945",rarrsim:"\u2974",rarrw:"\u219D",ratail:"\u291A",ratio:"\u2236",rationals:"\u211A",rbarr:"\u290D",rbbrk:"\u2773",rbrke:"\u298C",rbrksld:"\u298E",rbrkslu:"\u2990",rcaron:"\u0159",rcedil:"\u0157",rceil:"\u2309",rcub:"\u007D",rcy:"\u0440",rdca:"\u2937",rdldhar:"\u2969",rdquo:"\u201D",rdquor:"\u201D",rdsh:"\u21B3",real:"\u211C",realine:"\u211B",realpart:"\u211C",reals:"\u211D",rect:"\u25AD",reg:"\u00AE",rfisht:"\u297D",rfloor:"\u230B",rhard:"\u21C1",rharu:"\u21C0",rharul:"\u296C",rightarrow:"\u2192",rightarrowtail:"\u21A3",rightharpoondown:"\u21C1",rightharpoonup:"\u21C0",rightleftarrows:"\u21C4",rightleftharpoons:"\u21CC",rightsquigarrow:"\u219D",risingdotseq:"\u2253",rlarr:"\u21C4",rlhar:"\u21CC",rlm:"\u200F",rmoustache:"\u23B1",rnmid:"\u2AEE",roang:"\u27ED",roarr:"\u21FE",robrk:"\u27E7",ropar:"\u2986",roplus:"\u2A2E",rotimes:"\u2A35",rpar:"\u0029",rpargt:"\u2994",rppolint:"\u2A12",rrarr:"\u21C9",rsaquo:"\u203A",rsh:"\u21B1",rsqb:"\u005D",rsquo:"\u2019",rsquor:"\u2
019",rthree:"\u22CC",rtrie:"\u22B5",rtrif:"\u25B8",rtriltri:"\u29CE",ruluhar:"\u2968",rx:"\u211E"});MathJax.Ajax.loadComplete(a.entityDir+"/r.js")})(MathJax.InputJax.MathML); | PypiClean |
/m2bk-0.4.0.tar.gz/m2bk-0.4.0/README.rst | m2bk
====
.. image:: https://travis-ci.org/axltxl/m2bk.svg?branch=develop
:target: https://travis-ci.org/axltxl/m2bk
.. image:: https://badge.fury.io/py/m2bk.svg
:target: http://badge.fury.io/py/m2bk
.. image:: https://img.shields.io/gratipay/axltxl.svg
:target: https://gratipay.com/axltxl
Send your mongodump backups straight to AWS S3
----------------------------------------------
*m2bk* is command line tool that performs a number of
**mongodb database backups via mongodump**, compresses them into a
gzipped tarball and finally sends them to an **AWS S3 bucket**
(more options are about to be available).
.. image:: http://i.imgur.com/PxqbEPA.gif
- `Requirements <#requirements>`_
- `Contributing <#contributing>`_
- `Installation <#installation>`_
- `Basic usage <#basic-usage>`_
- `Options <#options>`_
- `Configuration file <#configuration-file>`_
- `Sections and directives <#configuration-file-sections-and-directives>`_
- `fs section <#fs-section>`_
- `mongodb section <#mongodb-section>`_
- `mongodb.host_defaults section <#mongodbhost_defaults-section>`_
- `mongodb.hosts section <#mongodbhosts-section>`_
- `Drivers (driver section) <#drivers-driver-section>`_
- `dummy <#dummy>`_
- `s3 <#s3>`_
- `Donating <#donating>`_
- `Copyright and licensing <#copyright-and-licensing>`_
Requirements
============
- `python <http://python.org>`_ >= 3.3
- `boto <http://docs.pythonboto.org/en/latest/>`_ >= 2.33
- `envoy <https://pypi.python.org/pypi/envoy>`_ >= 0.0.3
- `pyyaml <http://pyyaml.org>`_ >= 3.11
- `mongodb <http://www.mongodb.org>`_ >= 2.4
- `clint <https://github.com/kennethreitz/clint>`_ >= 0.4.1
Contributing
============
There are many ways in which you can contribute to m2bk.
Code patches are just one thing amongst others that you can submit to help the project.
We also welcome feedback, bug reports, feature requests, documentation improvements,
advertisement and testing.
Feedback contributions
----------------------
This is by far the easiest way to contribute something.
If you’re using m2bk for your own benefit, don’t hesitate to share your experience.
Feel free to `submit issues and enhancement requests. <https://github.com/axltxl/m2bk/issues>`_
Code contributions
------------------
Code contributions (patches, new features) are the most obvious way to help with the project’s development.
Since this is so common we ask you to follow our workflow to most efficiently work with us.
For code contributions, we follow the "fork-and-pull" Git workflow.
1. Fork, then clone your repo on GitHub
::
git clone [email protected]:your-username/m2bk.git
git remote add upstream https://github.com/axltxl/m2bk.git
If you already forked the repo, then be sure to merge
the most recent changes from "upstream" before making a pull request.
::
git pull upstream
2. Create a new feature branch in your local repo
::
git checkout -b my_feature_branch
3. Make your changes, then make sure the tests pass
::
pyvenv m2bk-pyve && source m2bk-pyve/bin/activate
python3 setup.py test
4. Commit your changes once done
::
git commit -a -m "My commit message"
git push origin my_feature_branch
5. Submit a `pull request <https://github.com/axltxl/m2bk/compare/>`_ with your feature branch containing your changes.
Installation
============
Installation of m2bk can be made directly from source, via `pip <https://github.com/pypa/pip>`_ or
`easy_install <http://pythonhosted.org/setuptools/easy_install.html>`_, whichever you prefer.
Option # 1: pip
---------------
::
$ pip install m2bk
Option # 2: from source
-----------------------
::
$ git clone [email protected]:axltxl/m2bk.git
$ cd m2bk
$ python3 setup.py install
Option # 3: easy_install
------------------------
::
$ easy_install m2bk
From this point you can edit your `configuration file <#configuration-file>`_
::
$ vi /etc/m2bk/m2bk.yaml
Basic Usage
===========
Normal execution
::
$ m2bk
Quiet output
::
$ m2bk -q
Dry run
::
$ m2bk -d
Specify an alternate configuration file
::
$ m2bk -c /path/to/my/custom/m2bk.yaml
Options
=======
::
m2bk [options]
- ``--version`` show version number and exit
- ``-h | --help`` show a help message and exit
- ``-c [file] | --config=[file] | --config [file]`` specify configuration file to use
- ``-d | --dry-run`` don't actually do anything
- ``-q | --quiet`` quiet output
- ``--ll | --log-level=[num]`` set logging output level
- ``-l LOG_FILE | --log-file LOG_FILE set log file``
Configuration file
------------------
The configuration is handled through a simple `YAML <http://yaml.org/>`_
file including a series of *sections* (which are YAML objects), each one
composed by *directives* (YAML numbers, strings or arrays), these will
determine a corresponding behavior on **m2bk**. If **m2bk** does not receive
any configuration file on command line, it will try to read ``/etc/m2bk.yaml``.
**Please note the configuration format is still a work in progress and will most likely change in the early stages of m2bk.**
The following is an example of what a configuration file looks like:
::
---
driver:
name: s3
options:
aws_access_key_id: "SDF73HSDF3663KSKDJ"
aws_secret_access_key: "d577273ff885c3f84dadb8578bb41399"
fs:
output_dir: "/opt/tmp/mydir"
mongodb:
mongodump: "/opt/bin/mongodump"
host_defaults:
port: 666
user_name: "satan"
password: "14mh4x0r"
hosts:
foo:
address: "foo.example.local"
port: 34127
dbs:
- "app"
- "sessions"
- "another_one"
bar:
address: "bar.example.com"
password: "1AmAn07h3rh4x0r"
auth_db: bar
dbs:
- customers
- sessions
Through this configuration file, you can set key variables about the
databases you want to backup and the AWS S3 bucket you wish to send them
to.
Configuration file: sections and directives
-------------------------------------------
``fs`` section
^^^^^^^^^^^^^^
This section has directives regarding files and directories manipulation
Directives
^^^^^^^^^^
``fs.output_dir``
"""""""""""""""""
- Type: **string**
- Default value : ``/tmp/m2bk``
- Role: directory where m2bk is going to temporarily save backup files
``mongodb`` section
^^^^^^^^^^^^^^^^^^^
This section holds directives regarding mongodb servers **m2bk** is going
to connect to, including databases that are going to be backed up through *mongodump*.
**Example**:
::
mongodb:
mongodump: "/opt/bin/mongodump"
host_defaults:
user_name: tom
address: db.example.local
password: "457893mnfs3j"
dbs:
- halloran
- grady
hosts:
foo:
address: db0.example.internal
port: 27654
user_name: matt
password: "myS3cr37P455w0rd"
dbs:
# This list is going to be merged with dbs at host_defaults, thus
# the resulting dbs will be:
# ['halloran', 'grady', 'jack', 'wendy', 'danny']
- jack
- wendy
- danny
bar: {} # This one is going to acquire all host_defaults values
host_with_mixed_values:
# This host will inherit port, password and dbs from host_defaults
address: moloko.example.internal
user_name: alex
auth_db: milk_plus
Directives
^^^^^^^^^^
``mongodb.mongodump``
"""""""""""""""""""""
- Type: **string**
- Default value : ``mongodump``
- Role: full path to the ``mongodump`` executable used by m2bk
``mongodb.host_defaults`` section
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Many directives (such as user name and/or password) could be common
among the databases that are going to be backed up. For this reason, it
is best to simply put those common directives under a single section,
this is entirely optional but also it is the best for easily manageable
configuration files in order to avoid redundancy, the supported
directives are ``user_name``, ``password``, ``port``, ``dbs`` and ``auth_db`` .
See ``hosts`` section.
``mongodb.hosts`` section
^^^^^^^^^^^^^^^^^^^^^^^^^
This is an object/hash, where each element contains a series of
directives relative to a mongodb database located at a server, its
specifications and databases themselves held by it, these are
the main values used by ``mongodump`` when it does its magic. For each
entry inside the ``hosts`` section, these are its valid directives:
Directives
^^^^^^^^^^
``mongodb.hosts.*.address``
"""""""""""""""""""""""""""
- Type: **string**
- Required: YES
- Role: mongodb server location
``mongodb.hosts.*.port``
""""""""""""""""""""""""
- Type: **integer**
- Required: NO
- Default value : ``mongo.host_defaults.port | 27017``
- Role: mongodb server listening port
``mongodb.hosts.*.user_name``
"""""""""""""""""""""""""""""
- Type: **string**
- Required: NO
- Default value : ``mongodb.host_defaults.user_name | m2bk``
- Role: user name used for authentication against the mongodb server
``mongodb.hosts.*.password``
""""""""""""""""""""""""""""
- Type: **string**
- Required: NO
- Default value : ``mongodb.host_defaults.pass | "pass"``
- Role: password used for authentication against the mongodb server
``mongodb.hosts.*.auth_db``
"""""""""""""""""""""""""""
- Type: **string**
- Required: NO
- Default value : ``admin``
- Role: authentication database
``mongodb.hosts.*.dbs``
"""""""""""""""""""""""
- Type: **array**
- Required: NO
- Default value : ``mongodb.host_defaults.dbs | []``
- Role: a list of databases who are expected inside the mongodb server
**NOTE: particular "dbs" on one host will be merged with those of "host_defaults"**
Drivers (``driver`` section)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Once backup files have been generated, they are then handled by a driver, whose
job is to transfer resulting backup files to some form of storage (depending
on the driver set on configuration). Drivers (and their options) are
set and configured inside the ``driver`` section like so:
::
driver:
# First of all, you need to tell m2bk which driver to use
name: dummy
# Inside this key, driver options are set
options:
hello: world
another_option: another_value
Per driver, there are a bunch of available ``options`` to tweak them.
These options vary among drivers. Though there is only one driver available on
m2bk, there will be more drivers available with new releases. Current available
drivers are the following:
``dummy``
^^^^^^^^^
This driver is just a placeholder for testing out the driver interface as
it won't do a thing on backup files.
Options
^^^^^^^
There are no options for this driver. Any option passed to this driver
will be logged at debug level.
``s3``
^^^^^^
This driver holds directives regarding AWS credentials that **m2bk**
is going to use in order to upload the *mongodump* backups to S3.
If either ``aws_access_key_id`` or ``aws_secret_access_key`` are not specified,
this driver will not try to use them to authenticate against AWS and will rely
on `boto config <http://boto.readthedocs.org/en/latest/boto_config_tut.html>`_ for that matter.
**Example**:
::
driver:
name: s3
options:
aws_access_key_id": "HAS6NBASD8787SD"
aws_secret_access_key: "d41d8cd98f00b204e9800998ecf8427e"
s3_bucket: "mybucket"
Options
^^^^^^^
``aws_access_key_id``
"""""""""""""""""""""
- Type: **string**
- Required: NO
- Role: AWS access key ID
``aws_secret_access_key``
"""""""""""""""""""""""""
- Type: **string**
- Required: NO
- Role: AWS access key ID
``s3_bucket``
"""""""""""""
- Type: **string**
- Required: NO
- Default value: ``m2bk``
- Role: name of the main S3 bucket where m2bk is going to upload the compressed backups for each mongodb server specified in ``mongodb`` section
Donating
========
Show your love and support this project via `gratipay <https://gratipay.com/axltxl>`_
.. image:: https://cdn.rawgit.com/gratipay/gratipay-badge/2.3.0/dist/gratipay.png
:target: https://gratipay.com/axltxl
Copyright and Licensing
=======================
Copyright (c) Alejandro Ricoveri
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| PypiClean |
/ob_dj_factorial-0.0.2-py3-none-any.whl/ob_dj_factorial/core/factorial/views.py | import logging
import requests
from celery import current_app
from django.conf import settings
from django.contrib.sites.models import Site
from django.http import Http404, HttpRequest, HttpResponse
from django.shortcuts import redirect
from django.urls import reverse
from django.views import View
from ob_dj_factorial.core.factorial.models import FHOAuth
logger = logging.getLogger(__name__)
class FactorialOAuthView(View):
    """Handle the OAuth2 authorization-code callback from Factorial.

    Exchanges the ``code`` query parameter for an access/refresh token pair
    and persists it on the :class:`FHOAuth` row tied to the current site,
    then redirects to that row's admin change page.
    """

    permissions: tuple = ("factorial.add_oauth",)

    def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
        # The provider always redirects back with ?code=...; anything else
        # is not a valid OAuth callback, so answer 404.
        if "code" not in request.GET:
            raise Http404
        code = request.GET.get("code")
        site = Site.objects.get_current()
        response = requests.post(
            url=f"{settings.FH_API_BASE_URL}/oauth/token",
            data={
                "client_id": settings.FH_CLIENT_ID,
                "client_secret": settings.FH_CLIENT_SECRET,
                "redirect_uri": settings.FH_REDIRECT_URI,
                "grant_type": "authorization_code",
                "code": code,
            },
        )
        # Lazy %-style args avoid building the message when DEBUG is off.
        logger.debug("%s", response.content)
        response.raise_for_status()
        logger.debug(
            "%s() Response <url:%s, status_code:%s>",
            self.__class__.__name__,
            response.url,
            response.status_code,
        )
        payload = response.json()
        # One FHOAuth row per site: refresh the stored tokens, or create
        # them on the first authorization (idiomatic update_or_create
        # replaces the previous get/update/save-or-create dance).
        oa, _created = FHOAuth.objects.update_or_create(
            site=site,
            defaults={
                "access_token": payload.get("access_token"),
                "refresh_token": payload.get("refresh_token"),
                "expires_in": payload.get("expires_in"),
            },
        )
        # TODO: Post message in sessions to show success in admin
        # messages https://docs.djangoproject.com/en/3.1/ref/contrib/messages/
        return redirect(reverse("admin:factorial_fhoauth_change", args=[oa.id]))
class FactorialAllView(View):
    """Kick off a background sync of all Factorial objects via Celery.

    NOTE(review): ``permissions`` references ``integrations.add_oauth`` while
    the sibling view uses ``factorial.add_oauth`` — looks like a copy/paste
    remnant; confirm which permission string is intended before changing it.
    """

    permissions: tuple = ("integrations.add_oauth",)

    def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
        site = Site.objects.get_current()
        # Raises FHOAuth.DoesNotExist (HTTP 500) if the site was never authorized.
        instance = FHOAuth.objects.get(site=site)
        current_app.send_task("ob_dj_factorial.core.factorial.tasks.sync_all_objects")
        return redirect(reverse("admin:factorial_fhoauth_change", args=[instance.id]))
/mmcif-0.69.tar.gz/mmcif-0.69/modules/pybind11/docs/advanced/cast/stl.rst | STL containers
##############
Automatic conversion
====================
When including the additional header file :file:`pybind11/stl.h`, conversions
between ``std::vector<>``/``std::deque<>``/``std::list<>``/``std::array<>``/``std::valarray<>``,
``std::set<>``/``std::unordered_set<>``, and
``std::map<>``/``std::unordered_map<>`` and the Python ``list``, ``set`` and
``dict`` data structures are automatically enabled. The types ``std::pair<>``
and ``std::tuple<>`` are already supported out of the box with just the core
:file:`pybind11/pybind11.h` header.
The major downside of these implicit conversions is that containers must be
converted (i.e. copied) on every Python->C++ and C++->Python transition, which
can have implications on the program semantics and performance. Please read the
next sections for more details and alternative approaches that avoid this.
.. note::
Arbitrary nesting of any of these types is possible.
.. seealso::
The file :file:`tests/test_stl.cpp` contains a complete
example that demonstrates how to pass STL data types in more detail.
.. _cpp17_container_casters:
C++17 library containers
========================
The :file:`pybind11/stl.h` header also includes support for ``std::optional<>``
and ``std::variant<>``. These require a C++17 compiler and standard library.
In C++14 mode, ``std::experimental::optional<>`` is supported if available.
Various versions of these containers also exist for C++11 (e.g. in Boost).
pybind11 provides an easy way to specialize the ``type_caster`` for such
types:
.. code-block:: cpp
// `boost::optional` as an example -- can be any `std::optional`-like container
namespace pybind11 { namespace detail {
template <typename T>
struct type_caster<boost::optional<T>> : optional_caster<boost::optional<T>> {};
}}
The above should be placed in a header file and included in all translation units
where automatic conversion is needed. Similarly, a specialization can be provided
for custom variant types:
.. code-block:: cpp
// `boost::variant` as an example -- can be any `std::variant`-like container
namespace pybind11 { namespace detail {
template <typename... Ts>
struct type_caster<boost::variant<Ts...>> : variant_caster<boost::variant<Ts...>> {};
// Specifies the function used to visit the variant -- `apply_visitor` instead of `visit`
template <>
struct visit_helper<boost::variant> {
template <typename... Args>
static auto call(Args &&...args) -> decltype(boost::apply_visitor(args...)) {
return boost::apply_visitor(args...);
}
};
}} // namespace pybind11::detail
The ``visit_helper`` specialization is not required if your ``name::variant`` provides
a ``name::visit()`` function. For any other function name, the specialization must be
included to tell pybind11 how to visit the variant.
.. warning::
When converting a ``variant`` type, pybind11 follows the same rules as when
determining which function overload to call (:ref:`overload_resolution`), and
so the same caveats hold. In particular, the order in which the ``variant``'s
alternatives are listed is important, since pybind11 will try conversions in
this order. This means that, for example, when converting ``variant<int, bool>``,
the ``bool`` variant will never be selected, as any Python ``bool`` is already
an ``int`` and is convertible to a C++ ``int``. Changing the order of alternatives
(and using ``variant<bool, int>``, in this example) provides a solution.
.. note::
pybind11 only supports the modern implementation of ``boost::variant``
which makes use of variadic templates. This requires Boost 1.56 or newer.
Additionally, on Windows, MSVC 2017 is required because ``boost::variant``
falls back to the old non-variadic implementation on MSVC 2015.
.. _opaque:
Making opaque types
===================
pybind11 heavily relies on a template matching mechanism to convert parameters
and return values that are constructed from STL data types such as vectors,
linked lists, hash tables, etc. This even works in a recursive manner, for
instance to deal with lists of hash maps of pairs of elementary and custom
types, etc.
However, a fundamental limitation of this approach is that internal conversions
between Python and C++ types involve a copy operation that prevents
pass-by-reference semantics. What does this mean?
Suppose we bind the following function
.. code-block:: cpp
void append_1(std::vector<int> &v) {
v.push_back(1);
}
and call it from Python, the following happens:
.. code-block:: pycon
>>> v = [5, 6]
>>> append_1(v)
>>> print(v)
[5, 6]
As you can see, when passing STL data structures by reference, modifications
are not propagated back to the Python side. A similar situation arises when
exposing STL data structures using the ``def_readwrite`` or ``def_readonly``
functions:
.. code-block:: cpp
/* ... definition ... */
class MyClass {
std::vector<int> contents;
};
/* ... binding code ... */
py::class_<MyClass>(m, "MyClass")
.def(py::init<>())
.def_readwrite("contents", &MyClass::contents);
In this case, properties can be read and written in their entirety. However, an
``append`` operation involving such a list type has no effect:
.. code-block:: pycon
>>> m = MyClass()
>>> m.contents = [5, 6]
>>> print(m.contents)
[5, 6]
>>> m.contents.append(7)
>>> print(m.contents)
[5, 6]
Finally, the involved copy operations can be costly when dealing with very
large lists. To deal with all of the above situations, pybind11 provides a
macro named ``PYBIND11_MAKE_OPAQUE(T)`` that disables the template-based
conversion machinery of types, thus rendering them *opaque*. The contents of
opaque objects are never inspected or extracted, hence they *can* be passed by
reference. For instance, to turn ``std::vector<int>`` into an opaque type, add
the declaration
.. code-block:: cpp
PYBIND11_MAKE_OPAQUE(std::vector<int>);
before any binding code (e.g. invocations to ``class_::def()``, etc.). This
macro must be specified at the top level (and outside of any namespaces), since
it adds a template instantiation of ``type_caster``. If your binding code consists of
multiple compilation units, it must be present in every file (typically via a
common header) preceding any usage of ``std::vector<int>``. Opaque types must
also have a corresponding ``class_`` declaration to associate them with a name
in Python, and to define a set of available operations, e.g.:
.. code-block:: cpp
py::class_<std::vector<int>>(m, "IntVector")
.def(py::init<>())
.def("clear", &std::vector<int>::clear)
.def("pop_back", &std::vector<int>::pop_back)
.def("__len__", [](const std::vector<int> &v) { return v.size(); })
.def("__iter__", [](std::vector<int> &v) {
return py::make_iterator(v.begin(), v.end());
}, py::keep_alive<0, 1>()) /* Keep vector alive while iterator is used */
// ....
.. seealso::
The file :file:`tests/test_opaque_types.cpp` contains a complete
example that demonstrates how to create and expose opaque types using
pybind11 in more detail.
.. _stl_bind:
Binding STL containers
======================
The ability to expose STL containers as native Python objects is a fairly
common request, hence pybind11 also provides an optional header file named
:file:`pybind11/stl_bind.h` that does exactly this. The mapped containers try
to match the behavior of their native Python counterparts as much as possible.
The following example showcases usage of :file:`pybind11/stl_bind.h`:
.. code-block:: cpp
// Don't forget this
#include <pybind11/stl_bind.h>
PYBIND11_MAKE_OPAQUE(std::vector<int>);
PYBIND11_MAKE_OPAQUE(std::map<std::string, double>);
// ...
// later in binding code:
py::bind_vector<std::vector<int>>(m, "VectorInt");
py::bind_map<std::map<std::string, double>>(m, "MapStringDouble");
When binding STL containers pybind11 considers the types of the container's
elements to decide whether the container should be confined to the local module
(via the :ref:`module_local` feature). If the container element types are
anything other than already-bound custom types bound without
``py::module_local()`` the container binding will have ``py::module_local()``
applied. This includes converting types such as numeric types, strings, Eigen
types; and types that have not yet been bound at the time of the stl container
binding. This module-local binding is designed to avoid potential conflicts
between module bindings (for example, from two separate modules each attempting
to bind ``std::vector<int>`` as a python type).
It is possible to override this behavior to force a definition to be either
module-local or global. To do so, you can pass the attributes
``py::module_local()`` (to make the binding module-local) or
``py::module_local(false)`` (to make the binding global) into the
``py::bind_vector`` or ``py::bind_map`` arguments:
.. code-block:: cpp
py::bind_vector<std::vector<int>>(m, "VectorInt", py::module_local(false));
Note, however, that such a global binding would make it impossible to load this
module at the same time as any other pybind module that also attempts to bind
the same container type (``std::vector<int>`` in the above example).
See :ref:`module_local` for more details on module-local bindings.
.. seealso::
The file :file:`tests/test_stl_binders.cpp` shows how to use the
convenience STL container wrappers.
| PypiClean |
/cloud_asset-0.0.4.9-py3-none-any.whl/asset/base.py |
# Copyright The Cloud Asset Authors.
# SPDX-License-Identifier: Apache-2.0
import abc
import copy
import logging
import datetime
from typing import List
from sqlalchemy import create_engine, Table, UniqueConstraint
from boto3 import Session
from aliyunsdkcore.client import AcsClient
from asset.asset_table import AssetTable
from asset.schema import DbConfig, AssetColumn, STSAssumeRoleCredential, RamRoleArnCredential, AwsCredential,\
TencentProfile, AliyunProfile, AwsProfile, HuaweiProfile, HuaweiCredential
from asset.utils import to_hump_underline, get_tencent_account_id, get_aliyun_account_id, get_aws_account_id, \
tencent_parser_response, aliyun_parser_response, aws_parser_response, recursive_list, aws_assume_role, \
get_huawei_account_id, huawei_parser_response, retry
from huaweicloudsdkcore.auth.credentials import BasicCredentials
class Describe:
    """Abstract base for provider-specific "describe" API wrappers.

    Subclasses implement :meth:`describe` (issue the raw API call) and
    :meth:`parser_response` (normalise the payload into asset records).
    """
    def parser_response(self):
        # Must be overridden: normalise the raw API response.
        raise NotImplementedError("")
    def describe(self):
        # Must be overridden: perform the actual API request.
        raise NotImplementedError("")
class DescribeTencent(Describe):
    """Wrap a single Tencent Cloud "describe" call plus its response parsing.

    Args:
        client: Tencent SDK client instance.
        des_request_func: name of the client method to invoke.
        des_request: request object passed to that method.
        response_filed: field of the response holding the asset payload.
        parser_response: callable extracting assets from the raw response.
    """

    def __init__(
        self,
        client,
        des_request_func: str,
        des_request,
        response_filed: str,
        parser_response: callable = tencent_parser_response
    ):
        self.client = client
        self.des_request_func = des_request_func
        self.des_request = des_request
        # Normalise the (misspelled) constructor argument to a clean attribute.
        self.response_field = response_filed
        self.parser_response_func = parser_response

    @retry(3)
    def parser_response(self):
        """Fetch the raw response and reduce it to asset records (retried)."""
        raw = self.describe()
        return self.parser_response_func(raw, self.response_field)

    def describe(self):
        """Invoke the configured SDK method with the prepared request."""
        request_method = getattr(self.client, self.des_request_func)
        return request_method(self.des_request)
class DescribeAliyun(Describe):
    """Wrap a single Alibaba Cloud "describe" call plus its response parsing.

    Args:
        client: ACS client instance.
        des_request: prepared SDK request object.
        response_filed: field of the response holding the asset payload.
        child_response_filed: optional nested field inside ``response_filed``.
        parser_response_func: callable extracting assets from the raw response.
    """

    def __init__(
        self,
        client,
        des_request,
        response_filed: str,
        child_response_filed: str = None,
        parser_response_func: callable = aliyun_parser_response
    ):
        self.client = client
        self.des_request = des_request
        self.response_filed = response_filed
        self.child_response_filed = child_response_filed
        self.parser_response_func = parser_response_func

    @retry(5)
    def parser_response(self):
        """Fetch the raw response and reduce it to asset records (retried)."""
        raw = self.describe()
        return self.parser_response_func(raw, self.response_filed, self.child_response_filed)

    def describe(self):
        """Send the prepared request through the ACS client."""
        do_action = getattr(self.client, 'do_action_with_exception')
        return do_action(self.des_request)
class DescribeAws(Describe):
    """Wrap a single AWS boto3 "describe" call plus its response parsing.

    Args:
        client: boto3 service client.
        des_request: name of the client method to invoke.
        response_field: field of the response holding the asset payload.
        child_response_filed: optional nested field inside ``response_field``.
        des_request_kwargs: keyword arguments forwarded to the API call.
        parser_response_func: callable extracting assets from the raw response
            (AwsAsset expects it to return an ``(assets, next_token)`` pair).
    """

    def __init__(
        self,
        client,
        des_request: str,
        response_field: str,
        child_response_filed: str = None,
        des_request_kwargs: dict = None,
        parser_response_func: callable = aws_parser_response
    ):
        self.client = client
        self.des_request = des_request
        if des_request_kwargs is None:
            des_request_kwargs = dict()
        self.des_request_kwargs = des_request_kwargs
        self.response_field = response_field
        self.child_response_filed = child_response_filed
        self.parser_response_func = parser_response_func

    @retry(3)
    def parser_response(self):
        """Fetch the raw response and run it through the parser (retried)."""
        raw = self.describe()
        return self.parser_response_func(raw, self.response_field, self.child_response_filed)

    def describe(self):
        """Invoke the configured boto3 client method with its kwargs."""
        api_call = getattr(self.client, self.des_request)
        return api_call(**self.des_request_kwargs)
class DescribeHuawei(Describe):
    """Wrap a single Huawei Cloud "describe" call plus its response parsing.

    Args:
        client: Huawei SDK client.
        des: name of the client method to invoke.
        des_request: prepared SDK request object.
        response_field: field of the response holding the asset payload.
        parser_response_func: callable extracting assets from the raw response.
    """

    def __init__(
        self,
        client: object,
        des: str,
        des_request: object,
        response_field: str,
        parser_response_func: callable = huawei_parser_response
    ):
        self.client = client
        self.des = des
        self.des_request = des_request
        self.response_field = response_field
        self.parser_response_func = parser_response_func

    @retry(3)
    def parser_response(self):
        """Fetch the raw response and reduce it to asset records (retried)."""
        raw = self.describe()
        return self.parser_response_func(raw, self.response_field)

    def describe(self):
        """Call the SDK method and return its JSON-object representation."""
        sdk_call = getattr(self.client, self.des)
        return sdk_call(self.des_request).to_json_object()
class Asset(metaclass=abc.ABCMeta):
    """Abstract base for one cloud asset type persisted to PostgreSQL.

    Subclasses configure the class-level ``_platform``/``_table_name``/column
    attributes and implement the ``_get_*`` / ``_paginate_all_assets`` hooks;
    :meth:`fetch` then pulls all pages and bulk-inserts them into the table.
    """
    logger = logging.getLogger('cloud-asset-fetch')
    # Per-platform configuration, overridden by subclasses.
    _platform: str = ''
    _table_name: str = None
    _asset_columns: List[AssetColumn] = None
    _is_hump_underline: bool = True
    _table_args: tuple = tuple()
    _table_kwargs: dict = None
    # Columns added to every asset table (account, region, fetch date).
    # NOTE(review): this is a *class-level* list whose AssetColumn kwargs are
    # mutated in __init__ below, so defaults leak across instances — confirm
    # this sharing is intended.
    _default_columns = [
        AssetColumn(name='account_id', type='str', len=128, kwargs={'nullable': False, 'default': ''}),
        AssetColumn(name='region', type='str', len=128, kwargs={'nullable': False, 'default': ''}),
        AssetColumn(
            name='record_date', type='date',
            kwargs={'nullable': False, 'default': datetime.datetime.now, 'onupdate': datetime.datetime.now}
        )
    ]
    _field_document: str = ''
    def __init__(
        self, cred, region=None, dbconfig: DbConfig = None, parser_response: callable = None
    ):
        """Build the DB engine, backing table, and resolve the account id.

        Args:
            cred: provider credential object.
            region: provider region identifier.
            dbconfig: PostgreSQL connection settings.
            parser_response: callable used to parse raw API responses.
        """
        self.cred = cred
        self.region = region
        self.dbconfig = dbconfig
        # Engine is built eagerly from dbconfig (PostgreSQL only).
        self._engine = create_engine(
            "postgresql://{user}:{password}@{host}:{port}/{database}".format(**self.dbconfig.dict())
        )
        # Ensure the table name carries the platform prefix, e.g. "aws_...".
        if not self._table_name.startswith(f'{self._platform}_'):
            self._table_name = f'{self._platform}_{self._table_name}'
        self.asset_table = AssetTable(
            self._table_name, self._engine, self._asset_columns,
            default_columns=self._default_columns,
            is_hump_underline=self._is_hump_underline,
            args=self._table_args,
            kwargs=self._table_kwargs
        )
        self.parser_response = parser_response
        # Subclass hook; may call the provider API to resolve the account.
        self._account_id = self._get_account_id()
        # Inject per-instance defaults into the shared default columns
        # (see the NOTE(review) on _default_columns above).
        for default_column in self._default_columns:
            if default_column.name == 'account_id':
                default_column.kwargs['default'] = self.account_id
            if default_column.name == 'region':
                default_column.kwargs['default'] = self.region
    @property
    def engine(self):
        # SQLAlchemy engine bound to the configured database.
        return self._engine
    @property
    def table(self) -> Table:
        # SQLAlchemy Table backing this asset type.
        return self.asset_table.table
    @property
    def assets(self) -> list:
        # Single-page fetch (subclass hook).
        return self._get_assets()
    @property
    def paginate_all_assets(self) -> list:
        # All pages (subclass hook).
        return self._paginate_all_assets()
    @property
    def client(self):
        # Provider SDK client (subclass hook).
        return self._get_client()
    @property
    def account_id(self):
        return self._account_id
    def _paginate_all_assets(self) -> list:
        raise NotImplementedError("")
    def _get_assets(self) -> list:
        raise NotImplementedError("")
    def _get_client(self):
        raise NotImplementedError("")
    def _get_account_id(self):
        raise NotImplementedError("")
    def fetch(self):
        """Fetch every page of assets and insert them into the table.

        Returns:
            bool: always True (the insert is skipped when nothing was fetched).
        """
        if self._is_hump_underline:
            paginate_all_assets = self.assets_to_hump_underline(self.paginate_all_assets, self.asset_table.columns)
        else:
            paginate_all_assets = self.paginate_all_assets
        # all_assets datetime, bool, ... to str
        paginate_all_assets = recursive_list(paginate_all_assets)
        if paginate_all_assets:
            # With a UniqueConstraint configured, duplicate rows are skipped.
            uc = [_ for _ in self._table_args if isinstance(_, UniqueConstraint)]
            if uc:
                AssetTable.insert_values_duplicat_do_nothing(self.table, paginate_all_assets, self.engine, uc[0].name)
            else:
                AssetTable.insert_values(self.table, paginate_all_assets, self.engine)
        return True
    @classmethod
    def assets_to_hump_underline(cls, assets: List[dict], asset_columns: List[AssetColumn]) -> List[dict]:
        """Project raw asset dicts onto the table's snake_case column names.

        NOTE(review): values are taken via ``asset.pop(...)``, which mutates
        the caller's dicts — confirm this side effect is acceptable.
        """
        _assets, asset_columns = [], [asset_column.name for asset_column in asset_columns]
        for asset in assets:
            _ = copy.deepcopy(asset)
            _asset = {}
            # Map snake_case column name -> original (CamelCase) response key.
            _asset_keys = {to_hump_underline(key): key for key in _.keys()}
            for asset_column in asset_columns:
                if asset_column not in _asset_keys:
                    # These three are filled in by the table column defaults.
                    if asset_column in ('account_id', 'record_date', 'region'):
                        continue
                    _asset.update({asset_column: None})
                else:
                    _asset[asset_column] = asset.pop(_asset_keys[asset_column])
            _assets.append(_asset)
        return _assets
    @classmethod
    def load_creds(cls, profile):
        # Subclass hook: build credential objects from a profile.
        raise NotImplementedError("")
class TencentAsset(Asset):
    """Base class for Tencent Cloud assets fetched through the Tencent SDK."""
    _platform = 'tencent'
    _describe = DescribeTencent
    _des_request_func: str = ''
    _des_request: object = None
    _response_field: str = ''
    _paginate: bool = True
    _paginate_type = 'int'  # int or str: type the API expects for Limit/Offset

    def __init__(
        self,
        cred: STSAssumeRoleCredential,
        region=None,
        dbconfig: DbConfig = None,
        parser_response: callable = tencent_parser_response
    ):
        super(TencentAsset, self).__init__(cred, region=region, dbconfig=dbconfig, parser_response=parser_response)
        self.asset_describe = self._describe(
            self.client, self._des_request_func, self._des_request, self._response_field)

    def _get_assets(self):
        """Return a single (first) page of assets."""
        return self.asset_describe.parser_response()

    def _paginate_all_assets(self):
        """Fetch all pages by advancing Offset until an empty response."""
        page, assets = 0, []
        _des_request = copy.deepcopy(self._des_request)
        if self._paginate:
            _des_request.Limit = 50 if self._paginate_type == 'int' else '50'
        while True:
            response = self._describe(
                self.client, self._des_request_func, _des_request, self._response_field).parser_response()
            if not response:
                break
            assets += response
            # BUG FIX: the counter was reset with `page = 1` on every
            # iteration, so Offset stayed at one page size forever and the
            # same page was re-fetched endlessly. Increment instead.
            page += 1
            if self._paginate:
                if isinstance(_des_request.Limit, str):
                    _des_request.Offset = str(page * int(_des_request.Limit))
                else:
                    _des_request.Offset = page * _des_request.Limit
            else:
                break
        return assets

    def _get_client(self):
        # NOTE(review): returns None here; concrete asset classes presumably
        # override this with a real SDK client — confirm.
        pass

    def _get_account_id(self):
        return get_tencent_account_id(self.cred)

    @classmethod
    def load_creds(cls, profile: TencentProfile) -> List[STSAssumeRoleCredential]:
        """Build one STS assume-role credential per role on the profile."""
        return [
            STSAssumeRoleCredential(
                profile.ak,
                profile.sk,
                role.arn,
                role.session_name,
                duration_seconds=role.duration_seconds
            ) for role in profile.roles
        ]
class AliyunAsset(Asset):
    """Base class for Alibaba Cloud assets fetched through the ACS SDK."""
    _platform = 'aliyun'
    _des_request: callable = None  # request *class*; instantiated in __init__
    _response_field = ''
    _child_response_filed = None
    _describe = DescribeAliyun
    _paginate = True

    def __init__(
        self,
        cred: RamRoleArnCredential,
        region: str = None,
        dbconfig: DbConfig = None,
        parser_response: callable = aliyun_parser_response
    ):
        super(AliyunAsset, self).__init__(cred, region=region, dbconfig=dbconfig, parser_response=parser_response)
        self._des_request = self._des_request()
        self._des_request.set_accept_format('json')
        self.asset_describe = self._describe(
            self.client,
            self._des_request,
            self._response_field,
            self._child_response_filed,
            self.parser_response
        )

    def _paginate_all_assets(self):
        """Fetch all pages by advancing PageNumber until an empty response."""
        page, page_size, assets = 0, 50, []
        if self._paginate:
            self._des_request.set_PageSize(page_size)
        while True:
            response = self._describe(
                self.client,
                self._des_request,
                self._response_field,
                self._child_response_filed,
                self.parser_response
            ).parser_response()
            if not response:
                break
            assets += response
            page += 1
            if self._paginate:
                # BUG FIX: PageNumber is a 1-based page *index*, but it was
                # set to page * page_size (50, 100, ...), silently skipping
                # pages 2..49. Request the next page instead.
                self._des_request.set_PageNumber(page + 1)
            else:
                break
        return assets

    def _get_client(self):
        return AcsClient(credential=self.cred, region_id=self.region)

    def _get_assets(self):
        """Return a single (first) page of assets."""
        return self.asset_describe.parser_response()

    def _get_account_id(self):
        return get_aliyun_account_id(cred=self.cred)

    @classmethod
    def load_creds(cls, profile: AliyunProfile) -> List[RamRoleArnCredential]:
        """Build one RAM-role credential per role on the profile."""
        return [
            RamRoleArnCredential(
                profile.ak,
                profile.sk,
                role.arn,
                role.session_name
            ) for role in profile.roles
        ]
class AwsAsset(Asset):
    """Base class for AWS assets fetched through boto3.

    Subclasses set ``_client_name`` (boto3 service name), ``_des_request``
    (client method name) and the response-field attributes.
    """
    _platform = 'aws'
    _client_name: str = ''
    _des_request: str = ''
    _response_field: str = ''
    _child_response_filed: str = None
    # Default API kwargs; MaxResults caps each page at 50 items.
    _des_request_kwargs: dict = {'MaxResults': 50}
    # Request key under which the pagination cursor is sent back to the API.
    _next_type = 'NextToken'
    _describe = DescribeAws
    def __init__(
        self,
        cred: AwsCredential,
        region=None,
        dbconfig: DbConfig = None,
        parser_response: callable = aws_parser_response
    ):
        super(AwsAsset, self).__init__(cred, region=region, dbconfig=dbconfig, parser_response=parser_response)
        self.asset_describe = self._describe(
            self.client,
            self._des_request,
            self._response_field,
            des_request_kwargs=self._des_request_kwargs,
            parser_response_func=parser_response
        )
    def _paginate_all_assets(self) -> list:
        """Fetch all pages, following the NextToken-style pagination cursor."""
        assets = []
        # Deep copy so the class-level default kwargs are never mutated.
        _des_request_kwargs = copy.deepcopy(self._des_request_kwargs)
        while True:
            # The parser returns an (assets, next_token) pair; a None token
            # means there are no further pages.
            _assets, next_token = self._describe(
                self.client,
                self._des_request,
                self._response_field,
                self._child_response_filed,
                des_request_kwargs=_des_request_kwargs,
                parser_response_func=self.parser_response
            ).parser_response()
            assets += _assets
            if next_token is None:
                break
            _des_request_kwargs.update({self._next_type: next_token})
        return assets
    def _get_client(self):
        """Build the boto3 client for ``_client_name`` in the configured region."""
        return Session(**self.cred.dict()).client(self._client_name, region_name=self.region)
    def _get_assets(self) -> list:
        # Single page; discard the pagination token.
        assets, _ = self.asset_describe.parser_response()
        return assets
    def _get_account_id(self):
        return get_aws_account_id(self.cred)
    @classmethod
    def load_creds(cls, profile: AwsProfile) -> List[AwsCredential]:
        """Assume each configured role; roles that fail are logged and skipped."""
        creds = []
        for role in profile.roles:
            try:
                creds.append(
                    aws_assume_role(role.arn, role_session_name=role.session_name, duration_seconds=role.duration_seconds)
                )
            except Exception as e:
                cls.logger.error(f'aws_assume_role fail, error: {e} ')
                continue
        return creds
class HuaweiAsset(Asset):
    """Base class for Huawei Cloud assets fetched through the Huawei SDK."""
    _platform = 'huawei'
    _client = None      # SDK client class, set by subclasses
    _region_obj = None  # SDK region class, set by subclasses
    _describe = DescribeHuawei
    _des: str = ''      # client method name to call
    _request_obj = None  # SDK request class, set by subclasses
    _request_pars: dict = {'limit': 50, 'offset': 1}
    _response_field = ''
    _offset = True  # True: advance `offset` per page; False: stop on short page

    def __init__(
        self,
        cred: HuaweiCredential,
        region=None, dbconfig: DbConfig = None,
        parser_response=huawei_parser_response
    ):
        super(HuaweiAsset, self).__init__(cred, region=region, dbconfig=dbconfig, parser_response=parser_response)

    @property
    def credential(self):
        """Huawei SDK credential built from the stored ak/sk pair."""
        return BasicCredentials(self.cred.ak, self.cred.sk)

    def _paginate_all_assets(self) -> list:
        """Fetch all pages until an empty (or short) response."""
        assets = []
        _des_request_kwargs = self._request_obj(**self._request_pars)
        while True:
            response = self._describe(
                self.client,
                self._des,
                _des_request_kwargs,
                self._response_field,
                self.parser_response
            ).parser_response()
            assets += response
            if not response:
                break
            if self._offset:
                # NOTE(review): offset advances by 1 per page; confirm the
                # API treats `offset` as a page index, not a record offset.
                _des_request_kwargs.offset += 1
            else:
                if _des_request_kwargs.limit:
                    # A short page means there is nothing left to fetch.
                    if len(response) < _des_request_kwargs.limit:
                        break
                else:
                    break
        return assets

    def _get_assets(self) -> list:
        pass

    def _get_account_id(self):
        return get_huawei_account_id(self.credential)

    def _get_client(self):
        """Build the region-bound SDK client through its builder chain."""
        region = self._region_obj.value_of(self.region)
        return getattr(
            getattr(self._client.new_builder(), 'with_credentials')(self.credential),
            'with_region'
        )(region).build()

    @classmethod
    def load_creds(cls, profile: HuaweiProfile) -> List[HuaweiCredential]:
        """Return the credentials configured directly on the profile."""
        return profile.credentials
/sphinxcontrib-yamcs-1.2.8.tar.gz/sphinxcontrib-yamcs-1.2.8/README.rst | sphinxcontrib-yamcs
===================
This repository holds the sources for the PyPI package sphinxcontrib-yamcs. This package
includes a number of Sphinx directives and other extensions that are used during document
generation of multiple Yamcs projects.
To use this package, include the following in your Sphinx ``conf.py``:
.. code-block:: python
extensions = [
"sphinxcontrib.yamcs",
]
See source code for available directives. There are also a few configuration options:
yamcs_api_protobin
Path to a \*.protobin file. If present this plugin will autogenerate pages based on contained GPB services.
yamcs_api_destdir
Path where autogenerated files are generated (applies only when a protobin file was configured). Defaults to ``http-api``.
yamcs_api_title
Title of the document that contains links to generated API docs (applies only when a protobin file was configured). Defaults to ``HTTP API``.
yamcs_api_additional_docs
Additional non-autogenerated files to be included in the TOC. (applies only when a protobin file was configured). Defaults to ``[]``.
| PypiClean |
/spotify-ripper-fix-2.11.tar.gz/spotify-ripper-fix-2.11/spotify_ripper/eventloop.py |
# From PySpotify's EventLoop
# https://github.com/mopidy/pyspotify/blob/v2.x/master/spotify/eventloop.py
from __future__ import unicode_literals
from colorama import Fore
import threading
try:
# Python 3
import queue
except ImportError:
# Python 2
import Queue as queue
import spotify
__all__ = [
'EventLoop',
]
class EventLoop(threading.Thread):
    """Event loop for automatically processing events from libspotify.

    The event loop is a :class:`~threading.Thread` that listens to
    :attr:`~spotify.SessionEvent.NOTIFY_MAIN_THREAD` events and calls
    :meth:`~spotify.Session.process_events` when needed.

    To use it, pass it your :class:`~spotify.Session` instance and call
    :meth:`start`::

        >>> session = spotify.Session()
        >>> event_loop = EventLoop(session)
        >>> event_loop.start()

    .. warning::

        If you use :class:`EventLoop` to process the libspotify events, any
        event listeners you've registered will be called from the event loop
        thread. pyspotify itself is thread safe, but you'll need to ensure that
        you have proper synchronization in your own application code, as always
        when working with threads.
    """

    name = 'SpotifyEventLoop'

    def __init__(self, session, timeout, ripper):
        threading.Thread.__init__(self)
        self._session = session
        self._runnable = True
        # Stored in milliseconds; converted back to seconds for queue.get().
        self._queue_timeout = timeout * 1000
        self._queue = queue.Queue()
        self._ripper = ripper

    def start(self):
        """Start the event loop."""
        self._session.on(
            spotify.SessionEvent.NOTIFY_MAIN_THREAD,
            self._on_notify_main_thread)
        threading.Thread.start(self)

    def stop(self):
        """Stop the event loop."""
        self._runnable = False
        self._session.off(
            spotify.SessionEvent.NOTIFY_MAIN_THREAD,
            self._on_notify_main_thread)

    def run(self):
        timeout_countdown = self._session.process_events()
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive()
        # has been the supported spelling since Python 2.6.
        while self._runnable and self._ripper.is_alive():
            timeout = min(timeout_countdown, self._queue_timeout)
            try:
                self._queue.get(timeout=(timeout / 1000.0))
            except queue.Empty:
                # queue timeout
                timeout_countdown -= timeout
            else:
                # notification
                timeout_countdown = 0
            finally:
                if timeout_countdown <= 0:
                    timeout_countdown = self._session.process_events()

    def _on_notify_main_thread(self, session):
        # WARNING: This event listener is called from an internal libspotify
        # thread. It must not block.
        try:
            self._queue.put_nowait(1)
        except queue.Full:
            print(Fore.RED +
                  "event loop queue full. dropped notification event" +
                  Fore.RESET)
/ER-Evaluation-2.1.0.tar.gz/ER-Evaluation-2.1.0/er_evaluation/data_structures/_data_structures.py | import logging
import numpy as np
import pandas as pd
from igraph import Graph
def compress_memberships(*memberships):
    """
    Compress membership vectors to int values, preserving index compatibility.

    Args:
        memberships: membership vectors (Series) to compress.

    Returns:
        List of Series whose index and values are int codes; indexes are
        compatible across all returned Series.

    Examples:
        >>> membership = pd.Series(["c1", "c1", "c1", "c2", "c2", "c3"], index=[0,1,2,3,4,5])
        >>> compressed, = compress_memberships(membership)
        >>> compressed
        0    0
        1    0
        2    0
        3    1
        4    1
        5    2
        Name: 0, dtype: int8
    """
    # Align every vector on a shared index, then replace that index by its
    # categorical codes so all returned Series are int-indexed consistently.
    joined = pd.concat(memberships, axis=1)
    joined.index = pd.Categorical(joined.index).codes
    # Encode each column's cluster labels as int codes as well.
    return [
        pd.Series(pd.Categorical(joined[name]).codes, index=joined.index, name=name)
        for name in joined.columns
    ]
class MembershipVector(pd.Series):
    """
    Series wrapper to validate membership vector format and log potential issues.

    Given a Series ``membership`` representing a membership vector, you can validate it using:

    .. code::

        membership = MembershipVector(membership)

    This casts its type to the MembershipVector subclass. If ``membership`` is already of the MembershipVector subtype, this does absolutely nothing and simply returns the ``membership`` object as-is. However, if ``membership`` is a Series, then it is validated, potential issues are logged, and then the object is returned as an instance of the MembershipVector subclass.

    This wrapper helps avoid duplicate validation and duplicate logging within the er_evaluation package. Externally, you may use :meth:`ismembership` to validate that a given pandas Series satisfies the requirements of a membership vector.

    Examples:
        >>> series = pd.Series([1,2,3,3])
        >>> membership = MembershipVector(series) # Validates the series and logs potential issues.
        >>> membership = MembershipVector(membership) # Does nothing.
    """
    def __init__(self, data=None, dropna=False, **kwargs):
        # When __new__ short-circuited (data already a MembershipVector),
        # the returned object is fully built: skip re-init and re-validation.
        if not isinstance(data, MembershipVector):
            super().__init__(data=data, **kwargs)
            if ismembership(self):
                # Valid vector: log (but allow) empty data and NA values.
                if len(self) == 0:
                    logging.info("Membership vector is empty.")
                if self.hasnans:
                    logging.info("Membership vector contains NA values.")
            else:
                # Invalid (duplicate/NaN index or not a Series): hard failure.
                logging.critical(f"Invalid membership vector: {self}")
                raise ValueError(f"Invalid membership vector: {self}")
            if dropna:
                # Drop NA cluster assignments in place after validation.
                self.dropna(inplace=True)
    def __new__(cls, data=None, dropna=False, **kwargs):
        # Short-circuit: wrapping an existing MembershipVector returns it
        # unchanged, avoiding duplicate validation and logging.
        if isinstance(data, MembershipVector):
            return data
        return super().__new__(cls)
def isgraph(obj):
    r"""
    Check if given object is an iGraph :py:class:`Graph`.

    Graph:
        A graph is an igraph :py:class:`Graph` object with vertices representing clustering elements and with edges between all elements belonging to the same cluster. Note that clusters are unnamed in graphs. Example::

                1───2       4
                │   │       │       6
                └─3─┘       5

    Returns:
        bool: True if Graph, False otherwise.

    Examples:
        >>> import igraph
        >>> g = igraph.Graph()
        >>> isgraph(g)
        True
    """
    # isinstance already yields the boolean; no if/else ladder needed.
    return isinstance(obj, Graph)
def ismembership(obj):
    r"""
    Check if given object is a membership vector.

    Membership vector:
        A membership vector is a pandas :py:class:`Series` indexed by the elements of :math:`E` and with values corresponding to cluster identifiers. That is, the membership vector maps elements to clusters. Example::

            >>> pd.Series(["c1", "c1", "c1", "c2", "c2", "c3"], index=[0,1,2,3,4,5])
            0    c1
            1    c1
            2    c1
            3    c2
            4    c2
            5    c3
            dtype: object

    Returns:
        bool: True if membership vector, False otherwise.

    Examples:
        >>> import pandas as pd
        >>> obj = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])
        >>> ismembership(obj)
        True

        >>> ismembership([1,1,2,3,2,4,4,4])
        False
    """
    if isinstance(obj, pd.Series):
        # A valid membership vector has a unique, NA-free index.
        # (Idiom: direct boolean expression instead of all([x == False, ...]).)
        return not obj.index.has_duplicates and not obj.index.hasnans
    return False
def isclusters(obj):
    r"""
    Check if given object is a clusters dictionary.

    Clusters dictionary:
        A clusters dictionary is a Python :py:class:`dict` with keys corresponding to cluster identifiers and values being list of cluster elements. Example::

            {'c1': array([0, 1, 2]), 'c2': array([3, 4]), 'c3': array([5])}

    Returns:
        bool: True if clusters dictionary, False otherwise.

    Examples:
        >>> from numpy import array
        >>> obj = {'c1': array([0, 1, 2]), 'c2': array([3, 4]), 'c3': array([5])}
        >>> isclusters(obj)
        True

        Dictionary values should be numpy arrays:

        >>> obj = {'c1': [0, 1, 2], 'c2': [3, 4], 'c3': [5]}
        >>> isclusters(obj)
        False

    Notes:
        * Clustering validity is not checked: clusters may overlap, contain
          NA values, or repeat elements without being rejected here.
    """
    # Shape check only: a dict whose every value is an ndarray.
    if not isinstance(obj, dict):
        return False
    return all(isinstance(members, np.ndarray) for members in obj.values())
def ispairs(obj):
    r"""
    Check if given object is a pairs list.

    A pairwise links list is an array of pairwise links between elements of the clustering, where each element of a cluster is linked to every other element of the same cluster. Note that clusters are unnamed in pairwise links lists. Example::

        array([[0, 1],
               [0, 2],
               [1, 2],
               [3, 4]])

    Returns:
        bool: True if a pairs list, False otherwise.

    Examples:
        >>> from numpy import array
        >>> obj = array([[0, 1], [0, 2], [1, 2], [3, 4]])
        >>> ispairs(obj)
        True
        >>> obj = [[0, 1], [0, 2], [1, 2], [3, 4]]
        >>> ispairs(obj)
        False
    """
    if not isinstance(obj, np.ndarray):
        return False
    # A valid pairs list is strictly 2-dimensional with two columns.
    # Checking ndim first fixes an IndexError the previous version raised
    # on 1-D input (shape[1] does not exist) and rejects >2-D arrays that
    # merely happen to have shape[1] == 2.
    return obj.ndim == 2 and obj.shape[1] == 2
def membership_to_clusters(membership):
    r"""
    Transform membership vector into clusters dictionary.

    Args:
        membership (Series): Membership vector.

    Returns:
        Clusters dictionary.

    Examples:
        >>> import pandas as pd
        >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])
        >>> membership_to_clusters(membership)
        {1: array([1, 2]), 2: array([3, 5]), 3: array([4]), 4: array([6, 7, 8])}
    """
    membership = MembershipVector(membership)
    # Grouping the series by its own values maps each cluster identifier to
    # the index labels (elements) belonging to it.
    groups = membership.groupby(membership).groups
    return {label: np.array(elements) for label, elements in groups.items()}
def membership_to_pairs(membership):
    r"""
    Transform membership vector into pairs list.

    Args:
        membership (Series): Membership vector.

    Returns:
        Pairs list.

    Examples:
        >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])
        >>> membership_to_pairs(membership)
        array([[1, 2],
               [3, 5],
               [6, 7],
               [6, 8],
               [7, 8]])
    """
    # Route through the clusters representation and enumerate pairs there.
    return clusters_to_pairs(membership_to_clusters(MembershipVector(membership)))
def membership_to_graph(membership):
    r"""
    Transform membership vector into Graph.

    Args:
        membership (Series): Membership vector.

    Returns:
        Graph, with all elements converted to string.

    Note:
        All elements are converted to string before creating the graph.

    Examples:
        >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])
        >>> graph = membership_to_graph(membership)
    """
    mv = MembershipVector(membership)
    pairs = membership_to_pairs(mv)
    # The full index (including singletons) supplies the graph vertices.
    return pairs_to_graph(pairs, mv.index.values)
def clusters_to_pairs(clusters):
    r"""
    Transform clusters dictionary into pairs list.

    Args:
        clusters (dictionary): Dictionary mapping cluster identifiers to numpy array of cluster elements.

    Returns:
        Pairs list.

    Examples:
        >>> from numpy import array
        >>> clusters = {1: array([1, 2]), 2: array([3, 5]), 3: array([4]), 4: array([6, 7, 8])}
        >>> clusters_to_pairs(clusters)
        array([[1, 2],
               [3, 5],
               [6, 7],
               [6, 8],
               [7, 8]])
    """
    assert isclusters(clusters)

    def single_cluster_to_pairs(c):
        """
        Return all unordered element pairs within a single cluster.

        References:
            - Carlos Gameiro (2021) Fast pairwise combinations in NumPy.
              Accessed online on November 1, 2022.
              https://carlostgameiro.medium.com/fast-pairwise-combinations-in-numpy-c29b977c33e2
        """
        # Upper-triangular indices (k=1 excludes the diagonal) enumerate
        # every i<j combination; fancy-indexing c with them yields pairs.
        I = np.stack(np.triu_indices(len(c), k=1), axis=-1)
        return c[I]

    # np.vstack replaces np.row_stack, which was only an alias and has been
    # removed in NumPy 2.0; behavior is identical.
    return np.vstack([single_cluster_to_pairs(c) for c in clusters.values()])
def clusters_to_membership(clusters):
    r"""
    Transform clusters dictionary into membership vector.

    Args:
        clusters (dictionary): Dictionary mapping cluster identifiers to numpy array of cluster elements.

    Returns:
        Membership vector.

    Examples:
        >>> from numpy import array
        >>> clusters = {1: array([1, 2]), 2: array([3, 5]), 3: array([4]), 4: array([6, 7, 8])}
        >>> clusters_to_membership(clusters)
        1    1
        2    1
        3    2
        5    2
        4    3
        6    4
        7    4
        8    4
        dtype: int64
    """
    assert isclusters(clusters)
    # One Series per cluster (scalar value = cluster id, index = members),
    # then concatenate into a single membership vector.
    per_cluster = [pd.Series(label, index=members)
                   for label, members in clusters.items()]
    return pd.concat(per_cluster)
def clusters_to_graph(clusters):
    r"""
    Transform clusters dictionary into Graph.

    Args:
        clusters (dictionary): Dictionary mapping cluster identifiers to numpy array of cluster elements.

    Returns:
        Graph with one vertex per element and edges within each cluster.

    Examples:
        >>> from numpy import array
        >>> clusters = {1: array([1, 2]), 2: array([3, 5]), 3: array([4]), 4: array([6, 7, 8])}
        >>> graph = clusters_to_graph(clusters)
    """
    assert isclusters(clusters)
    # Every element (including singletons) must become a vertex, so collect
    # all cluster members before building the graph from the pairs.
    all_elements = np.concatenate(list(clusters.values()))
    return pairs_to_graph(clusters_to_pairs(clusters), all_elements)
def pairs_to_membership(pairs, indices):
    r"""Transform pairs list into membership vector.

    Args:
        pairs (ndarray): array of paired elements.
        indices (ndarray): flat array of all elements to consider (paired and non-paired), including singletons.

    Returns:
        Membership vector

    Examples:
        >>> from numpy import array
        >>> pairs = array([[1, 2], [3, 5], [6, 7], [6, 8], [7, 8]])
        >>> indices = array([1,2,3,4,5,6,7,8])
        >>> pairs_to_membership(pairs, indices)
        1    0
        2    0
        3    1
        4    2
        5    1
        6    3
        7    3
        8    3
        dtype: int64
    """
    assert ispairs(pairs)
    assert all(np.isin(pairs.flatten(), indices))
    # Build the link graph first; its connected components define clusters.
    graph = pairs_to_graph(pairs, indices)
    return graph_to_membership(graph)
def pairs_to_clusters(pairs, indices):
    r"""Transform pairs list into clusters dictionary.

    Args:
        pairs (ndarray): array of paired elements.
        indices (ndarray): flat array of all elements to consider (paired and non-paired), including singletons.
    """
    assert ispairs(pairs)
    assert all(np.isin(pairs.flatten(), indices))
    membership = pairs_to_membership(pairs, indices)
    return membership_to_clusters(membership)
def pairs_to_graph(pairs, indices):
    r"""
    Transform pairs list into Graph.

    Args:
        pairs (ndarray): array of paired elements.
        indices (ndarray): flat array of all elements to consider (paired and non-paired), including singletons.

    Returns:
        Graph corresponding to the pairs list with given indices as vertices. Note that all elements are converted to string before creating the graph.

    Examples:
        >>> from numpy import array
        >>> pairs = array([[1, 2], [3, 5], [6, 7], [6, 8], [7, 8]])
        >>> indices = array([1,2,3,4,5,6,7,8])
        >>> graph = pairs_to_graph(pairs, indices)
    """
    assert ispairs(pairs)
    assert all(np.isin(pairs.flatten(), indices))
    # igraph identifies vertices by (string) name, hence the str conversion
    # of both the vertex set and the edge endpoints.
    graph = Graph()
    graph.add_vertices(indices.astype(str))
    graph.add_edges(pairs.astype(str))
    return graph
def graph_to_membership(graph):
    r"""
    Transform Graph into membership vector.

    Args:
        graph (Graph): igraph Graph object.

    Returns:
        Membership vector (cluster identifiers are arbitrary component ids).

    Examples:
        >>> from numpy import array
        >>> membership = pd.Series(index=[1,2,3,4,5,6,7,8], data=[1,1,2,3,2,4,4,4])
        >>> graph = membership_to_graph(membership)
        >>> graph_to_membership(graph) # Note that cluster identifiers are arbitrary.
        1    0
        2    0
        3    1
        4    2
        5    1
        6    3
        7    3
        8    3
        dtype: int64
    """
    assert isgraph(graph)
    # Vertex names are the original element labels; the connected-component
    # id of each vertex serves as its cluster identifier.
    vertex_names = graph.get_vertex_dataframe().name.values
    component_ids = graph.connected_components().membership
    return pd.Series(index=vertex_names, data=component_ids)
def graph_to_clusters(graph):
    r"""
    Transform Graph into clusters dictionary.

    Args:
        graph (Graph): igraph Graph object.

    Returns:
        Clusters dictionary (keys are arbitrary component ids).

    Examples:
        >>> from numpy import array
        >>> clusters = {1: array([1, 2]), 2: array([3, 5]), 3: array([4]), 4: array([6, 7, 8])}
        >>> graph = clusters_to_graph(clusters)
        >>> graph_to_clusters(graph) # doctest: +NORMALIZE_WHITESPACE
        {0: array(['1', '2'], dtype=object),
         1: array(['3', '5'], dtype=object),
         2: array(['4'], dtype=object),
         3: array(['6', '7', '8'], dtype=object)}
    """
    assert isgraph(graph)
    membership = graph_to_membership(graph)
    return membership_to_clusters(membership)
def graph_to_pairs(graph):
    r"""
    Transform Graph into pairs list.

    Args:
        graph (Graph): igraph Graph object.

    Returns:
        Pairs list (element labels as strings, as stored in the graph).

    Examples:
        >>> from numpy import array
        >>> pairs = array([[1, 2], [3, 5], [6, 7], [6, 8], [7, 8]])
        >>> indices = array([1,2,3,4,5,6,7,8])
        >>> graph = pairs_to_graph(pairs, indices)
        >>> graph_to_pairs(graph)
        array([['1', '2'],
               ['3', '5'],
               ['6', '7'],
               ['6', '8'],
               ['7', '8']], dtype='<U1')
    """
    assert isgraph(graph)
    # Edges reference vertex ids; translate both endpoints back to names.
    names = graph.get_vertex_dataframe().name.values
    return np.array([[names[a], names[b]] for a, b in graph.get_edgelist()])
/ansible-8.3.0-py3-none-any.whl/ansible_collections/azure/azcollection/plugins/modules/azure_rm_ddosprotectionplan_info.py |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: azure_rm_ddosprotectionplan_info
version_added: "1.7.0"
short_description: Get Azure DDoS protection plan
description:
- Get facts of Azure DDoS protection plan.
options:
resource_group:
description:
- The name of the resource group.
type: str
name:
description:
- The name of the DDoS protection plan.
type: str
extends_documentation_fragment:
- azure.azcollection.azure
author:
- Praveen Ghuge (@praveenghuge)
- Karl Dasan (@ikarldasan)
'''
EXAMPLES = '''
- name: Get facts of specific DDoS protection plan
azure_rm_ddosprotectionplan_info:
resource_group: myResourceGroup
name: myDDoSProtectionPlan
'''
RETURN = '''
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
try:
from azure.core.exceptions import ResourceNotFoundError
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureDDoSProtectionPlanInfo(AzureRMModuleBase):
    """Ansible info module gathering facts about Azure DDoS protection plans.

    Instantiating this class runs the module: AzureRMModuleBase.__init__
    drives the whole lifecycle and ends up calling exec_module, so every
    attribute must be assigned before the super().__init__ call below.
    """
    def __init__(self):
        # Both parameters are optional; which lookup is performed depends on
        # what the user supplies (see exec_module).
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str'
            ),
            name=dict(
                type='str'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False)
        self.resource_group = None
        self.name = None
        # NOTE(review): 'tags' is not in module_arg_spec, so exec_module never
        # sets it and has_tags(..., None) filtering is effectively a no-op —
        # confirm whether a tags option was intended.
        self.tags = None
        super(AzureDDoSProtectionPlanInfo, self).__init__(
            self.module_arg_spec, supports_check_mode=True, supports_tags=False)
    def exec_module(self, **kwargs):
        """Dispatch to the appropriate lookup and populate self.results.

        Returns:
            dict: results with key 'ddosprotectionplan' holding a list of
            serialized plans.
        """
        # Copy validated module parameters onto attributes of this instance.
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        if self.name is not None:
            results = self.get()
        elif self.resource_group:
            # all the DDoS protection plan listed in that specific resource group
            results = self.list_resource_group()
        else:
            # all the DDoS protection plan listed in the subscription
            results = self.list_subscription()
        self.results['ddosprotectionplan'] = [
            self.ddos_protection_plan_to_dict(x) for x in results]
        return self.results
    def get(self):
        """Fetch a single plan by resource group and name.

        Returns a one-element list (or empty if the tags filter rejects it);
        fails the module run when the plan does not exist.
        """
        response = None
        results = []
        try:
            response = self.network_client.ddos_protection_plans.get(
                self.resource_group, self.name)
            self.log("Response : {0}".format(response))
        except ResourceNotFoundError as e:
            self.fail('Could not get info for DDoS protection plan. {0}'.format(str(e)))
        if response and self.has_tags(response.tags, self.tags):
            results = [response]
        return results
    def list_resource_group(self):
        """List all plans in self.resource_group, filtered by tags."""
        self.log('List items for resource group')
        try:
            response = self.network_client.ddos_protection_plans.list_by_resource_group(
                self.resource_group)
        except ResourceNotFoundError as exc:
            self.fail(
                "Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(item)
        return results
    def list_subscription(self):
        """List all plans in the whole subscription, filtered by tags."""
        self.log('List items for subscription')
        try:
            response = self.network_client.ddos_protection_plans.list()
        except ResourceNotFoundError as exc:
            self.fail(
                "Failed to list DDoS protection plan in the subscription - {0}".format(str(exc)))
        results = []
        for item in response:
            if self.has_tags(item.tags, self.tags):
                results.append(item)
        return results
    def ddos_protection_plan_to_dict(self, item):
        # turn DDoS protection plan object into a dictionary (serialization)
        ddos_protection_plan = item.as_dict()
        result = dict(
            additional_properties=ddos_protection_plan.get('additional_properties', None),
            id=ddos_protection_plan.get('id', None),
            name=ddos_protection_plan.get('name', None),
            type=ddos_protection_plan.get('type', None),
            location=ddos_protection_plan.get('location', None),
            tags=ddos_protection_plan.get('tags', None),
            etag=ddos_protection_plan.get('etag', None),
            resource_guid=ddos_protection_plan.get('resource_guid', None),
            provisioning_state=ddos_protection_plan.get('provisioning_state', None),
            virtual_networks=ddos_protection_plan.get('virtual_networks', None)
        )
        return result
def main():
    # Instantiating the module class runs it: AzureRMModuleBase.__init__
    # executes the module and exits the process with the results.
    AzureDDoSProtectionPlanInfo()
if __name__ == '__main__':
    main()
/the_census-2.1.2.tar.gz/the_census-2.1.2/the_census/_variables/search/service.py | from logging import Logger
import pandas as pd
from the_census._utils.log.factory import ILoggerFactory
from the_census._utils.timer import timer
from the_census._variables.models import GroupCode
from the_census._variables.repository.interface import IVariableRepository
from the_census._variables.search.interface import IVariableSearchService
class VariableSearchService(IVariableSearchService[pd.DataFrame]):
    """Regex search over census groups and variables, backed by a variable
    repository that returns pandas DataFrames."""

    _variable_repository: IVariableRepository[pd.DataFrame]
    _logger: Logger

    def __init__(
        self,
        variableRepository: IVariableRepository[pd.DataFrame],
        loggerFactory: ILoggerFactory,
    ) -> None:
        self._variable_repository = variableRepository
        self._logger = loggerFactory.getLogger(__name__)

    @timer
    def search_groups(self, regex: str) -> pd.DataFrame:
        """Return groups whose description matches ``regex`` (case-insensitive)."""
        self._logger.debug(f"searching groups for regex: `{regex}`")

        groups = self._variable_repository.get_groups()
        if groups.empty:
            self._logger.info("There are no groups for this dataset")
            return pd.DataFrame()

        matches = groups["description"].str.contains(regex, case=False)  # type: ignore
        return groups[matches].reset_index(drop=True)

    @timer
    def search_variables(
        self,
        regex: str,
        *in_groups: GroupCode,
    ) -> pd.DataFrame:
        """Return variables whose name matches ``regex``, optionally limited
        to the given group codes."""
        self._logger.debug(f"searching variables for pattern `{regex}`")

        if in_groups:
            variables = self._variable_repository.get_variables_by_group(*in_groups)
        else:
            variables = self._variable_repository.get_all_variables()

        if variables.empty:
            self._logger.info("There are no variables for this dataset")
            return pd.DataFrame()

        matches = variables["name"].str.contains(regex, case=False)  # type: ignore
        return variables[matches].reset_index(drop=True)  # type: ignore
/pyParticleEst-1.1.4.tar.gz/pyParticleEst-1.1.4/pyparticleest/smoother.py | import numpy
import copy
from builtins import range
from . import filter as pf
from .filter import ParticleApproximation, TrajectoryStep
def bsi_full(model, pa, ptraj, pind, future_trajs, find, ut, yt, tt, cur_ind):
    r"""
    Perform backward simulation by drawing particles from
    the categorical distribution with weights given by
    :math:`\omega_{t|T}^i = \omega_{t|t}^i p(x_{t+1}|x^i)`

    (Raw docstring: ``\omega`` in a plain string is an invalid escape
    sequence and warns on modern Python.)

    Args:
     - model (FFBSi): model defining probability density function
     - pa (ParticleApproximation): particles approximation from which to sample
     - ptraj: past filtered trajectory for {0:t-1}
     - pind: ancestor indices into ptraj for the particles in pa
     - future_trajs (array-like): trajectory estimate of {t+1:T}
     - find: indices selecting which future trajectory each draw conditions on
     - ut (array-like): inputs signal for {t:T}
     - yt (array-like): measurements for {t:T}
     - tt (array-like): time stamps for {t:T}
     - cur_ind (int): index of the current time step
    """
    M = len(find)
    N = len(pa.w)
    res = numpy.empty(M, dtype=int)
    for j in range(M):
        currfind = find[j] * numpy.ones((N,), dtype=int)
        p_next = model.logp_xnext_full(pa.part, ptraj, pind,
                                       future_trajs, currfind,
                                       ut=ut, yt=yt, tt=tt, cur_ind=cur_ind)
        # Combine filter log-weights with the backward transition density;
        # subtract the max before exponentiating for numerical stability.
        w = pa.w + p_next
        w = w - numpy.max(w)
        w_norm = numpy.exp(w)
        w_norm /= numpy.sum(w_norm)
        res[j] = pf.sample(w_norm, 1)
    return res
def bsi_rs(model, pa, ptraj, pind, future_trajs, find, ut, yt, tt, cur_ind, maxpdf, max_iter):
    r"""
    Perform backward simulation by using rejection sampling to draw particles
    from the categorical distribution with weights given by
    :math:`\omega_{t|T}^i = \omega_{t|t}^i p(x_{t+1}|x^i)`

    (Raw docstring: ``\omega`` in a plain string is an invalid escape
    sequence and warns on modern Python.)

    Args:
     - model (FFBSi): model defining probability density function
     - pa (ParticleApproximation): particles approximation from which to sample
     - future_trajs (array-like): trajectory estimate of {t+1:T}
     - ut (array-like): inputs signal for {t:T}
     - yt (array-like): measurements for {t:T}
     - tt (array-like): time stamps for {t:T}
     - maxpdf (float): argmax p(x_{t+1:T}|x_t)
     - max_iter (int): number of attempts before falling back to bsi_full
    """
    M = len(find)
    todo = numpy.arange(M)
    res = numpy.empty(M, dtype=int)
    # Normalize the filter log-weights into a proposal distribution.
    weights = numpy.copy(pa.w)
    weights -= numpy.max(weights)
    weights = numpy.exp(weights)
    weights /= numpy.sum(weights)
    for _i in range(max_iter):
        ind = numpy.random.permutation(pf.sample(weights, len(todo)))
        pn = model.logp_xnext_full(pa.part[ind], ptraj, pind[ind],
                                   future_trajs, todo,
                                   ut=ut, yt=yt, tt=tt, cur_ind=cur_ind)
        # Accept proposals with probability p(x_{t+1}|x_t) / maxpdf.
        test = numpy.log(numpy.random.uniform(size=len(todo)))
        accept = test < pn - maxpdf
        res[todo[accept]] = ind[accept]
        todo = todo[~accept]
        if (len(todo) == 0):
            return res
    # TODO, is there an efficient way to store those weights
    # already calculated to avoid double work, or will that
    # take more time than simply evaluating them all again?
    res[todo] = bsi_full(model, pa, ptraj, pind, future_trajs, todo,
                         ut=ut, yt=yt, tt=tt, cur_ind=cur_ind)
    return res
def bsi_rsas(model, pa, ptraj, pind, future_trajs, find, ut, yt, tt, cur_ind, maxpdf, x1, P1, sv, sw, ratio):
    r"""
    Perform backward simulation by using rejection sampling to draw particles
    from the categorical distribution with weights given by
    :math:`\omega_{t|T}^i = \omega_{t|t}^i p(x_{t+1}|x^i)`

    Adaptively determine when to fall back to bsi_full by using a Kalman
    filter to track the predicted acceptance rate of the rejection sampler.

    Based on "Adaptive Stopping for Fast Particle Smoothing" by
    Taghavi, Lindsten, Svensson and Sch\"{o}n. See original article for
    details about the meaning of the Kalman filter parameters.

    (Raw docstring: ``\omega`` and ``\"`` in a plain string are invalid
    escape sequences and warn on modern Python.)

    Args:
     - model (FFBSi): model defining probability density function
     - pa (ParticleApproximation): particles approximation from which to sample
     - future_trajs (array-like): trajectory estimate of {t+1:T}
     - ut (array-like): inputs signal for {t:T}
     - yt (array-like): measurements for {t:T}
     - tt (array-like): time stamps for {t:T}
     - maxpdf (float): argmax p(x_{t+1:T}|x_t)
     - x1 (float): initial state of Kalman filter
     - P1 (float): initial covariance of Kalman filter estimate
     - sv (float): process noise (for Kalman filter)
     - sw (float): measurement noise (for Kalman filter)
     - ratio (float): cost ratio of running rejection sampling compared to
       switching to the full bsi (D_0 / D_1)
    """
    M = len(find)
    todo = numpy.arange(M)
    res = numpy.empty(M, dtype=int)
    # Normalize the filter log-weights into a proposal distribution.
    weights = numpy.copy(pa.w)
    weights -= numpy.max(weights)
    weights = numpy.exp(weights)
    weights /= numpy.sum(weights)
    pk = x1
    Pk = P1
    stop_criteria = ratio / len(pa)
    while (True):
        ind = numpy.random.permutation(pf.sample(weights, len(todo)))
        pn = model.logp_xnext_full(pa.part[ind], ptraj, pind[ind],
                                   future_trajs, todo,
                                   ut=ut, yt=yt, tt=tt, cur_ind=cur_ind)
        test = numpy.log(numpy.random.uniform(size=len(todo)))
        accept = test < pn - maxpdf
        ak = numpy.sum(accept)
        mk = len(todo)
        res[todo[accept]] = ind[accept]
        todo = todo[~accept]
        if (len(todo) == 0):
            return res
        # meas update for adaptive stop
        mk2 = mk * mk
        sw2 = sw * sw
        pk = pk + (mk * Pk) / (mk2 * Pk + sw2) * (ak - mk * pk)
        Pk = (1 - (mk2 * Pk) / (mk2 * Pk + sw2)) * Pk
        # predict
        pk = (1 - ak / mk) * pk
        Pk = (1 - ak / mk) ** 2 * Pk + sv * sv
        # Fall back to exhaustive evaluation once the predicted acceptance
        # rate drops below the cost-ratio threshold.
        if (pk < stop_criteria):
            break
    res[todo] = bsi_full(model, pa, ptraj, pind, future_trajs, todo,
                         ut=ut, yt=yt, tt=tt, cur_ind=cur_ind)
    return res
def bsi_mcmc(model, pa, ptraj, pind, future_trajs, find, ut, yt, tt, cur_ind, R, ancestors):
    r"""
    Perform backward simulation by using Metropolis-Hastings to draw particles
    from the categorical distribution with weights given by
    :math:`\omega_{t|T}^i = \omega_{t|t}^i p(x_{t+1}|x^i)`

    (Raw docstring: ``\omega`` in a plain string is an invalid escape
    sequence and warns on modern Python.)

    Args:
     - model (FFBSi): model defining probability density function
     - pa (ParticleApproximation): particles approximation from which to sample
     - future_trajs (array-like): trajectory estimate of {t+1:T}
     - ut (array-like): inputs signal for {t:T}
     - yt (array-like): measurements for {t:T}
     - tt (array-like): time stamps for {t:T}
     - R (int): number of iterations to run the markov chain
     - ancestors (array-like): ancestor of each particle from the particle
       filter, used to initialize the chain with the filtered trajectory
    """
    # Perform backward simulation using an MCMC sampler proposing new
    # backward particles, initialized with the filtered trajectory
    M = len(find)
    ind = ancestors
    # Normalize the filter log-weights into a proposal distribution.
    weights = numpy.copy(pa.w)
    weights -= numpy.max(weights)
    weights = numpy.exp(weights)
    weights /= numpy.sum(weights)
    pcurr = model.logp_xnext_full(pa.part[ind], ptraj, pind[ind],
                                  future_trajs, find,
                                  ut=ut, yt=yt, tt=tt, cur_ind=cur_ind)
    for _j in range(R):
        propind = numpy.random.permutation(pf.sample(weights, M))
        pprop = model.logp_xnext_full(pa.part[propind], ptraj, pind[propind],
                                      future_trajs, find,
                                      ut=ut, yt=yt, tt=tt, cur_ind=cur_ind)
        # Metropolis acceptance: accept with probability min(1, pprop/pcurr).
        diff = pprop - pcurr
        diff[diff > 0.0] = 0.0
        test = numpy.log(numpy.random.uniform(size=M))
        accept = test < diff
        ind[accept] = propind[accept]
        pcurr[accept] = pprop[accept]
    return ind
class SmoothTrajectory(object):
    """
    Create smoothed trajectory from filtered trajectory

    Args:
     - pt (ParticleTrajectory): Forward estimates (typically
       generated by a ParticleFilter), combined with inputs and measurements
     - M (int): Number of smoothed trajectories to create
     - method (string): Smoothing method to use
     - options (dict): options to pass on to the smoothing algorithm
    """

    def __init__(self, pt, M=1, method='full', options=None):
        self.traj = None
        self.u = numpy.copy(pt.uvec)
        self.y = numpy.copy(pt.yvec)
        self.t = numpy.copy(pt.tvec)
        self.M = M
        self.model = pt.pf.model
        if (method == 'full' or method == 'mcmc' or method == 'rs' or
                method == 'rsas'):
            self.perform_bsi(pt=pt, M=M, method=method, options=options)
        elif (method == 'ancestor'):
            self.perform_ancestors(pt=pt, M=M)
        elif (method == 'mhips' or method == 'mhips_reduced'):
            reduced = (method == 'mhips_reduced')
            # Initialize using forward trajectories
            self.traj = self.perform_ancestors_int(pt=pt, M=M)
            # NOTE(review): 'options' must be a dict for mhips/mhbp;
            # the default None would raise here — confirm intended usage.
            if 'R' in options:
                R = options['R']
            else:
                R = 10
            for _i in range(R):
                # Recover filtering statistics for linear states
                if hasattr(self.model, 'pre_mhips_pass'):
                    self.traj = self.model.pre_mhips_pass(self)
                self.traj = self.perform_mhips_pass(options=options,
                                                    reduced=reduced)
        elif (method == 'mhbp'):
            if 'R' in options:
                R = options['R']
            else:
                R = 10
            self.perform_mhbp(pt=pt, M=M, R=R)
        else:
            raise ValueError('Unknown smoother: %s' % method)
        if hasattr(self.model, 'post_smoothing'):
            self.traj = self.model.post_smoothing(self)

    def __len__(self):
        return len(self.traj)

    def perform_ancestors(self, pt, M):
        """
        Create smoothed trajectories by taking the forward trajectories

        Args:
         - pt (ParticleTrajectory): forward trajectories
         - M (int): number of trajectories to create
        """
        self.traj = self.perform_ancestors_int(pt, M)
        if hasattr(self.model, 'post_smoothing'):
            # Do e.g. constrained smoothing for RBPS models
            self.traj = self.model.post_smoothing(self)

    def calculate_ancestors(self, pt, ind):
        """
        Backtrack the ancestral paths of the particles selected by 'ind' at
        the final time step, producing M complete smoothed trajectories.
        """
        T = len(pt)
        M = len(ind)
        ancestors = pt[T - 1].ancestors[ind]
        find = numpy.arange(M, dtype=int)
        last_part = self.model.sample_smooth(part=pt[T - 1].pa.part[ind],
                                             ptraj=pt[:(T - 1)], anc=ancestors,
                                             future_trajs=None, find=None,
                                             ut=self.u, yt=self.y,
                                             tt=self.t, cur_ind=T - 1)
        traj = numpy.empty((len(pt),), dtype=object)
        traj[T - 1] = TrajectoryStep(ParticleApproximation(last_part),
                                     numpy.arange(M, dtype=int))
        for t in reversed(range(T - 1)):
            ind = ancestors
            ancestors = pt[t].ancestors[ind]
            # Select 'previous' particle
            traj[t] = TrajectoryStep(
                ParticleApproximation(
                    self.model.sample_smooth(part=pt[t].pa.part[ind],
                                             ptraj=pt[:t],
                                             anc=ancestors,
                                             future_trajs=traj[(t + 1):],
                                             find=find,
                                             ut=self.u,
                                             yt=self.y,
                                             tt=self.t,
                                             cur_ind=t)),
                ancestors=find)
        return traj

    def perform_ancestors_int(self, pt, M):
        """
        Create smoothed trajectories by taking the forward trajectories,
        don't perform post processing

        Args:
         - pt (ParticleTrajectory): forward trajectories
         - M (int): number of trajectories to create
        """
        # Normalize the final-time log weights and sample M end particles.
        tmp = numpy.copy(pt[-1].pa.w)
        tmp -= numpy.max(tmp)
        tmp = numpy.exp(tmp)
        tmp = tmp / numpy.sum(tmp)
        ind = pf.sample(tmp, M)
        return self.calculate_ancestors(pt, ind)

    def perform_bsi(self, pt, M, method, options):
        """
        Create smoothed trajectories using Backward Simulation

        Args:
         - pt (ParticleTrajectory): forward trajectories
         - M (int): number of trajectories to create
         - method (string): Type of backward simulation to use
         - options (dict): Parameters to the backward simulator
        """
        # Sample from end time estimates
        tmp = numpy.copy(pt[-1].pa.w)
        tmp -= numpy.max(tmp)
        tmp = numpy.exp(tmp)
        tmp = tmp / numpy.sum(tmp)
        ind = pf.sample(tmp, M)
        ancestors = pt[-1].ancestors[ind]
        last_part = self.model.sample_smooth(part=pt[-1].pa.part[ind],
                                             ptraj=pt[:-1], anc=ancestors,
                                             future_trajs=None, find=None,
                                             ut=self.u, yt=self.y,
                                             tt=self.t, cur_ind=len(pt) - 1)
        self.traj = numpy.empty((len(pt),), dtype=object)
        self.traj[-1] = TrajectoryStep(ParticleApproximation(last_part),
                                       numpy.arange(M, dtype=int))
        # Unpack the sampler-specific options up front.
        if (method == 'full'):
            pass
        elif (method == 'mcmc' or method == 'ancestor' or method == 'mhips'):
            pass
        elif (method == 'rs'):
            max_iter = options['R']
        elif (method == 'rsas'):
            x1 = options['x1']
            P1 = options['P1']
            sv = options['sv']
            sw = options['sw']
            ratio = options['ratio']
        else:
            raise ValueError('Unknown sampler: %s' % method)
        # dtype=int: numpy.int was a deprecated alias of the builtin int and
        # has been removed from NumPy (>= 1.24).
        find = numpy.arange(M, dtype=int)
        for cur_ind in reversed(range(len(pt) - 1)):
            ft = self.traj[(cur_ind + 1):]
            ut = self.u
            yt = self.y
            tt = self.t
            if (method == 'rs'):
                ind = bsi_rs(self.model, pt[cur_ind].pa,
                             pt[:cur_ind], pt[cur_ind].ancestors,
                             ft, find,
                             ut=ut, yt=yt, tt=tt, cur_ind=cur_ind,
                             maxpdf=options['maxpdf'][cur_ind],
                             max_iter=int(max_iter))
            elif (method == 'rsas'):
                ind = bsi_rsas(self.model, pt[cur_ind].pa,
                               pt[:cur_ind], pt[cur_ind].ancestors,
                               ft, find,
                               ut=ut, yt=yt, tt=tt, cur_ind=cur_ind,
                               maxpdf=options['maxpdf'][cur_ind], x1=x1,
                               P1=P1, sv=sv, sw=sw, ratio=ratio)
            elif (method == 'mcmc'):
                ind = bsi_mcmc(self.model, pt[cur_ind].pa,
                               pt[:cur_ind], pt[cur_ind].ancestors,
                               ft, find,
                               ut=ut, yt=yt, tt=tt, cur_ind=cur_ind,
                               R=options['R'], ancestors=ancestors)
                ancestors = pt[cur_ind].ancestors[ind]
            elif (method == 'full'):
                ind = bsi_full(self.model, pt[cur_ind].pa,
                               pt[:cur_ind], pt[cur_ind].ancestors,
                               ft, find,
                               ut=ut, yt=yt, tt=tt, cur_ind=cur_ind)
            elif (method == 'ancestor'):
                ind = ancestors
                ancestors = pt[cur_ind].ancestors[ind]
            # Select 'previous' particle
            find = numpy.arange(M, dtype=int)
            tmp = self.model.sample_smooth(part=pt[cur_ind].pa.part[ind],
                                           ptraj=pt[:cur_ind],
                                           anc=ancestors,
                                           future_trajs=ft,
                                           find=find,
                                           ut=ut,
                                           yt=yt,
                                           tt=tt,
                                           cur_ind=cur_ind)
            self.traj[cur_ind] = TrajectoryStep(ParticleApproximation(tmp),
                                                numpy.arange(M, dtype=int))

    def perform_mhbp(self, pt, M, R, reduced=False):
        """
        Create smoothed trajectories using Metropolis-Hastings Backward Proposer

        Args:
         - pt (ParticleTrajectory): forward trajectories
         - M (int): number of trajectories to create
         - R (int): Number of proposals for each time step
        """
        T = len(pt)
        ut = self.u
        yt = self.y
        tt = self.t
        straj = numpy.empty((T,), dtype=object)
        # Initialise from end time estimates
        tmp = numpy.copy(pt[-1].pa.w)
        tmp -= numpy.max(tmp)
        tmp = numpy.exp(tmp)
        tmp = tmp / numpy.sum(tmp)
        cind = pf.sample(tmp, M)
        find = numpy.arange(M, dtype=int)
        for t in reversed(range(T)):
            # Initialise from filtered estimate
            if (t < T - 1):
                ft = straj[(t + 1):]
            else:
                ft = None
            # Initialize with filtered estimates
            pnew = pt[t].pa.part[cind]
            if (t > 0):
                anc = pt[t].ancestors[cind]
                # Proposal distribution over ancestors: normalized filter
                # weights at time t-1.
                tmp = numpy.copy(pt[t - 1].pa.w)
                tmp -= numpy.max(tmp)
                tmp = numpy.exp(tmp)
                tmp = tmp / numpy.sum(tmp)
                ptraj = pt[:t]
            else:
                ptraj = None
            for _ in range(R):
                if (t > 0):
                    # Propose new ancestors
                    panc = pf.sample(tmp, M)
                    (pnew, acc) = mc_step(model=self.model,
                                          part=pnew,
                                          ptraj=ptraj,
                                          pind_prop=panc,
                                          pind_curr=anc,
                                          future_trajs=ft,
                                          find=find,
                                          ut=ut,
                                          yt=yt,
                                          tt=tt,
                                          cur_ind=t,
                                          reduced=reduced)
                    anc[acc] = panc[acc]
            # NOTE(review): at t == 0 'anc' retains its value from the
            # previous (t == 1) iteration — confirm this is intended.
            fpart = self.model.sample_smooth(part=pnew,
                                             ptraj=ptraj,
                                             anc=anc,
                                             future_trajs=ft,
                                             find=find,
                                             ut=ut, yt=yt, tt=tt,
                                             cur_ind=t)
            straj[t] = TrajectoryStep(ParticleApproximation(fpart))
            cind = anc
        self.traj = straj
        if hasattr(self.model, 'post_smoothing'):
            # Do e.g. constrained smoothing for RBPS models
            self.traj = self.model.post_smoothing(self)

    def perform_mhips_pass(self, options, reduced=False):
        """
        Runs MHIPS with the proposal density q as p(x_{t+1}|x_t)

        Args:
         - options (None): Unused
         - reduced (bool): use the reduced proposal p(x_{t+1}|x_t)
        """
        T = len(self.traj)
        ut = self.u
        yt = self.y
        tt = self.t
        # dtype=int: numpy.int was a deprecated alias of the builtin int and
        # has been removed from NumPy (>= 1.24).
        pind = numpy.arange(self.M, dtype=int)
        straj = numpy.empty((T,), dtype=object)
        # Handle last time-step separately (no future trajectory).
        pt = self.traj[:T - 1]
        (part, _acc) = mc_step(model=self.model,
                               part=self.traj[-1].pa.part,
                               ptraj=pt,
                               pind_prop=pind,
                               pind_curr=pind,
                               future_trajs=None, find=pind,
                               ut=ut, yt=yt, tt=tt, cur_ind=T - 1,
                               reduced=reduced)
        tmp = numpy.copy(self.model.sample_smooth(part=part,
                                                  ptraj=pt,
                                                  anc=pind,
                                                  future_trajs=None,
                                                  find=pind,
                                                  ut=ut,
                                                  yt=yt,
                                                  tt=tt,
                                                  cur_ind=T - 1))
        straj[T - 1] = TrajectoryStep(ParticleApproximation(tmp), pind)
        for i in reversed(range(1, (T - 1))):
            ft = straj[(i + 1):]
            pt = self.traj[:i]
            (part, _acc) = mc_step(model=self.model,
                                   part=self.traj[i].pa.part,
                                   ptraj=pt,
                                   pind_prop=pind,
                                   pind_curr=pind,
                                   future_trajs=ft, find=pind,
                                   ut=ut, yt=yt, tt=tt, cur_ind=i,
                                   reduced=reduced)
            # The data dimension is not necessarily the same, since self.traj
            # contains data that has been processed by "post_smoothing".
            # This implementation assumes that the existing trajectory contains
            # enough space to hold the data, if that is not the case the
            # model class should extend "pre_mhips_pass" to allocate a larger
            # array
            tmp = self.model.sample_smooth(part=part,
                                           ptraj=pt,
                                           anc=pind,
                                           future_trajs=ft,
                                           find=pind,
                                           ut=ut,
                                           yt=yt,
                                           tt=tt,
                                           cur_ind=i)
            straj[i] = TrajectoryStep(ParticleApproximation(tmp), pind)
        # First time-step: no past trajectory to condition on.
        ft = straj[1:]
        (part, _acc) = mc_step(model=self.model,
                               part=self.traj[0].pa.part,
                               ptraj=None,
                               pind_prop=None,
                               pind_curr=None,
                               future_trajs=ft,
                               find=pind,
                               ut=ut, yt=yt, tt=tt, cur_ind=0,
                               reduced=reduced)
        tmp = self.model.sample_smooth(part,
                                       ptraj=None,
                                       anc=pind,
                                       future_trajs=ft,
                                       find=pind,
                                       ut=ut,
                                       yt=yt,
                                       tt=tt,
                                       cur_ind=0)
        straj[0] = TrajectoryStep(ParticleApproximation(tmp), pind)
        return straj

    def get_smoothed_estimates(self):
        """
        Return smoothed estimates (must first have called 'simulate')

        Returns:
         - (T, N, D) array

           T is the length of the dataset,
           N is the number of particles
           D is the dimension of each particle
        """
        T = len(self.traj)
        N = self.traj[0].pa.part.shape[0]
        D = self.traj[0].pa.part.shape[1]
        est = numpy.empty((T, N, D))
        for t in range(T):
            est[t] = self.traj[t].pa.part
        return est
def mc_step(model, part, ptraj, pind_prop, pind_curr, future_trajs, find,
            ut, yt, tt, cur_ind, reduced):
    """
    Perform a single Metropolis-Hastings iteration of the MCMC sampler used
    for MHIPS and MHBP.

    Args:
     - model: model definition (provides proposal, transition and measurement
       log-densities)
     - part (array-like): currently accepted particles at time cur_ind
     - ptraj: past trajectory steps (up to cur_ind - 1), or None at t = 0
     - pind_prop (array-like): ancestor indices for the proposed particles
     - pind_curr (array-like): ancestor indices for the current particles
     - future_trajs (array-like): particle approximations of {x_{t+1:T|T}},
       or None at the final time step
     - find (array-like): indices into future_trajs matching each particle
     - ut (array-like): inputs for all time steps
     - yt (array-like): measurements for all time steps
     - tt (array-like): timestamps for all time steps
     - cur_ind (int): current time index
     - reduced (bool): if True, propose from p(x_t | x_{t-1}) so that the
       prior and proposal terms cancel in the acceptance ratio

    Returns:
     (part, acc): updated particle array and boolean acceptance mask
    """
    # The previously stored values for part already include the measurement from
    # cur_ind, we therefore need to recompute the sufficient statistics
    # (for Rao-Blackwellized models)
    if (not ptraj is None):
        oldpart = numpy.copy(ptraj[-1].pa.part[pind_curr])
        part = model.cond_predict_single_step(part=oldpart, past_trajs=ptraj[:-1],
                                              pind=ptraj[-1].ancestors[pind_curr],
                                              future_parts=part, find=numpy.arange(len(pind_curr)),
                                              ut=ut, yt=yt, tt=tt, cur_ind=cur_ind - 1)
    else:
        # No past trajectory: condition the initial-state distribution instead.
        part = model.cond_sampled_initial(part, tt[cur_ind])

    if (reduced):
        # "Reduced" variant: propose directly from the dynamics.
        if (ptraj is not None):
            noise = model.sample_process_noise_full(ptraj=ptraj,
                                                    ancestors=pind_prop,
                                                    ut=ut[:cur_ind],
                                                    tt=tt[:cur_ind])
            xprop = numpy.copy(ptraj[-1].pa.part[pind_prop])
            model.update_full(particles=xprop, traj=ptraj,
                              uvec=ut[:cur_ind], yvec=yt[:cur_ind],
                              tvec=tt[:cur_ind],
                              ancestors=pind_prop, noise=noise)
        else:
            xprop = model.create_initial_estimate(len(future_trajs[0].pa.part))

        # Drawing from p(x_{t+1}|x_t), so these will be identical
        logp_prev_prop = 0.0
        logp_prev_curr = 0.0
        logp_q_prop = 0.0
        logp_q_curr = 0.0
    else:
        # General variant: draw from the model's smoothing proposal and keep
        # all four density terms for the acceptance ratio.
        xprop = model.propose_smooth(ptraj=ptraj,
                                     anc=pind_prop,
                                     future_trajs=future_trajs,
                                     find=find,
                                     yt=yt,
                                     ut=ut,
                                     tt=tt,
                                     cur_ind=cur_ind)

        # Accept/reject new sample
        logp_q_prop = model.logp_proposal(prop_part=xprop,
                                          ptraj=ptraj,
                                          anc=pind_prop,
                                          future_trajs=future_trajs,
                                          find=find,
                                          yt=yt,
                                          ut=ut,
                                          tt=tt,
                                          cur_ind=cur_ind)
        logp_q_curr = model.logp_proposal(prop_part=part,
                                          ptraj=ptraj,
                                          anc=pind_curr,
                                          future_trajs=future_trajs,
                                          find=find,
                                          yt=yt,
                                          ut=ut,
                                          tt=tt,
                                          cur_ind=cur_ind)
        if (ptraj is not None):
            logp_prev_prop = model.logp_xnext_singlestep(part=ptraj[-1].pa.part[pind_prop],
                                                         past_trajs=ptraj[:-1],
                                                         pind=ptraj[-1].ancestors[pind_prop],
                                                         future_parts=xprop,
                                                         find=numpy.arange(len(xprop), dtype=int),
                                                         ut=ut, yt=yt, tt=tt,
                                                         cur_ind=cur_ind - 1)
            logp_prev_curr = model.logp_xnext_singlestep(part=ptraj[-1].pa.part[pind_curr],
                                                         past_trajs=ptraj[:-1],
                                                         pind=ptraj[-1].ancestors[pind_curr],
                                                         future_parts=part,
                                                         find=numpy.arange(len(part), dtype=int),
                                                         ut=ut, yt=yt, tt=tt,
                                                         cur_ind=cur_ind - 1)
        else:
            # At t = 0 the "previous" density is the initial-state density.
            logp_prev_prop = model.eval_logp_x0(xprop, tt[0])
            logp_prev_curr = model.eval_logp_x0(part, tt[0])

    # Work on copies so the measurement update does not mutate the inputs.
    xpropy = numpy.copy(xprop)
    curparty = numpy.copy(part)
    if (yt[cur_ind] is not None):
        logp_y_prop = model.measure_full(particles=xpropy, traj=ptraj,
                                         uvec=ut[:cur_ind + 1], yvec=yt[:(cur_ind + 1)],
                                         tvec=tt[:cur_ind + 1], ancestors=pind_prop)
        logp_y_curr = model.measure_full(particles=curparty, traj=ptraj,
                                         uvec=ut[:cur_ind + 1], yvec=yt[:(cur_ind + 1)],
                                         tvec=tt[:cur_ind + 1], ancestors=pind_curr)
    else:
        # No measurement at this time step: the likelihood terms cancel.
        logp_y_prop = 0.0
        logp_y_curr = 0.0

    if (future_trajs is not None):
        logp_next_prop = model.logp_xnext_full(part=xpropy,
                                               past_trajs=ptraj,
                                               pind=pind_prop,
                                               future_trajs=future_trajs,
                                               find=find,
                                               ut=ut,
                                               yt=yt,
                                               tt=tt,
                                               cur_ind=cur_ind)
        logp_next_curr = model.logp_xnext_full(part=curparty,
                                               past_trajs=ptraj,
                                               pind=pind_curr,
                                               future_trajs=future_trajs,
                                               find=find,
                                               ut=ut,
                                               yt=yt,
                                               tt=tt,
                                               cur_ind=cur_ind)
    else:
        # Final time step: no future trajectory to evaluate against.
        logp_next_prop = 0.0
        logp_next_curr = 0.0

    # Calc ratio (log of the MH acceptance ratio, per particle)
    ratio = ((logp_prev_prop - logp_prev_curr) +
             (logp_y_prop - logp_y_curr) +
             (logp_next_prop - logp_next_curr) +
             (logp_q_curr - logp_q_prop))

    # Vectorized accept/reject: accept where log(u) < log-ratio.
    test = numpy.log(numpy.random.uniform(size=len(ratio)))
    acc = test < ratio
    curparty[acc] = xpropy[acc]
    return (curparty, acc)
/django_kelove_admin-0.4.1-py3-none-any.whl/django_kelove_admin/static/kelove_admin/editor_md/lib/codemirror/addon/comment/continuecomment.js |
// CodeMirror "continuecomment" addon: pressing Enter while the cursor is
// inside a comment continues that comment on the next line.
(function(mod) {
  if (typeof exports == "object" && typeof module == "object") // CommonJS
    mod(require("../../lib/codemirror"));
  else if (typeof define == "function" && define.amd) // AMD
    define(["../../lib/codemirror"], mod);
  else // Plain browser env
    mod(CodeMirror);
})(function(CodeMirror) {
  // Default block-comment continuation prefix for C-like comment styles.
  var modes = ["clike", "css", "javascript"];

  for (var i = 0; i < modes.length; ++i)
    CodeMirror.extendMode(modes[i], {blockCommentContinue: " * "});

  // Key handler bound to Enter: compute the continuation text for every
  // selection, or return CodeMirror.Pass to fall through to the default
  // Enter behavior as soon as any selection is not inside a comment.
  function continueComment(cm) {
    if (cm.getOption("disableInput")) return CodeMirror.Pass;
    var ranges = cm.listSelections(), mode, inserts = [];
    for (var i = 0; i < ranges.length; i++) {
      var pos = ranges[i].head, token = cm.getTokenAt(pos);
      if (token.type != "comment") return CodeMirror.Pass;
      // All selections must sit in the same (inner) mode.
      var modeHere = CodeMirror.innerMode(cm.getMode(), token.state).mode;
      if (!mode) mode = modeHere;
      else if (mode != modeHere) return CodeMirror.Pass;

      var insert = null;
      if (mode.blockCommentStart && mode.blockCommentContinue) {
        var end = token.string.indexOf(mode.blockCommentEnd);
        var full = cm.getRange(CodeMirror.Pos(pos.line, 0), CodeMirror.Pos(pos.line, token.end)), found;
        if (end != -1 && end == token.string.length - mode.blockCommentEnd.length && pos.ch >= end) {
          // Comment ended, don't continue it
        } else if (token.string.indexOf(mode.blockCommentStart) == 0) {
          // Cursor on the opening line: indent the continuation to match.
          insert = full.slice(0, token.start);
          if (!/^\s*$/.test(insert)) {
            // Non-whitespace precedes the comment; align with plain spaces.
            insert = "";
            for (var j = 0; j < token.start; ++j) insert += " ";
          }
        } else if ((found = full.indexOf(mode.blockCommentContinue)) != -1 &&
                   found + mode.blockCommentContinue.length > token.start &&
                   /^\s*$/.test(full.slice(0, found))) {
          // Cursor on an already-continued line: reuse its indentation.
          insert = full.slice(0, found);
        }
        if (insert != null) insert += mode.blockCommentContinue;
      }
      if (insert == null && mode.lineComment && continueLineCommentEnabled(cm)) {
        var line = cm.getLine(pos.line), found = line.indexOf(mode.lineComment);
        if (found > -1) {
          insert = line.slice(0, found);
          if (/\S/.test(insert)) insert = null;
          else insert += mode.lineComment + line.slice(found + mode.lineComment.length).match(/^\s*/)[0];
        }
      }
      if (insert == null) return CodeMirror.Pass;
      inserts[i] = "\n" + insert;
    }

    // Apply all insertions in one operation, last selection first so that
    // earlier positions stay valid.
    cm.operation(function() {
      for (var i = ranges.length - 1; i >= 0; i--)
        cm.replaceRange(inserts[i], ranges[i].from(), ranges[i].to(), "+insert");
    });
  }

  // Line-comment continuation can be switched off via
  // {continueLineComment: false} in the option value.
  function continueLineCommentEnabled(cm) {
    var opt = cm.getOption("continueComments");
    if (opt && typeof opt == "object")
      return opt.continueLineComment !== false;
    return true;
  }

  // Option value may be true, a key name string, or an object
  // {key: "KeyName", continueLineComment: bool}.
  CodeMirror.defineOption("continueComments", null, function(cm, val, prev) {
    if (prev && prev != CodeMirror.Init)
      cm.removeKeyMap("continueComment");
    if (val) {
      var key = "Enter";
      if (typeof val == "string")
        key = val;
      else if (typeof val == "object" && val.key)
        key = val.key;
      var map = {name: "continueComment"};
      map[key] = continueComment;
      cm.addKeyMap(map);
    }
  });
});
/cpp_version_2-0.0.1-py3-none-any.whl/courses/views.py | from itertools import chain
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from . import forms
from . import models
def course_list(request):
    """Render the list of all courses along with a contact email address."""
    email = 'questions@learning_site.com'
    all_courses = models.Course.objects.all()
    return render(request, 'courses/course_list.html',
                  {'courses': all_courses, 'email': email})


def course_detail(request, pk):
    """Render one course with its text and quiz steps in display order."""
    course = get_object_or_404(models.Course, pk=pk)
    combined = chain(course.text_set.all(), course.quiz_set.all())
    steps = sorted(combined, key=lambda step: step.order)
    return render(request, 'courses/course_detail.html',
                  {'course': course, 'steps': steps})


def text_detail(request, course_pk, step_pk):
    """Render a single text step belonging to the given course."""
    step = get_object_or_404(models.Text, course_id=course_pk, pk=step_pk)
    return render(request, 'courses/step_detail.html', {'step': step})


def quiz_detail(request, course_pk, step_pk):
    """Render a single quiz step belonging to the given course."""
    step = get_object_or_404(models.Quiz, course_id=course_pk, pk=step_pk)
    return render(request, 'courses/quiz_detail.html', {'step': step})
@login_required
def quiz_create(request, course_pk):
    """Create a new quiz attached to the given course."""
    course = get_object_or_404(models.Course, pk=course_pk)
    if request.method == 'POST':
        form = forms.QuizForm(request.POST)
        if form.is_valid():
            # Attach the quiz to its course before the first save.
            quiz = form.save(commit=False)
            quiz.course = course
            quiz.save()
            messages.add_message(request, messages.SUCCESS, "Quiz Added!")
            return HttpResponseRedirect(quiz.get_absolute_url())
    else:
        form = forms.QuizForm()
    return render(request, 'courses/quiz_form.html', {'form': form, 'course': course})
@login_required
def quiz_edit(request, course_pk, quiz_pk):
    """Edit an existing quiz of the given course."""
    quiz = get_object_or_404(models.Quiz, pk=quiz_pk, course_id=course_pk)
    if request.method == 'POST':
        form = forms.QuizForm(instance=quiz, data=request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, "Updated {}".format(form.cleaned_data['title']))
            return HttpResponseRedirect(quiz.get_absolute_url())
    else:
        form = forms.QuizForm(instance=quiz)
    return render(request, 'courses/quiz_form.html', {'form': form, 'course': quiz.course})
@login_required
def create_question(request, quiz_pk, question_type):
    """Add a question (true/false or multiple choice) plus its answers to a quiz."""
    quiz = get_object_or_404(models.Quiz, pk=quiz_pk)
    # 'tf' selects the true/false form; anything else means multiple choice.
    form_class = (forms.TrueFalseQuestionForm if question_type == 'tf'
                  else forms.MultipleChoiceQuestionForm)
    form = form_class()
    answer_forms = forms.AnswerInlineFormSet(queryset=models.Answer.objects.none())
    if request.method == 'POST':
        form = form_class(request.POST)
        answer_forms = forms.AnswerInlineFormSet(
            request.POST, queryset=models.Answer.objects.none())
        if form.is_valid() and answer_forms.is_valid():
            # Save the question first so the answers can reference it.
            question = form.save(commit=False)
            question.quiz = quiz
            question.save()
            for answer in answer_forms.save(commit=False):
                answer.question = question
                answer.save()
            messages.success(request, "Added Question")
            return HttpResponseRedirect(quiz.get_absolute_url())
    return render(request, 'courses/question_form.html',
                  {'form': form, 'quiz': quiz, 'formset': answer_forms})
@login_required
def edit_question(request, quiz_pk, question_pk):
    """Edit an existing quiz question together with its inline answer formset."""
    question = get_object_or_404(models.Question, pk=question_pk, quiz_id=quiz_pk)
    # Question appears to use multi-table inheritance: resolve the concrete
    # subclass so the matching form class can be used.
    if hasattr(question, 'truefalsequestion'):
        form_class = forms.TrueFalseQuestionForm
        question = question.truefalsequestion
    else:
        form_class = forms.MultipleChoiceQuestionForm
        question = question.multiplechoicequestion
    form = form_class(instance=question)
    answer_forms = forms.AnswerInlineFormSet(
        queryset=form.instance.answer_set.all()
    )
    if request.method == 'POST':
        form = form_class(request.POST, instance=question)
        answer_forms = forms.AnswerInlineFormSet(
            request.POST,
            queryset=form.instance.answer_set.all()
        )
        if form.is_valid() and answer_forms.is_valid():
            form.save()
            # commit=False so each answer can be linked to its question first.
            answers = answer_forms.save(commit=False)
            for answer in answers:
                answer.question = question
                answer.save()
            # Formsets only flag deleted forms; the objects must be removed
            # explicitly when saving with commit=False.
            for answer in answer_forms.deleted_objects:
                answer.delete()
            messages.success(request, "Updated Question")
            return HttpResponseRedirect(question.quiz.get_absolute_url())
    return render(request, 'courses/question_form.html', {'form': form, 'quiz': question.quiz, 'formset': answer_forms})
@login_required
def answer_form(request, question_pk):
    """Bulk-edit the answers of one question through a formset."""
    question = get_object_or_404(models.Question, pk=question_pk)
    if request.method == 'POST':
        formset = forms.AnswerFormSet(request.POST, queryset=question.answer_set.all())
        if formset.is_valid():
            # Link each new/changed answer to its question before saving.
            for answer in formset.save(commit=False):
                answer.question = question
                answer.save()
            messages.success(request, "Added Answers")
            return HttpResponseRedirect(question.quiz.get_absolute_url())
    else:
        formset = forms.AnswerFormSet(queryset=question.answer_set.all())
    return render(request, 'courses/answer_form.html', {'formset': formset, 'question': question})
/pyobs_core-1.4.10-py3-none-any.whl/pyobs/modules/robotic/scheduler.py | import asyncio
import copy
import json
import logging
import multiprocessing as mp
from typing import Union, List, Tuple, Any, Optional, Dict
import astroplan
from astroplan import ObservingBlock
from astropy.time import TimeDelta
import astropy.units as u
from pyobs.events.taskfinished import TaskFinishedEvent
from pyobs.events.taskstarted import TaskStartedEvent
from pyobs.events import GoodWeatherEvent, Event
from pyobs.utils.time import Time
from pyobs.interfaces import IStartStop, IRunnable
from pyobs.modules import Module
from pyobs.robotic import TaskArchive, TaskSchedule
log = logging.getLogger(__name__)
class Scheduler(Module, IStartStop, IRunnable):
    """Scheduler that periodically fetches schedulable blocks from a task
    archive and (re-)computes an observation schedule with astroplan."""

    __module__ = "pyobs.modules.robotic"

    def __init__(
        self,
        tasks: Union[Dict[str, Any], TaskArchive],
        schedule: Union[Dict[str, Any], TaskSchedule],
        schedule_range: int = 24,
        safety_time: int = 60,
        twilight: str = "astronomical",
        trigger_on_task_started: bool = False,
        trigger_on_task_finished: bool = False,
        **kwargs: Any,
    ):
        """Initialize a new scheduler.

        Args:
            tasks: Task archive to fetch schedulable blocks from.
            schedule: Task schedule to write results to.
            schedule_range: Number of hours to schedule into the future
            safety_time: If no ETA for next task to start exists (from current task, weather became good, etc), use
                         this time in seconds to make sure that we don't schedule for a time when the scheduler is
                         still running
            twilight: astronomical or nautical
            trigger_on_task_started: Whether to trigger a re-calculation of schedule, when task has started.
            trigger_on_task_finished: Whether to trigger a re-calculation of schedule, when task has finished.
        """
        Module.__init__(self, **kwargs)

        # get scheduler
        self._task_archive = self.add_child_object(tasks, TaskArchive)
        self._schedule = self.add_child_object(schedule, TaskSchedule)

        # store
        self._schedule_range = schedule_range
        self._safety_time = safety_time
        self._twilight = twilight
        self._running = True
        self._initial_update_done = False
        self._need_update = False
        self._trigger_on_task_started = trigger_on_task_started
        self._trigger_on_task_finished = trigger_on_task_finished

        # time to start next schedule from
        self._schedule_start: Optional[Time] = None

        # ID of currently running task, and current (or last if finished) block
        self._current_task_id = None
        self._last_task_id = None

        # blocks
        self._blocks: List[ObservingBlock] = []

        # update thread
        self.add_background_task(self._schedule_worker)
        self.add_background_task(self._update_worker)

    async def open(self) -> None:
        """Open module."""
        await Module.open(self)

        # subscribe to events
        if self.comm:
            await self.comm.register_event(TaskStartedEvent, self._on_task_started)
            await self.comm.register_event(TaskFinishedEvent, self._on_task_finished)
            await self.comm.register_event(GoodWeatherEvent, self._on_good_weather)

    async def start(self, **kwargs: Any) -> None:
        """Start scheduler."""
        self._running = True

    async def stop(self, **kwargs: Any) -> None:
        """Stop scheduler."""
        self._running = False

    async def is_running(self, **kwargs: Any) -> bool:
        """Whether scheduler is running."""
        return self._running

    async def _update_worker(self) -> None:
        """Background task: poll the task archive for changes and flag a
        re-schedule when the set of schedulable blocks changed."""
        # time of last change in blocks
        last_change = None

        # run forever
        while True:
            # not running?
            if self._running is False:
                await asyncio.sleep(1)
                continue

            # got new time of last change?
            t = await self._task_archive.last_changed()
            if last_change is None or last_change < t:
                # get schedulable blocks and sort them
                log.info("Found update in schedulable block, downloading them...")
                blocks = sorted(
                    await self._task_archive.get_schedulable_blocks(),
                    key=lambda x: json.dumps(x.configuration, sort_keys=True),
                )
                log.info("Downloaded %d schedulable block(s).", len(blocks))

                # compare new and old lists
                removed, added = self._compare_block_lists(self._blocks, blocks)

                # schedule update
                self._need_update = True

                # no changes?
                if len(removed) == 0 and len(added) == 0:
                    # no need to re-schedule
                    log.info("No change in list of blocks detected.")
                    self._need_update = False

                # has only the current block been removed?
                log.info("Removed: %d, added: %d", len(removed), len(added))
                if len(removed) == 1:
                    # NOTE(review): the '%d' placeholder assumes numeric block
                    # names, but names are compared to task IDs as strings
                    # below — confirm and consider '%s'.
                    log.info(
                        "Found 1 removed block with ID %d. Last task ID was %s, current is %s.",
                        removed[0].target.name,
                        str(self._last_task_id),
                        str(self._current_task_id),
                    )
                if len(removed) == 1 and len(added) == 0 and removed[0].target.name == self._last_task_id:
                    # no need to re-schedule
                    log.info("Only one removed block detected, which is the one currently running.")
                    self._need_update = False

                # check, if one of the removed blocks was actually in schedule
                if len(removed) > 0 and self._need_update:
                    schedule = await self._schedule.get_schedule()
                    removed_from_schedule = [r for r in removed if r in schedule]
                    if len(removed_from_schedule) == 0:
                        log.info(f"Found {len(removed)} blocks, but none of them was scheduled.")
                        self._need_update = False

                # store blocks
                self._blocks = blocks

                # schedule update
                if self._need_update:
                    log.info("Triggering scheduler run...")

                # remember now
                last_change = Time.now()
                self._initial_update_done = True

            # sleep a little
            await asyncio.sleep(5)

    @staticmethod
    def _compare_block_lists(
        blocks1: List[ObservingBlock], blocks2: List[ObservingBlock]
    ) -> Tuple[List[ObservingBlock], List[ObservingBlock]]:
        """Compares two lists of ObservingBlocks and returns two lists, containing those that are missing in list 1
        and list 2, respectively.

        Args:
            blocks1: First list of blocks.
            blocks2: Second list of blocks.

        Returns:
            (tuple): Tuple containing:
                unique1: Blocks that exist in blocks1, but not in blocks2.
                unique2: Blocks that exist in blocks2, but not in blocks1.
        """
        # get dictionaries with block names
        names1 = {b.target.name: b for b in blocks1}
        names2 = {b.target.name: b for b in blocks2}

        # find elements in names1 that are missing in names2 and vice versa
        additional1 = set(names1.keys()).difference(names2.keys())
        additional2 = set(names2.keys()).difference(names1.keys())

        # get blocks for names and return them
        unique1 = [names1[n] for n in additional1]
        unique2 = [names2[n] for n in additional2]
        return unique1, unique2

    async def _schedule_worker(self) -> None:
        """Background task: run the scheduler whenever an update was flagged."""
        # run forever
        while True:
            # need update?
            if self._need_update and self._initial_update_done:
                # reset need for update
                self._need_update = False

                try:
                    # prepare scheduler
                    blocks, start, end, constraints = await self._prepare_schedule()

                    # schedule
                    scheduled_blocks = await self._schedule_blocks(blocks, start, end, constraints)

                    # finish schedule
                    await self._finish_schedule(scheduled_blocks, start)

                except ValueError as e:
                    log.warning(str(e))

            # sleep a little
            await asyncio.sleep(1)

    async def _prepare_schedule(self) -> Tuple[List[ObservingBlock], Time, Time, List[Any]]:
        """Collect blocks, the scheduling window and global constraints for
        the next scheduler run.

        Raises:
            ValueError: If an update was requested meanwhile or no blocks
                remain to schedule.
        """
        # only global constraint is the night
        if self._twilight == "astronomical":
            constraints = [astroplan.AtNightConstraint.twilight_astronomical()]
        elif self._twilight == "nautical":
            constraints = [astroplan.AtNightConstraint.twilight_nautical()]
        else:
            raise ValueError("Unknown twilight type.")

        # make shallow copies of all blocks and loop them
        copied_blocks = [copy.copy(block) for block in self._blocks]
        for block in copied_blocks:
            # astroplan's PriorityScheduler expects lower priorities to be more important, so calculate
            # 1000 - priority
            block.priority = 1000.0 - block.priority
            if block.priority < 0:
                block.priority = 0

            # it also doesn't match the requested observing windows exactly, so we make them a little smaller.
            for constraint in block.constraints:
                if isinstance(constraint, astroplan.TimeConstraint):
                    constraint.min += 30 * u.second
                    constraint.max -= 30 * u.second

        # get start time for scheduler
        start = self._schedule_start
        now_plus_safety = Time.now() + self._safety_time * u.second
        if start is None or start < now_plus_safety:
            # if no ETA exists or is in the past, use safety time
            start = now_plus_safety

        # get running scheduled block, if any
        if self._current_task_id is None:
            log.info("No running block found.")
            running_task = None
        else:
            # get running task from archive
            log.info("Trying to find running block in current schedule...")
            tasks = await self._schedule.get_schedule()
            if self._current_task_id in tasks:
                running_task = tasks[self._current_task_id]
            else:
                log.info("Running block not found in last schedule.")
                running_task = None

        # if start is before end time of currently running block, change that
        if running_task is not None:
            log.info("Found running block that ends at %s.", running_task.end)

            # get block end plus some safety
            block_end = running_task.end + 10.0 * u.second
            if start < block_end:
                start = block_end
                log.info("Start time would be within currently running block, shifting to %s.", start.isot)

        # calculate end time
        end = start + TimeDelta(self._schedule_range * u.hour)

        # remove currently running block and filter by start time
        blocks: List[ObservingBlock] = []
        for b in filter(lambda x: x.configuration["request"]["id"] != self._current_task_id, copied_blocks):
            time_constraint_found = False

            # loop all constraints
            for c in b.constraints:
                if isinstance(c, astroplan.TimeConstraint):
                    # we found a time constraint
                    time_constraint_found = True

                    # does the window start before the end of the scheduling range?
                    if c.min < end:
                        # yes, store block and break loop
                        blocks.append(b)
                        break
            else:
                # loop has finished without breaking
                # if no time constraint has been found, we still take the block
                if time_constraint_found is False:
                    blocks.append(b)

        # if need new update, skip here
        if self._need_update:
            raise ValueError("Not running scheduler, since update was requested.")

        # no blocks found?
        if len(blocks) == 0:
            await self._schedule.set_schedule([], start)
            raise ValueError("No blocks left for scheduling.")

        # return all
        return blocks, start, end, constraints

    async def _schedule_blocks(
        self, blocks: List[ObservingBlock], start: Time, end: Time, constraints: List[Any]
    ) -> List[ObservingBlock]:
        """Run the actual scheduler in a separate process and await the result."""
        # run actual scheduler in separate process and wait for it
        qout: mp.Queue = mp.Queue()
        p = mp.Process(target=self._schedule_process, args=(blocks, start, end, constraints, qout))
        p.start()

        # wait for process to finish
        # note that the process only finishes, when the queue is empty! so we have to poll the queue first
        # and then the process.
        loop = asyncio.get_running_loop()
        scheduled_blocks: List[ObservingBlock] = await loop.run_in_executor(None, qout.get, True)
        await loop.run_in_executor(None, p.join)
        return scheduled_blocks

    async def _finish_schedule(self, scheduled_blocks: List[ObservingBlock], start: Time) -> None:
        """Store the computed schedule unless a new update was requested."""
        # if need new update, skip here
        if self._need_update:
            log.info("Not using scheduler results, since update was requested.")
            return

        # update
        await self._schedule.set_schedule(scheduled_blocks, start)
        if len(scheduled_blocks) > 0:
            log.info("Finished calculating schedule for %d block(s):", len(scheduled_blocks))
            for i, block in enumerate(scheduled_blocks, 1):
                log.info(
                    "  #%d: %s to %s (%.1f)",
                    block.configuration["request"]["id"],
                    block.start_time.strftime("%H:%M:%S"),
                    block.end_time.strftime("%H:%M:%S"),
                    block.priority,
                )
        else:
            log.info("Finished calculating schedule for 0 blocks.")

    def _schedule_process(
        self,
        blocks: List[ObservingBlock],
        start: Time,
        end: Time,
        constraints: List[Any],
        scheduled_blocks: mp.Queue,
    ) -> None:
        """Actually do the scheduling, usually run in a separate process."""
        # log it
        log.info("Calculating schedule for %d schedulable block(s) starting at %s...", len(blocks), start)

        # we don't need any transitions
        transitioner = astroplan.Transitioner()

        # create scheduler
        scheduler = astroplan.PriorityScheduler(constraints, self.observer, transitioner=transitioner)

        # run scheduler
        time_range = astroplan.Schedule(start, end)
        schedule = scheduler(blocks, time_range)

        # put scheduled blocks in queue
        scheduled_blocks.put(schedule.scheduled_blocks)

    async def run(self, **kwargs: Any) -> None:
        """Trigger a re-schedule."""
        self._need_update = True

    async def _on_task_started(self, event: Event, sender: str) -> bool:
        """Re-schedule when task has started and we can predict its end.

        Args:
            event: The task started event.
            sender: Who sent it.
        """
        if not isinstance(event, TaskStartedEvent):
            return False

        # store it
        self._current_task_id = event.id
        self._last_task_id = event.id

        # trigger?
        if self._trigger_on_task_started:
            # get ETA in minutes
            eta = (event.eta - Time.now()).sec / 60
            log.info("Received task started event with ETA of %.0f minutes, triggering new scheduler run...", eta)

            # set it
            self._need_update = True
            self._schedule_start = event.eta

        return True

    async def _on_task_finished(self, event: Event, sender: str) -> bool:
        """Reset current task, when it has finished.

        Args:
            event: The task finished event.
            sender: Who sent it.
        """
        if not isinstance(event, TaskFinishedEvent):
            return False

        # reset current task
        self._current_task_id = None

        # trigger?
        if self._trigger_on_task_finished:
            # get ETA in minutes
            log.info("Received task finished event, triggering new scheduler run...")

            # set it
            self._need_update = True
            self._schedule_start = Time.now()

        return True

    async def _on_good_weather(self, event: Event, sender: str) -> bool:
        """Re-schedule on incoming good weather event.

        Args:
            event: The good weather event.
            sender: Who sent it.
        """
        if not isinstance(event, GoodWeatherEvent):
            return False

        # get ETA in minutes
        eta = (event.eta - Time.now()).sec / 60
        log.info("Received good weather event with ETA of %.0f minutes, triggering new scheduler run...", eta)

        # set it
        self._need_update = True
        self._schedule_start = event.eta
        return True

    async def abort(self, **kwargs: Any) -> None:
        # Nothing to abort; required by the IRunnable interface.
        pass


__all__ = ["Scheduler"]
/ntv-firmware-1.0.2.tar.gz/ntv-firmware-1.0.2/ntv_firmware/twisted.py | from __future__ import absolute_import
import sys
import serial
from serial.tools import list_ports
import glob
from time import sleep
from twisted.internet import reactor, defer
def locate_firmware(
        baudrate=57600,
        firmware_id=None,
        firmware_version=None,
        firmware_uid=None,
        outstream=sys.stdout,
        silent=False,
        delay=1,
        max_attempts=0
):
    """
    Repeatedly scan the serial ports for a matching NTV firmware device.

    Parameters
    ----------
    baudrate (int): Baudrate to open COM port at
    firmware_id (string): Firmware identifier to search for
    firmware_version (string): Firmware version to search for
    firmware_uid (string): Firmware instance UID to search for
    outstream (file): Output stream for logging. Defaults to stdout
    silent (boolean): Don't output anything to outstream
    delay (int): Seconds to delay between iterations
    max_attempts (int): Number of iterations to search for. Defaults
        to ``0`` (infinite)

    Returns
    -------
    Deferred
        Deferred that fires with the port name once found, or errbacks
        after ``max_attempts`` unsuccessful scans (if non-zero) or on an
        unexpected error.
    """
    d = defer.Deferred()

    def log(msg, *args):
        # Write a formatted message unless silenced.
        if not silent:
            outstream.write(msg.format(*args))

    def search():
        """Scan all COM ports once; return the matching device name or None."""
        log("[NTV-Firmware] Enumerating COM ports... ")
        ports = list_ports.comports()
        log("{} found\n", len(ports))

        for port in ports:
            try:
                log("[NTV-Firmware] - Checking {}... ", port.device)
                with serial.Serial(port.device, baudrate, timeout=1) as sp:
                    sleep(1)
                    # Drain any boot noise before talking to the device.
                    sp.read(9999)
                    sp.reset_input_buffer()
                    # bytes literal: required by pyserial on Python 3,
                    # identical behavior on Python 2.
                    sp.write(b"IDENTIFY\n")

                    # Allow up to 7 read timeouts before giving up on the port
                    # (same retry budget as before).
                    reply = b''
                    for _ in range(7):
                        reply = sp.readline()
                        if len(reply) > 0:
                            break

                    if len(reply) == 0:
                        log("NOT_NTV_FIRMWARE\n")
                        continue

                    try:
                        # Expected reply shape: "<tag>:<uid>:<id>:<version>"
                        _, f_uid, f_id, f_version = (
                            reply.decode('ascii', 'replace').strip().split(":")
                        )
                    except ValueError:
                        # Malformed reply; previously this raised and was
                        # swallowed by the outer handler with the same outcome.
                        log("NOT_NTV_FIRMWARE\n")
                        continue

                    if firmware_id is not None and firmware_id != f_id:
                        log("INVALID_FIRMWARE\n")
                        continue
                    if firmware_version is not None and firmware_version != f_version:
                        # Fixed: newline was missing here, corrupting the log.
                        log("INVALID_FIRMWARE_VERSION\n")
                        continue
                    if firmware_uid is not None and firmware_uid != f_uid:
                        log("INVALID_UID\n")
                        continue

                    log("SUCCESS\n")
                    return port.device
            except Exception as e:
                # errno 16 == EBUSY: port is held by another process.
                if getattr(e, 'errno', None) == 16:
                    log("DEVICE_BUSY\n")
                else:
                    # Fixed: 'print e' was Python-2-only syntax; report via the
                    # configured log stream instead.
                    log("{}\n", e)
                    log("NOT_NTV_FIRMWARE\n")
        return None

    def deferred_search(attempts=0, max_attempts=0):
        try:
            found_port = search()
            attempts += 1
            if found_port is None:
                if max_attempts > 0 and attempts >= max_attempts:
                    # Fixed: errback() without an argument raises
                    # NoCurrentExceptionError outside an 'except' block;
                    # pass an explicit failure instead.
                    d.errback(RuntimeError(
                        "NTV firmware not found after {} attempt(s)".format(attempts)))
                else:
                    reactor.callLater(delay, deferred_search,
                                      attempts=attempts, max_attempts=max_attempts)
            else:
                d.callback(found_port)
        except Exception as err:
            d.errback(err)

    reactor.callLater(0, deferred_search, max_attempts=max_attempts)
    return d
/RsCMPX_WlanMeas-4.0.150-py3-none-any.whl/RsCMPX_WlanMeas/Implementations/WlanMeas/MultiEval/TsMask/Mimo/Maximum.py | from typing import List
from ......Internal.Core import Core
from ......Internal.CommandsGroup import CommandsGroup
from ......Internal.Types import DataType
from ......Internal.StructBase import StructBase
from ......Internal.ArgStruct import ArgStruct
from ...... import enums
from ...... import repcap
# noinspection PyPep8Naming,PyAttributeOutsideInit,SpellCheckingInspection
class MaximumCls:
"""Maximum commands group definition. 3 total commands, 0 Subgroups, 3 group commands"""
def __init__(self, core: Core, parent):
self._core = core
self._cmd_group = CommandsGroup("maximum", core, parent)
# noinspection PyTypeChecker
class ResultData(StructBase):
"""Response structure. Fields: \n
- Reliability: int: 'Reliability indicator'
- Out_Of_Tol: float: Out of tolerance result, i.e. percentage of measurement intervals of the statistic count for spectrum emission measurements exceeding the specified transmit spectrum mask limits.
- Margin_Tx: List[float]: Comma-separated list of margin values, one value per spectrum mask area The number of margin values depends on the selected standard, see Table 'Spectrum mask areas'."""
__meta_args_list = [
ArgStruct.scalar_int('Reliability', 'Reliability'),
ArgStruct.scalar_float('Out_Of_Tol'),
ArgStruct('Margin_Tx', DataType.FloatList, None, False, True, 1)]
def __init__(self):
StructBase.__init__(self, self)
self.Reliability: int = None
self.Out_Of_Tol: float = None
self.Margin_Tx: List[float] = None
def read(self, mimo=repcap.Mimo.Default) -> ResultData:
"""SCPI: READ:WLAN:MEASurement<Instance>:MEValuation:TSMask:MIMO<n>:MAXimum \n
Snippet: value: ResultData = driver.wlanMeas.multiEval.tsMask.mimo.maximum.read(mimo = repcap.Mimo.Default) \n
Return the limit line margin values of the transmit spectrum mask for MIMO measurements, antenna <n>, bandwidths with one
segment. Margins for the current, average, minimum and maximum traces are returned. A positive result indicates that the
trace is located above the limit line. The limit is exceeded. The values described below are returned by FETCh and READ
commands. CALCulate commands return limit check results instead, one value for each result listed below. \n
:param mimo: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Mimo')
:return: structure: for return value, see the help for ResultData structure arguments."""
mimo_cmd_val = self._cmd_group.get_repcap_cmd_value(mimo, repcap.Mimo)
return self._core.io.query_struct(f'READ:WLAN:MEASurement<Instance>:MEValuation:TSMask:MIMO{mimo_cmd_val}:MAXimum?', self.__class__.ResultData())
def fetch(self, mimo=repcap.Mimo.Default) -> ResultData:
"""SCPI: FETCh:WLAN:MEASurement<Instance>:MEValuation:TSMask:MIMO<n>:MAXimum \n
Snippet: value: ResultData = driver.wlanMeas.multiEval.tsMask.mimo.maximum.fetch(mimo = repcap.Mimo.Default) \n
Return the limit line margin values of the transmit spectrum mask for MIMO measurements, antenna <n>, bandwidths with one
segment. Margins for the current, average, minimum and maximum traces are returned. A positive result indicates that the
trace is located above the limit line. The limit is exceeded. The values described below are returned by FETCh and READ
commands. CALCulate commands return limit check results instead, one value for each result listed below. \n
:param mimo: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Mimo')
:return: structure: for return value, see the help for ResultData structure arguments."""
mimo_cmd_val = self._cmd_group.get_repcap_cmd_value(mimo, repcap.Mimo)
return self._core.io.query_struct(f'FETCh:WLAN:MEASurement<Instance>:MEValuation:TSMask:MIMO{mimo_cmd_val}:MAXimum?', self.__class__.ResultData())
# noinspection PyTypeChecker
class CalculateStruct(StructBase):
	"""Response structure. Fields: \n
	- Reliability: int: 'Reliability indicator'
	- Out_Of_Tol: enums.ResultStatus2: Out of tolerance result, i.e. percentage of measurement intervals of the statistic count for spectrum emission measurements exceeding the specified transmit spectrum mask limits.
	- Margin_Tx: List[enums.ResultStatus2]: Comma-separated list of margin values, one value per spectrum mask area The number of margin values depends on the selected standard, see Table 'Spectrum mask areas'."""
	# Field metadata consumed by the driver core to parse the CALCulate response
	# into the typed attributes below.
	__meta_args_list = [
		ArgStruct.scalar_int('Reliability', 'Reliability'),
		ArgStruct.scalar_enum('Out_Of_Tol', enums.ResultStatus2),
		# NOTE(review): the trailing ArgStruct arguments appear to declare
		# Margin_Tx as an open-ended enum list (one value per spectrum mask
		# area) — confirm against the ArgStruct constructor signature.
		ArgStruct('Margin_Tx', DataType.EnumList, enums.ResultStatus2, False, True, 1)]

	def __init__(self):
		StructBase.__init__(self, self)
		# Attributes are None until query_struct() parses an instrument response.
		self.Reliability: int = None
		self.Out_Of_Tol: enums.ResultStatus2 = None
		self.Margin_Tx: List[enums.ResultStatus2] = None
def calculate(self, mimo=repcap.Mimo.Default) -> CalculateStruct:
	"""SCPI: CALCulate:WLAN:MEASurement<Instance>:MEValuation:TSMask:MIMO<n>:MAXimum \n
	Snippet: value: CalculateStruct = driver.wlanMeas.multiEval.tsMask.mimo.maximum.calculate(mimo = repcap.Mimo.Default) \n
	Query the transmit spectrum mask limit-check results for MIMO measurements
	on antenna <n> (bandwidths with one segment). Margins for the current,
	average, minimum and maximum traces are evaluated. A positive result means
	the trace lies above the limit line, i.e. the limit is exceeded. FETCh and
	READ return the measured values; this CALCulate command returns limit-check
	results instead, one value per listed result. \n
	:param mimo: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Mimo')
	:return: structure: for return value, see the help for CalculateStruct structure arguments."""
	# Resolve the <n> repcap suffix and issue the struct query.
	antenna = self._cmd_group.get_repcap_cmd_value(mimo, repcap.Mimo)
	query = f'CALCulate:WLAN:MEASurement<Instance>:MEValuation:TSMask:MIMO{antenna}:MAXimum?'
	return self._core.io.query_struct(query, self.__class__.CalculateStruct())
/groupdocs-merger-cloud-23.2.tar.gz/groupdocs-merger-cloud-23.2/groupdocs_merger_cloud/models/formats_result.py |
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="FormatsResult.py">
# Copyright (c) 2003-2023 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class FormatsResult(object):
    """Describes an object which contains the list of supported file formats.

    Attributes:
        swagger_types (dict): The key is the attribute name and the value is
            the attribute type.
        attribute_map (dict): The key is the attribute name and the value is
            the JSON key in the definition.
    """

    swagger_types = {
        'formats': 'list[Format]'
    }

    attribute_map = {
        'formats': 'Formats'
    }

    def __init__(self, formats=None, **kwargs):  # noqa: E501
        """Initializes a new instance of FormatsResult.

        :param formats: Supported file formats collection.
        :type formats: list[Format]

        Extra keyword arguments are accepted for forward compatibility and
        silently ignored (matches the generated-client convention).
        """  # noqa: E501
        self._formats = None
        if formats is not None:
            self.formats = formats

    @property
    def formats(self):
        """
        Gets the formats.  # noqa: E501

        Supported file formats collection  # noqa: E501

        :return: The formats.  # noqa: E501
        :rtype: list[Format]
        """
        return self._formats

    @formats.setter
    def formats(self, formats):
        """
        Sets the formats.

        Supported file formats collection  # noqa: E501

        :param formats: The formats.  # noqa: E501
        :type: list[Format]
        """
        self._formats = formats

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models (any value exposing ``to_dict``) inside lists and dicts
        are serialized recursively; plain values are copied through unchanged.
        """
        result = {}
        # Iterate the declared swagger attributes (not __dict__) so only the
        # public model fields are serialized. Plain dict iteration behaves
        # identically on Python 2 and 3, so the six shim is not needed here.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, FormatsResult):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other