id | text | dataset_id
---|---|---
stringlengths 1-8 | stringlengths 6-1.05M | stringclasses 1 value
/genie.libs.conf-23.8-py3-none-any.whl/genie/libs/conf/ospf/iosxe/arearange.py | import re
import warnings
from abc import ABC
from netaddr import IPNetwork
# Genie
from genie.conf.base.cli import CliConfigBuilder
from genie.conf.base.attributes import AttributesHelper
class AreaRange(ABC):
def build_config(self, apply=True, attributes=None, unconfig=False,
**kwargs):
attributes = AttributesHelper(self, attributes)
configurations = CliConfigBuilder(unconfig=unconfig)
# Get area information
area = kwargs['area']
# router ospf 1
# area 2 range 192.168.1.0 255.255.255.0 cost 10
# area 2 range 192.168.1.0 255.255.255.0 advertise cost 10
# area 2 range 192.168.1.0 255.255.255.0 not-advertise cost 10
if attributes.value('area_range_prefix'):
# area {area}
ar_str = 'area {}'.format(area)
# + range {area_range_prefix}
if re.search("\/", attributes.value('area_range_prefix')):
range_val = IPNetwork(attributes.value('area_range_prefix'))
prefix = str(range_val.ip)
netmask = str(range_val.netmask)
ar_str += ' range {} {}'.format(prefix, netmask)
else:
ar_str += ' range {area_range_prefix}'
# + advertise
# + not-advertise
if attributes.value('area_range_advertise') is True:
ar_str += ' advertise'
elif attributes.value('area_range_advertise') is False:
ar_str += ' not-advertise'
# + cost {area_range_cost}
if attributes.value('area_range_cost'):
ar_str += ' cost {area_range_cost}'
configurations.append_line(attributes.format(ar_str))
return str(configurations)
def build_unconfig(self, apply=True, attributes=None, **kwargs):
return self.build_config(apply=apply, attributes=attributes,
unconfig=True, **kwargs) | PypiClean |
/diffino-0.2.1.tar.gz/diffino-0.2.1/README.md | diffino
====
[![Build Status](https://travis-ci.com/IntuitiveWebSolutions/diffino.svg?branch=master)](https://travis-ci.com/IntuitiveWebSolutions/diffino)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)
Diffing tools for comparing datasets in CSV, XLSX and other formats available as CLI app, API, web app and module. Powered by the awesome Pandas library for Python.
### Done
- Install as CLI app
- Install and use as python module
- Compare two CSV datasets using Pandas where you can output differences row by row
- Use the following inputs for your datasets:
- Local file in CSV pandas modes
- File in S3 pandas mode
- Define a subset of columns to use for comparing/diffing (only works with pandas mode, not supported for MD5 comparison)
- Output differences to:
- Console (print)
- CSV file
### To-Do (ROADMAP)
- Compare one or more CSV datasets using MD5 hash of the files
- Compare one or more XLSX datasets using Pandas where you can output differences row by row
- Use the following inputs for your datasets:
- Local file in CSV MD5
- Local file in XLSX (only for pandas mode)
  - Local directory with CSVs or XLSX files (for both MD5 and pandas modes)
- ZIP file with CSVs or XLSX files (only for pandas mode)
- File in S3 for MD5
- Bucket in S3 (for both MD5 and pandas modes)
- Output differences to:
  - XLSX file
- JSON file
## Install
To install as module and CLI:
```
pip install diffino
```
## CLI
Diffino will try its best to guess your input storage mechanisms; for that you need to include `s3://` in the input argument and/or use the `.csv`, `.xls` and `.xlsx` extensions.
### Compare using pandas
MD5 comparison is only useful for telling that two CSV datasets are not identical; it cannot tell you what the actual differences are. For that you can use pandas mode, which outputs the differences row by row.
The same commands work in pandas mode; just pass the `--mode pandas` argument. **By default pandas mode is used, so this argument can be omitted**:
```
diffino before_dataset.csv after_dataset.csv --mode pandas
```
When using pandas mode, Diffino will by default try to convert numeric columns; you can change this behavior with:
```
diffino before_dataset.csv after_dataset.csv --convert-numeric false
```
You can define the columns to be used for checking the diffs:
```
diffino before_dataset.csv after_dataset.csv --cols id name
```
#### Compare two CSV files in an S3 bucket using pandas mode
```
diffino s3://bucket/before_dataset.csv s3://bucket/after_dataset.csv --mode pandas
```
### Output diff results to file
Diffino will try its best to guess your output storage mechanism; for that you need to include `s3://` in the `--output` argument or use the `.csv`, `.xls` and `.xlsx` extensions.
#### Output to a local CSV file
```
diffino file_1.csv file_2.csv --output diff.csv
```
Note: two files are going to be generated, one for each direction of the comparison between the left and right argument files. For the example above, the following files are created:
* `diff_left.csv`
* `diff_right.csv`
#### Avoid creating unnecessary files
If you want to avoid unnecessary noise, you can prevent diffino from creating result files when there are no actual differences by passing the `--output-only-diffs` flag:
```
diffino file_1.csv file_2.csv --output diff.csv --output-only-diffs
```
For the above example, if `file_1` has some extra rows that are not present in `file_2`, while `file_2` only has rows that are also present in `file_1`, then only a `diff_left.csv` file will be produced.
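As a purely illustrative example (the file contents below are made up, not taken from a real run), consider inputs like these:
```
# file_1.csv
id,name
1,alpha
2,beta
3,gamma

# file_2.csv
id,name
1,alpha
2,beta
```
With `--output-only-diffs`, the row `3,gamma` would end up in `diff_left.csv`, and no `diff_right.csv` would be written because nothing in `file_2.csv` is missing from `file_1.csv`.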
#### Output to a local Excel file
When using Excel, output will contain different sheets as well as one summary sheet containing all differences:
```
diffino file_1.csv file_2.csv --output diff.xlsx
```
#### Output to a local JSON file
```
diffino file_1.csv file_2.csv --output diff.json
```
#### Output to a CSV file in S3
```
diffino file_1.csv file_2.csv --output s3://bucket/diff.csv
```
#### Output to an Excel file in S3
When using Excel, output will contain different sheets as well as one summary sheet containing all differences:
```
diffino file_1.csv file_2.csv --output s3://bucket/diff.xlsx
```
#### Output to a JSON file in S3
```
diffino file_1.csv file_2.csv --output s3://bucket/diff.json
```
## Python module
Useful if you want to integrate Diffino as part of your ETL or your Continuous Integration (CI) builds.
### Get a dictionary with differences using pandas mode
For using all columns:
```python
from diffino.models import Diffino
diffino = Diffino(left='s3://bucket/one.csv', right='s3://bucket/two.csv', mode='pandas')
results = diffino.build_diff()
```
In the above example, the `results` variable contains a tuple: the first element is the left differences count and
the second element is the right differences count:
```python
results[0]
results[1]
```
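If you are wiring Diffino into a CI build, a minimal sketch along the following lines can fail the job whenever differences are found. The file names are placeholders, and the interpretation of the two counts is taken from the description above:
```python
from diffino.models import Diffino

# Hypothetical CI gate: compare the expected dataset against the produced one
# and fail the build if either side has rows the other does not.
diffino = Diffino(left='expected.csv', right='actual.csv', mode='pandas')
left_count, right_count = diffino.build_diff()

if left_count or right_count:
    raise SystemExit(
        'Datasets differ: {} rows only in left, {} rows only in right'.format(
            left_count, right_count
        )
    )
```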
And for using a subset of columns, you can pass a Python list with the column names you want to include:
```python
from diffino.models import Diffino
diffino = Diffino(
left='one.csv',
right='two.csv',
mode='pandas',
cols=['id', 'name']
)
results = diffino.build_diff()
```
## COMING SOON
Different column names? No problemo, that works too!
```python
from diffino.models import Diffino
diffino = Diffino(
left='one.xlsx',
right='two.xlsx',
mode='pandas',
left_cols=['myColumn'],
right_cols=['my_column'],
)
results = diffino.build_diff()
```
## Web App
Coming soon
## API
Coming soon
| PypiClean |
/Ion-0.6.4.tar.gz/Ion-0.6.4/ion/cache.py | import time, re
class CacheEntry(object):
__slots__ = ("value", "ftime", "mtime")
def __init__(self, value):
self.set(value)
self.ftime = 0.0
def get(self):
self.ftime = time.time()
return self.value
def set(self, value):
self.value = value
self.mtime = time.time()
class ExpiredError(KeyError):
pass
class Cache(dict):
def __init__(self,data=None,size=100,age=None, log=None):
self.size = size
self.requests = self.hits = 0
self.inserts = self.unused = 0
        if isinstance(age, str):
age = self._cvtage(age)
self.age = age
self.log = log
def shrink(self):
trim = max(0, int(len(self)-0.95*self.size))
if trim:
# sort keys by access times
            values = sorted(zip(self.ftimes(), self.keys()))
for val,k in values[0:trim]:
if val == 0.0:
self.unused += 1
del self[k]
def purge_old_entries(self):
if self.age is None:
return
t = time.time()
        for k in list(self.keys()):
v = dict.__getitem__(self, k)
threshold = t - self.age
# modified or fetched in last self.age seconds?
if threshold > v.mtime and threshold > v.ftime:
if v.ftime == 0.0:
self.unused += 1
del self[k]
def __setitem__(self,key,val):
self.inserts += 1
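        # periodically drop expired entries and shrink the cache before it exceeds its size limit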
if self.age is not None and self.requests % 1000 == 0:
self.purge_old_entries()
if (key not in self and self.size and len(self) >= self.size):
self.shrink()
dict.__setitem__(self, key, CacheEntry(val))
if self.log:
self.log("Setting item %s to %s" % (key, val), "Cache")
def __getitem__(self,key):
self.requests += 1
item = dict.__getitem__(self, key)
val = item.get()
if self.age is not None:
if self.requests % 1000 == 0:
self.purge_old_entries()
# check to make sure value has not expired
if time.time()-self.age > item.mtime:
if item.ftime == 0.0:
self.unused += 1
del self[key]
if self.log:
self.log("Cache miss at key %s" % key, "Cache")
                raise ExpiredError(key)
# if we get here there was no KeyError
self.hits = self.hits + 1
if self.log:
self.log("Cache hit at key %s" % key, "Cache")
return val
def has_key(self,key):
try:
v = dict.__getitem__(self, key)
except (KeyError,ExpiredError):
return 0
if self.age is not None and time.time()-self.age > v.mtime:
return 0
return 1
def get(self,key,default=None):
try:
return self[key]
except KeyError:
return default
def values(self):
return [dict.__getitem__(self,key).get() for key in self]
def ftimes(self):
return [dict.__getitem__(self,key).ftime for key in self]
def mtimes(self):
return [dict.__getitem__(self,key).mtime for key in self]
def items(self):
        return list(zip(self.keys(), self.values()))
def copy(self):
return self.__class__(self, self.size)
def update(self, dict):
for k in dict.keys():
self[k] = dict[k]
def stats(self):
return {
'hits': self.hits,
'inserts': self.inserts,
'requests': self.requests,
'unused': self.unused,
}
def __repr__(self):
l = []
for k in self.keys():
l.append("%s: %s" % (repr(k), repr(self[k])))
return "{" + string.join(l, ", ") + "}"
__str__=__repr__
    _apat = re.compile(r"([0-9]+([.][0-9]+)?)\s*([dhms])?\s*$")
def _cvtage(self,age):
mat = self._apat.match(age)
if mat is None:
raise ValueError, "invalid age spec: "+age
n = float(mat.group(1))
units = mat.group(3) or "s"
if units == "s":
pass
elif units == "m":
n = n * 60
elif units == "h":
n = n * 60*60
elif units == "d":
n = n * 24*60*60
return n | PypiClean |
/indy_plenum-1.13.1rc1-py3-none-any.whl/common/serializers/json_serializer.py |
import base64
from typing import Dict
from common.serializers.mapping_serializer import MappingSerializer
try:
import ujson as json
from ujson import encode as uencode
# Older versions of ujson's encode do not support `sort_keys`, if that
# is the case default to using json
uencode({'xx': '123', 'aa': 90}, sort_keys=True)
class UJsonEncoder:
@staticmethod
def encode(o):
if isinstance(o, (bytes, bytearray)):
return '"{}"'.format(base64.b64encode(o).decode("utf-8"))
else:
return uencode(o, sort_keys=True)
JsonEncoder = UJsonEncoder()
except (ImportError, TypeError):
import json
class OrderedJsonEncoder(json.JSONEncoder):
def __init__(self, *args, **kwargs):
kwargs['ensure_ascii'] = False
kwargs['sort_keys'] = True
kwargs['separators'] = (',', ':')
super().__init__(*args, **kwargs)
def encode(self, o):
if isinstance(o, (bytes, bytearray)):
return '"{}"'.format(base64.b64encode(o).decode("utf-8"))
else:
return super().encode(o)
JsonEncoder = OrderedJsonEncoder()
class JsonSerializer(MappingSerializer):
"""
Class to convert a mapping to json with keys ordered in lexicographical
order
"""
@staticmethod
def dumps(data, toBytes=True):
encoded = JsonEncoder.encode(data)
if toBytes:
encoded = encoded.encode()
return encoded
@staticmethod
def loads(data):
if isinstance(data, (bytes, bytearray)):
data = data.decode()
return json.loads(data)
    # The `fields` argument is kept to conform to the interface; it's not
    # needed in this method
def serialize(self, data: Dict, fields=None, toBytes=True):
return self.dumps(data, toBytes)
    # The `fields` argument is kept to conform to the interface; it's not
    # needed in this method
def deserialize(self, data, fields=None):
return self.loads(data) | PypiClean |
/mpds_client-0.24.tar.gz/mpds_client-0.24/mpds_client/retrieve_MPDS.py | import os
import sys
import time
import math
import warnings
from urllib.parse import urlencode
import httplib2
import ujson as json
import pandas as pd
from numpy import array_split
import jmespath
from .errors import APIError
use_pmg, use_ase = False, False
try:
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
use_pmg = True
except ImportError: pass
try:
from ase import Atom
from ase.spacegroup import crystal
use_ase = True
except ImportError: pass
if not use_pmg and not use_ase:
warnings.warn("Crystal structure treatment unavailable")
__author__ = 'Evgeny Blokhin <[email protected]>'
__copyright__ = 'Copyright (c) 2020, Evgeny Blokhin, Tilde Materials Informatics'
__license__ = 'MIT'
class MPDSDataTypes(object):
PEER_REVIEWED = 1
MACHINE_LEARNING = 2
AB_INITIO = 4
ALL = 7
def _massage_atsymb(sequence):
"""
Handle the difference between PY2 and PY3
in how pandas and ASE treat atomic symbols,
received from the MPDS JSON
"""
if sys.version_info[0] < 3:
return [i.encode('ascii') for i in sequence]
return sequence
class MPDSDataRetrieval(object):
"""
An example Python implementation
of the API consumer for the MPDS platform,
see https://developer.mpds.io
Usage:
$>export MPDS_KEY=...
client = MPDSDataRetrieval()
dataframe = client.get_dataframe({"formula":"SrTiO3", "props":"phonons"})
*or*
jsonobj = client.get_data(
{"formula":"SrTiO3", "sgs": 99, "props":"atomic properties"},
fields={
'S':["entry", "cell_abc", "sg_n", "basis_noneq", "els_noneq"]
}
)
*or*
jsonobj = client.get_data({"formula":"SrTiO3"}, fields={})
"""
default_fields = {
'S': [
'phase_id',
'chemical_formula',
'sg_n',
'entry',
lambda: 'crystal structure',
lambda: 'angstrom'
],
'P': [
'sample.material.phase_id',
'sample.material.chemical_formula',
'sample.material.condition[0].scalar[0].value',
'sample.material.entry',
'sample.measurement[0].property.name',
'sample.measurement[0].property.units',
'sample.measurement[0].property.scalar'
],
'C': [
lambda: None,
'title',
lambda: None,
'entry',
lambda: 'phase diagram',
'naxes',
'arity'
]
}
default_titles = ['Phase', 'Formula', 'SG', 'Entry', 'Property', 'Units', 'Value']
endpoint = "https://api.mpds.io/v0/download/facet"
pagesize = 1000
maxnpages = 120 # one hit may reach 50kB in RAM, consider pagesize*maxnpages*50kB free RAM
maxnphases = 1500 # more phases require additional requests
chillouttime = 2 # please, do not use values < 2, because the server may burn out
verbose = True
debug = False
def __init__(self, api_key=None, endpoint=None, dtype=None, verbose=None, debug=None):
"""
MPDS API consumer constructor
Args:
api_key: (str) The MPDS API key, or None if the MPDS_KEY envvar is set
endpoint: (str) MPDS API gateway URL
Returns: None
"""
self.api_key = api_key if api_key else os.environ['MPDS_KEY']
self.network = httplib2.Http()
self.endpoint = endpoint or self.endpoint
self.dtype = dtype or MPDSDataTypes.PEER_REVIEWED
self.verbose = verbose if verbose is not None else self.verbose
self.debug = debug or self.debug
def _request(self, query, phases=None, page=0, pagesize=None):
phases = ','.join([str(int(x)) for x in phases]) if phases else ''
uri = self.endpoint + '?' + urlencode({
'q': json.dumps(query),
'phases': phases,
'page': page,
'pagesize': pagesize or self.pagesize,
'dtype': self.dtype
})
if self.debug:
print('curl -XGET -HKey:%s \'%s\'' % (self.api_key, uri))
response, content = self.network.request(
uri=uri,
method='GET',
headers={'Key': self.api_key}
)
if response.status != 200:
return {'error': content, 'code': response.status}
try:
content = json.loads(content)
except:
return {'error': 'Unreadable data obtained'}
if content.get('error'):
return {'error': content['error']}
if not content['out']:
return {'error': 'No hits', 'code': 204}
return content
def _massage(self, array, fields):
if not fields:
return array
output = []
for item in array:
filtered = []
for object_type in ['S', 'P', 'C']:
if item['object_type'] == object_type:
for expr in fields.get(object_type, []):
if isinstance(expr, jmespath.parser.ParsedResult):
filtered.append(expr.search(item))
else:
filtered.append(expr)
break
else:
raise APIError("API error: unknown entry type")
output.append(filtered)
return output
def count_data(self, search, phases=None, **kwargs):
"""
Calculate the number of entries matching the keyword(s) specified
Args:
search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"},
documented at https://developer.mpds.io/#Categories
phases: (list) Phase IDs, according to the MPDS distinct phases concept
kwargs: just a mockup
Returns:
count (int)
"""
result = self._request(search, phases=phases, pagesize=10)
if result['error']:
raise APIError(result['error'], result.get('code', 0))
if result['npages'] > self.maxnpages:
warnings.warn(
"\r\nDataset is too big, you may risk to change maxnpages from %s to %s" % \
(self.maxnpages, int(math.ceil(result['count']/self.pagesize)))
)
return result['count']
def get_data(self, search, phases=None, fields=default_fields):
"""
Retrieve data in JSON.
JSON is expected to be valid against the schema
at https://developer.mpds.io/mpds.schema.json
Args:
search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"},
documented at https://developer.mpds.io/#Categories
phases: (list) Phase IDs, according to the MPDS distinct phases concept
fields: (dict) Data of interest for C-, S-, and P-entries,
e.g. for phase diagrams: {'C': ['naxes', 'arity', 'shapes']},
documented at https://developer.mpds.io/#JSON-schemata
Returns:
List of dicts: C-, S-, and P-entries, the format is
documented at https://developer.mpds.io/#JSON-schemata
"""
output = []
fields = {
key: [jmespath.compile(item) if isinstance(item, str) else item() for item in value]
for key, value in fields.items()
} if fields else None
tot_count = 0
phases = list(set(phases)) if phases else []
if len(phases) > self.maxnphases:
all_phases = array_split(phases, int(math.ceil(
len(phases)/self.maxnphases
)))
else: all_phases = [phases]
nsteps = len(all_phases)
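        # fetch each chunk of phases page by page, sleeping between requests to avoid overloading the server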
for step, current_phases in enumerate(all_phases, start=1):
counter, hits_count = 0, 0
while True:
result = self._request(search, phases=list(current_phases), page=counter)
if result['error']:
raise APIError(result['error'], result.get('code', 0))
if result['npages'] > self.maxnpages:
raise APIError(
"Too many hits (%s > %s), please, be more specific" % \
(result['count'], self.maxnpages * self.pagesize),
2
)
output.extend(self._massage(result['out'], fields))
if hits_count and hits_count != result['count']:
raise APIError("API error: hits count has been changed during the query")
hits_count = result['count']
time.sleep(self.chillouttime)
if counter == result['npages'] - 1:
break
counter += 1
if self.verbose:
sys.stdout.write("\r\t%d%% of step %s from %s" % (
(counter/result['npages']) * 100, step, nsteps)
)
sys.stdout.flush()
tot_count += hits_count
if len(output) != tot_count:
raise APIError("API error: collected and declared counts of hits differ")
if self.verbose:
sys.stdout.write(" Got %s hits\r\n" % tot_count)
sys.stdout.flush()
return output
def get_dataframe(self, *args, **kwargs):
"""
Retrieve data as a Pandas dataframe.
Args:
search: (dict) Search query like {"categ_A": "val_A", "categ_B": "val_B"},
documented at https://developer.mpds.io/#Categories
phases: (list) Phase IDs, according to the MPDS distinct phases concept
fields: (dict) Data of interest for C-, S-, and P-entries,
e.g. for phase diagrams: {'C': ['naxes', 'arity', 'shapes']},
documented at https://developer.mpds.io/#JSON-schemata
columns: (list) Column names for Pandas dataframe
Returns: (object) Pandas dataframe object containing the results
"""
columns = kwargs.get('columns')
if columns:
del kwargs['columns']
else:
columns = self.default_titles
return pd.DataFrame(self.get_data(*args, **kwargs), columns=columns)
def get_crystals(self, search, phases=None, flavor='pmg', **kwargs):
search["props"] = "atomic structure"
crystals = []
for crystal_struct in self.get_data(
search,
phases,
fields={'S':['cell_abc', 'sg_n', 'basis_noneq', 'els_noneq']},
**kwargs
):
crobj = self.compile_crystal(crystal_struct, flavor)
if crobj is not None:
crystals.append(crobj)
return crystals
@staticmethod
def compile_crystal(datarow, flavor='pmg'):
"""
Helper method for representing the MPDS crystal structures in two flavors:
either as a Pymatgen Structure object, or as an ASE Atoms object.
Attention #1. Disordered structures (e.g. fractional indices in the chemical formulae)
are not supported by this method, and hence the occupancies are not retrieved.
Currently it's up to the user to take care of that (see e.g.
https://doi.org/10.1186/s13321-016-0129-3 etc.).
Attention #2. Pymatgen and ASE flavors are generally not compatible, e.g.
primitive vs. crystallographic cell is defaulted,
atoms wrapped or non-wrapped into the unit cell etc.
Note, that the crystal structures are not retrieved by default,
so for them one needs to specify the following fields:
- cell_abc
- sg_n
- basis_noneq
- els_noneq
e.g. like this: {'S':['cell_abc', 'sg_n', 'basis_noneq', 'els_noneq']}
Args:
datarow: (list) Required data to construct crystal structure:
[cell_abc, sg_n, basis_noneq, els_noneq]
flavor: (str) Either "pmg", or "ase"
Returns:
- if flavor is pmg, Pymatgen Structure object
- if flavor is ase, ASE Atoms object
"""
if not datarow or not datarow[-1]:
# this is either a P-entry with the cell data, which meets the search criterion,
# or a 'low quality' structure with no basis (just unit cell parameters)
return None
if len(datarow) < 4:
raise ValueError(
"Must supply a data row that ends with the entries "
"'cell_abc', 'sg_n', 'basis_noneq', 'els_noneq'")
cell_abc, sg_n, basis_noneq, els_noneq = \
datarow[-4], int(datarow[-3]), datarow[-2], _massage_atsymb(datarow[-1])
if flavor == 'pmg' and use_pmg:
return Structure.from_spacegroup(
sg_n,
Lattice.from_parameters(*cell_abc),
els_noneq,
basis_noneq
)
elif flavor == 'ase' and use_ase:
atom_data = []
for num, i in enumerate(basis_noneq):
atom_data.append(Atom(els_noneq[num], tuple(i)))
return crystal(
atom_data,
spacegroup=sg_n,
cellpar=cell_abc,
primitive_cell=True,
onduplicates='replace'
)
else: raise APIError("Crystal structure treatment unavailable") | PypiClean |
/workspace-puller-0.0.22.tar.gz/workspace-puller-0.0.22/workspace_puller/workspace_puller.py | import datetime
import json
import tempfile
import certifi
import yaml
import io
import os
from git import Repo
import shutil
import urllib3
import requests
from oauth2client.client import OOB_CALLBACK_URN
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from downloader import Download
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from workspace_puller import tools as t
from workspace_puller.wp_telegram_bot import WPTelegramBot
class WorkspacePuller:
def __init__(self, config_url: str, telegram_token=None):
self.kaggle_dirmode = 'skip'
config_file_name = 'config_' + str(
datetime.datetime.now()) + '.yml'
self.download_config(config_url, config_file_name)
self.config = WorkspaceConfig(
self.parse_config(config_file_name))
os.remove(config_file_name)
self.pool_manager = urllib3.PoolManager(
num_pools=1,
maxsize=1,
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where()
)
self.telegram_bot = None
if telegram_token is not None:
self.telegram_bot = WPTelegramBot(token=telegram_token, chat_ids=self.config.telegram_channels)
def build_workspace(self):
if self.config is None:
return
self.gauth = self.get_gauth()
if self.config.dataset_list is not None:
self.download_list(self.config.dataset_list)
if self.config.repos is not None:
self.clone_repos(self.config.repos)
if self.config.script_files is not None:
self.run(self.config.script_files)
if self.config.gdrive_folders is not None:
self.send_output_to_gdrive(self.config.output_folder, self.config.gdrive_folders)
if self.config.kaggle is not None:
            # read dirmode before uploading so that it actually takes effect
            if 'dirmode' in self.config.kaggle:
                self.kaggle_dirmode = self.config.kaggle['dirmode']
            self.upload_data_to_kaggle()
t.log_message('Done.')
if self.telegram_bot is not None:
self.telegram_bot.send_message('Workspace build done.')
def get_gauth(self):
gauth = None
packge_path, _ = os.path.split(__file__)
client_config = os.path.join(packge_path, 'client_secrets.json')
credentials_file = os.path.join(packge_path, 'drive_credentials')
if os.path.exists(credentials_file):
try:
gauth = GoogleAuth()
gauth.LoadClientConfigFile(client_config_file=client_config)
gauth.LoadCredentialsFile(credentials_file=credentials_file)
return gauth
except Exception as e:
t.log_message(str(e))
gauth = None
if self.config.gdrive_folders is not None and self.config.telegram_channels is not None and self.telegram_bot is not None:
try:
gauth = self.start_auth_telegram(client_config=client_config)
gauth.SaveCredentialsFile(credentials_file=credentials_file)
except Exception as e:
t.log_message(str(e))
gauth = None
elif self.config.gdrive_folders is not None and self.telegram_bot is None or self.config.telegram_channels is None:
try:
gauth = GoogleAuth()
gauth.LoadClientConfigFile(client_config_file=client_config)
gauth.CommandLineAuth()
gauth.SaveCredentialsFile(credentials_file=credentials_file)
except Exception as e:
t.log_message(str(e))
gauth = None
return gauth
def download_config(self, config_url: str, file_name: str = None):
downloader = Download(config_url, file_name)
downloader.download()
def parse_config(self, config_file_path):
stream = io.open(config_file_path, mode='r', buffering=-1, encoding=None, errors=None, newline=None,
closefd=True)
yml = yaml.load(stream=stream, Loader=yaml.CLoader)
return yml
def run(self, run_file_list: list):
for item in run_file_list:
if not os.path.exists(item):
t.log_message('ERROR. File not found: ' + item)
continue
t.log_message("Executing file: " + item)
os.system('python ' + item)
def download_list(self, url_list: list):
for item in url_list:
t.log_message('Downloading: ' + item)
try:
download = Download(item, retries=5)
download.download()
path = os.path.abspath(download.download_path)
_, extension = os.path.splitext(path)
if extension[1:] in dict(shutil.get_archive_formats()).keys() and self.config.extract_archives:
shutil.unpack_archive(path)
except Exception as e:
t.log_message("ERROR. Download: " + item + ' FAILED.\n' + str(e))
def clone_repos(self, repos: dict):
for repo_name, repo_data in repos.items():
branch: str = None
if 'branch' in repo_data:
branch = repo_data['branch']
if 'url' in repo_data:
url: str = repo_data['url']
if os.path.exists(repo_name):
shutil.rmtree(repo_name)
if branch is not None:
t.log_message('Cloning repo: ' + url + ', branch: ' + branch + ', to the folder: ' + repo_name)
Repo.clone_from(url=url, to_path=repo_name, branch=branch)
else:
t.log_message('Cloning repo: ' + url + ', to the folder: ' + repo_name)
Repo.clone_from(url=url, to_path=repo_name)
else:
t.log_message('ERROR. URL not found for a repo: ' + repo_name)
def send_output_to_gdrive(self, output_folders: list, drive_folders: list):
if self.gauth is None and self.config.gdrive_folders is not None:
t.log_message('GoogleDrive is unauthorised. Upload canceled.')
return
drive = GoogleDrive(self.gauth)
t.log_message('Uploading an output folders to the Google Drive')
for drive_folder in drive_folders:
for folder in output_folders:
self.upload_to_drive(folder, drive_folder, drive)
def upload_to_drive(self, folder: str, drive_folder_id: str, drive: GoogleDrive):
title: str = os.path.basename(folder)
if not os.path.isdir(folder):
file_metadata = {
"title": title,
"parents": [
{
"kind": "drive#fileLink",
"id": drive_folder_id
}
]
}
file = drive.CreateFile(file_metadata)
file.SetContentFile(folder)
file.Upload()
else:
folder_metadata = {
'title': title,
'mimeType': 'application/vnd.google-apps.folder',
"parents": [
{
"kind": "drive#fileLink",
"id": drive_folder_id
}
]
}
            drive_folder = drive.CreateFile(folder_metadata)
            drive_folder.Upload()
            for item in os.listdir(folder):
                path = os.path.join(folder, item)
                self.upload_to_drive(path, drive_folder['id'], drive)
def start_auth_telegram(self, client_config):
if self.telegram_bot is None:
t.log_message('telegram bot is None. Telegram auth canceled.')
return
auth = GoogleAuth()
auth.LoadClientConfigFile(client_config_file=client_config)
if auth.flow is None:
auth.GetFlow()
auth.flow.redirect_uri = OOB_CALLBACK_URN
self.telegram_bot.send_message(
'Please go to the following link in your browser and send me a Google verification code. \nAuth url: ' + auth.GetAuthUrl())
dirty = False
code = None
save_credentials = auth.settings.get('save_credentials')
if auth.credentials is None and save_credentials:
auth.LoadCredentials()
if auth.credentials is None:
code = self.telegram_bot.get_code()
dirty = True
else:
if auth.access_token_expired:
if auth.credentials.refresh_token is not None:
auth.Refresh()
else:
code = self.telegram_bot.get_code()
dirty = True
if code is not None:
auth.Auth(code)
if dirty and save_credentials:
auth.SaveCredentials()
return auth
def upload_data_to_kaggle(self):
files = []
for output_folder in self.config.output_folder:
t.log_message('Uploading an output folder to the Kaggle: ' + output_folder)
for item in os.listdir(output_folder):
path = os.path.join(output_folder, item)
if os.path.isfile(path):
token = self.upload_file_to_kaggle(path)
files.append({'token': token})
elif os.path.isdir(path) and self.kaggle_dirmode in ['zip', 'tar']:
temp_dir = tempfile.mkdtemp()
try:
_, dir_name = os.path.split(path)
archive_path = shutil.make_archive(
os.path.join(temp_dir, dir_name), self.kaggle_dirmode,
path)
token = self.upload_file_to_kaggle(archive_path)
files.append({'token': token})
finally:
shutil.rmtree(temp_dir)
t.log_message(output_folder + ' - uploaded.')
dataset = self.prepare_dataset(files)
self.kaggle_api_call(resource='/datasets/create/new', method='POST', body=dataset)
def prepare_dataset(self, files: list):
dataset = {}
if 'dataset_title' in self.config.kaggle:
dataset['title'] = self.config.kaggle['dataset_title']
else:
dataset['title'] = 'Unknown title'
if 'slug' in self.config.kaggle:
dataset['slug'] = self.config.kaggle['slug']
else:
dataset['slug'] = 'unknown_slug'
if 'username' in self.config.kaggle:
dataset['ownerSlug'] = self.config.kaggle['username']
if 'license' in self.config.kaggle:
dataset['licenseName'] = self.config.kaggle['license']
else:
dataset['licenseName'] = 'CC0-1.0'
if 'isPrivate' in self.config.kaggle:
dataset['isPrivate'] = self.config.kaggle['isPrivate']
else:
dataset['isPrivate'] = True
if 'convertToCsv' in self.config.kaggle:
dataset['convertToCsv'] = self.config.kaggle['convertToCsv']
else:
dataset['convertToCsv'] = True
dataset['files'] = files
return dataset
def kaggle_api_call(self, resource, method: str, body=None, post_params=None):
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
request_body = json.dumps(body)
base_uri = "https://www.kaggle.com/api/v1"
auth_key = urllib3.util.make_headers(
basic_auth=self.config.kaggle['username'] + ':' + self.config.kaggle['key']
).get('authorization')
headers = {'Accept': 'application/json', 'Content-Type': 'application/json',
'User-Agent': 'Swagger-Codegen/1/python', "Authorization": auth_key}
if post_params is not None:
del headers['Content-Type']
return self.pool_manager.request(method=method, url=base_uri + resource, encode_multipart=True,
headers=headers, fields=post_params)
else:
return self.pool_manager.request(method=method, url=base_uri + resource, headers=headers, body=request_body)
def upload_file_to_kaggle(self, file_path: str):
file_token = None
try:
file_name = os.path.basename(file_path)
content_length = os.path.getsize(file_path)
last_modified_date_utc = int(os.path.getmtime(file_path))
post_params = [('fileName', file_name)]
kaggle_response = self.kaggle_api_call(
resource='/datasets/upload/file/' + str(content_length) + '/' + str(last_modified_date_utc),
method='POST',
post_params=post_params)
kaggle_data = json.loads(kaggle_response.data.decode('utf8'))
create_url = kaggle_data['createUrl']
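            # Kaggle responds with a signed upload URL; the file bytes are then PUT directly to that URL with retries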
with io.open(file_path, 'rb', buffering=0) as fp:
reader = io.BufferedReader(fp)
session = requests.Session()
retries = Retry(total=10, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
response = session.put(create_url, data=reader)
if response.status_code == 200 or response.status_code == 201:
file_token = kaggle_data['token']
if file_token is None:
t.log_message('Upload unsuccessful: ' + file_path)
except Exception as error:
            t.log_message('Upload failed: ' + file_path + '\n' + str(error))
return file_token
class WorkspaceConfig:
def __init__(self, config_yml):
self.dataset_list: list = None
self.extract_archives = True
self.repos: dict = None
self.script_files: list = None
self.output_folder: list = None
self.gdrive_folders: list = None
self.telegram_channels: list = None
self.kaggle = None
if 'dataset_list' in config_yml:
self.dataset_list = config_yml['dataset_list']
if 'extract_archives' in config_yml:
self.extract_archives = config_yml['extract_archives']
if 'repos' in config_yml:
self.repos = config_yml['repos']
if 'script_file' in config_yml:
self.script_files = config_yml['script_file']
if 'output_folder' in config_yml:
self.output_folder = config_yml['output_folder']
if 'google_drive' in config_yml:
self.gdrive_folders = config_yml['google_drive']
if 'telegram_channels' in config_yml:
self.telegram_channels = config_yml['telegram_channels']
if 'kaggle' in config_yml:
self.kaggle = config_yml['kaggle'] | PypiClean |
/DNN_printer-0.0.2.tar.gz/DNN_printer-0.0.2/src/DNN_printer/DNN_printer.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from collections import OrderedDict
import numpy as np
def output_shape(array):
out = 1
for i in array:
out = out * i
return out
def iterlen(x):
return sum(1 for _ in x)
def DNN_printer(model, input_size, batch_size=-1, device="cuda"):
def register_hook(module):
def hook(module, input, output):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
module_idx = len(DNN_printer)
m_key = "%s-%i" % (class_name, module_idx + 1)
DNN_printer[m_key] = OrderedDict()
DNN_printer[m_key]["input_shape"] = list(input[0].size())
DNN_printer[m_key]["input_shape"][0] = batch_size
if isinstance(output, (list, tuple)):
DNN_printer[m_key]["output_shape"] = [
[-1] + list(o.size())[1:] for o in output
]
else:
DNN_printer[m_key]["output_shape"] = list(output.size())
DNN_printer[m_key]["output_shape"][0] = batch_size
params = 0
if hasattr(module, "weight") and hasattr(module.weight, "size"):
params += torch.prod(torch.LongTensor(list(module.weight.size())))
DNN_printer[m_key]["trainable"] = module.weight.requires_grad
if hasattr(module, "bias") and hasattr(module.bias, "size"):
params += torch.prod(torch.LongTensor(list(module.bias.size())))
DNN_printer[m_key]["nb_params"] = params
if (
not isinstance(module, nn.Sequential)
and not isinstance(module, nn.ModuleList)
and not (module == model)
):
hooks.append(module.register_forward_hook(hook))
device = device.lower()
assert device in [
"cuda",
"cpu",
], "Input device is not valid, please specify 'cuda' or 'cpu'"
if device == "cuda" and torch.cuda.is_available():
dtype = torch.cuda.FloatTensor
else:
dtype = torch.FloatTensor
# multiple inputs to the network
if isinstance(input_size, tuple):
input_size = [input_size]
# batch_size of 2 for batchnorm
x = [torch.rand(2, *in_size).type(dtype) for in_size in input_size]
# create properties
DNN_printer = OrderedDict()
hooks = []
# register hook
# model.apply(register_hook)
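    # register hooks only on leaf modules (those without children) so container modules are not double-counted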
def apply_hook(module):
for name, submodule in module.named_children():
if (iterlen(submodule.named_children()) == 0):
submodule.apply(register_hook)
else:
apply_hook(submodule)
apply_hook(model)
model(*x)
# remove these hooks
for h in hooks:
h.remove()
print("------------------------------Happy every day! :)---------------------------------")
print("-----------------------------Author: Peiyi & Ping---------------------------------")
line_new = "{:>20} {:>20} {:>15} {:>13} {:>15}".format("Layer (type)", "Output Shape", "O-Size(MB)" ,"Param #","P-Size(MB)")
print(line_new)
print("==================================================================================")
total_params = 0
total_output = 0
trainable_params = 0
for layer in DNN_printer:
# input_shape, output_shape, trainable, nb_params
line_new = "{:>20} {:>20} {:>10} {:>13} {:>15}".format(
layer,
str(DNN_printer[layer]["output_shape"]),
str((output_shape(DNN_printer[layer]["output_shape"])) * 4 / 1024 / 1024 )+" MB",
"{0:,}".format(DNN_printer[layer]["nb_params"]),
str(float(DNN_printer[layer]["nb_params"]) * 4 / 1024 / 1024) + " MB",
)
total_params += DNN_printer[layer]["nb_params"]
total_output += np.prod(DNN_printer[layer]["output_shape"])
if "trainable" in DNN_printer[layer]:
if DNN_printer[layer]["trainable"] == True:
trainable_params += DNN_printer[layer]["nb_params"]
print(line_new)
# assume 4 bytes/number (float on cuda).
total_input_size = abs(np.prod(input_size) * batch_size * 4. / (1024 ** 2.))
total_output_size = abs(2. * total_output * 4. / (1024 ** 2.)) # x2 for gradients
total_params_size = abs(total_params.numpy() * 4. / (1024 ** 2.))
total_size = total_params_size + total_output_size + total_input_size
print("================================================================")
print("Total params: {0:,}".format(total_params))
print("Trainable params: {0:,}".format(trainable_params))
print("Non-trainable params: {0:,}".format(total_params - trainable_params))
print("----------------------------------------------------------------")
print("Input size (MB): %0.2f" % total_input_size)
print("Forward/backward pass size (MB): %0.2f" % total_output_size)
print("Params size (MB): %0.2f" % total_params_size)
print("Estimated Total Size (MB): %0.2f" % total_size)
print("----------------------------------------------------------------") | PypiClean |
/facebook_page_scraper-5.0.2.tar.gz/facebook_page_scraper-5.0.2/README.md | <h1> Facebook Page Scraper </h1>
[![Maintenance](https://img.shields.io/badge/Maintained-Yes-green.svg)](https://github.com/shaikhsajid1111/facebook_page_scraper/graphs/commit-activity)
[![PyPI license](https://img.shields.io/pypi/l/ansicolortags.svg)](https://opensource.org/licenses/MIT) [![Python >=3.6.9](https://img.shields.io/badge/python-3.7+-blue.svg)](https://www.python.org/downloads/release/python-360/)
<p>No need of an API key, no limitation on the number of requests. Import the library and <b>Just Do It!</b></p>
<!--TABLE of contents-->
<h2> Table of Contents </h2>
<details open="open">
<summary>Table of Contents</summary>
<ol>
<li>
<a href="#">Getting Started</a>
<ul>
<li><a href="#Prerequisites">Prerequisites</a></li>
<li><a href="#Installation">Installation</a>
<ul>
<li><a href="#sourceInstallation">Installing from source</a></li>
<li><a href="#pypiInstallation">Installing with PyPI</a></li>
</ul>
</li>
</ul>
</li>
<li><a href="#Usage">Usage</a></li>
<ul>
<li><a href="#instantiation">How to instantiate?</a></li>
<ul>
<li><a href="#scraperParameters">Parameters for <code>Facebook_scraper()</code></a></li>
<li><a href="#JSONWay">Scrape in JSON format</a>
<ul><li><a href="#jsonOutput">JSON Output Format</a></li></ul>
</li>
<li><a href="#CSVWay">Scrape in CSV format</a>
<ul><li><a href="#csvParameter">Parameters for scrape_to_csv() method</a></li></ul>
</li>
<li><a href="#outputKeys">Keys of the output data</a></li>
</ul>
</ul>
<li><a href="#tech">Tech</a></li>
<li><a href="#license">License</a></li>
</ol>
</details>
<!--TABLE of contents //-->
<h2 id="Prerequisites"> Prerequisites </h2>
- Internet Connection
- Python 3.7+
- Chrome or Firefox browser installed on your machine
<br>
<hr>
<h2 id="Installation">Installation:</h2>
<h3 id="sourceInstallation"> Installing from source: </h3>
```
git clone https://github.com/shaikhsajid1111/facebook_page_scraper
```
<h4> Inside project's directory </h4>
```
python3 setup.py install
```
<br>
<p id="pypiInstallation">Installing with pypi</p>
```
pip3 install facebook-page-scraper
```
<br>
<hr>
<h2 id="instantiation"> How to use? </h2>
```python
#import Facebook_scraper class from facebook_page_scraper
from facebook_page_scraper import Facebook_scraper
#instantiate the Facebook_scraper class
page_name = "metaai"
posts_count = 10
browser = "firefox"
proxy = "IP:PORT" #if proxy requires authentication then user:password@IP:PORT
timeout = 600 #600 seconds
headless = True
meta_ai = Facebook_scraper(page_name, posts_count, browser, proxy=proxy, timeout=timeout, headless=headless)
```
<h3 id="scraperParameters"> Parameters for <code>Facebook_scraper(page_name, posts_count, browser, proxy, timeout, headless) </code> class </h3>
<table>
<th>
<tr>
<td> Parameter Name </td>
<td> Parameter Type </td>
<td> Description </td>
</tr>
</th>
<tr>
<td>
page_name
</td>
<td>
String
</td>
<td>
Name of the facebook page
</td>
</tr>
<tr>
<td>
posts_count
</td>
<td>
Integer
</td>
<td>
Number of posts to scrape; if not passed, the default is 10
</td>
</tr>
<tr>
<td>
browser
</td>
<td>
String
</td>
<td>
Which browser to use, either chrome or firefox. If not passed, the default is chrome
</td>
</tr>
<tr>
<td>
proxy(optional)
</td>
<td>
String
</td>
<td>
Optional argument if the user wants to set a proxy; if the proxy requires authentication, the format is <code> user:password@IP:PORT </code>
</td>
</tr>
<tr>
<td>
timeout
</td>
<td>
Integer
</td>
<td>
The maximum amount of time the bot should run for. If not passed, the default timeout is set to 10 minutes
</td>
</tr>
<tr>
<td>
headless
</td>
<td>
Boolean
</td>
<td>
Whether to run the browser in headless mode. Default is True
</td>
</tr>
</table>
<br>
<hr>
<br>
<h3> Done with instantiation? <b>Let the scraping begin!</b> </h3>
<br>
<h3 id="JSONWay"> For post's data in <b>JSON</b> format:</h3>
```python
#call the scrap_to_json() method
json_data = meta_ai.scrap_to_json()
print(json_data)
```
Output:
```javascript
{
"2024182624425347": {
"name": "Meta AI",
"shares": 0,
"reactions": {
"likes": 154,
"loves": 19,
"wow": 0,
"cares": 0,
"sad": 0,
"angry": 0,
"haha": 0
},
"reaction_count": 173,
"comments": 2,
"content": "We’ve built data2vec, the first general high-performance self-supervised algorithm for speech, vision, and text. We applied it to different modalities and found it matches or outperforms the best self-supervised algorithms. We hope this brings us closer to a world where computers can learn to solve many different tasks without supervision. Learn more and get the code: https://ai.facebook.com/…/the-first-high-performance-self-s…",
"posted_on": "2022-01-20T22:43:35",
"video": [],
"image": [
"https://scontent-bom1-2.xx.fbcdn.net/v/t39.30808-6/s480x480/272147088_2024182621092014_6532581039236849529_n.jpg?_nc_cat=100&ccb=1-5&_nc_sid=8024bb&_nc_ohc=j4_1PAndJTIAX82OLNq&_nc_ht=scontent-bom1-2.xx&oh=00_AT9us__TvC9eYBqRyQEwEtYSit9r2UKYg0gFoRK7Efrhyw&oe=61F17B71"
],
"post_url": "https://www.facebook.com/MetaAI/photos/a.360372474139712/2024182624425347/?type=3&__xts__%5B0%5D=68.ARBoSaQ-pAC_ApucZNHZ6R-BI3YUSjH4sXsfdZRQ2zZFOwgWGhjt6dmg0VOcmGCLhSFyXpecOY9g1A94vrzU_T-GtYFagqDkJjHuhoyPW2vnkn7fvfzx-ql7fsBYxL5DgQVSsiC1cPoycdCvHmi6BV5Sc4fKADdgDhdFvVvr-ttzXG1ng2DbLzU-XfSes7SAnrPs-gxjODPKJ7AdqkqkSQJ4HrsLgxMgcLFdCsE6feWL7rXjptVWegMVMthhJNVqO0JHu986XBfKKqB60aBFvyAzTSEwJD6o72GtnyzQ-BcH7JxmLtb2_A&__tn__=-R"
}, ...
}
```
<div id="jsonOutput">
Output Structure for JSON format:
``` javascript
{
"id": {
"name": string,
"shares": integer,
"reactions": {
"likes": integer,
"loves": integer,
"wow": integer,
"cares": integer,
"sad": integer,
"angry": integer,
"haha": integer
},
"reaction_count": integer,
"comments": integer,
"content": string,
"video" : list,
"image" : list,
"posted_on": datetime, //string containing datetime in ISO 8601
"post_url": string
}
}
```
</div>
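As a small follow-up, a sketch like the one below iterates over that structure. It assumes `scrap_to_json()` returns the JSON as a string (as the printed output above suggests); if it already returns a dict, the `json.loads` call can be dropped:
```python
import json

# Hypothetical post-processing of the scraped output shown above.
posts = json.loads(json_data)
for post_id, post in posts.items():
    print(post_id, post["name"], post["reaction_count"], post["post_url"])
```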
<br>
<hr>
<br>
<h3 id="CSVWay"> For saving post's data directly to <b>CSV</b> file</h3>
``` python
#call scrap_to_csv(filename,directory) method
filename = "data_file" #file name without CSV extension,where data will be saved
directory = "E:\data" #directory where CSV file will be saved
meta_ai.scrap_to_csv(filename, directory)
```
content of ```data_file.csv```:
```csv
id,name,shares,likes,loves,wow,cares,sad,angry,haha,reactions_count,comments,content,posted_on,video,image,post_url
2024182624425347,Meta AI,0,154,19,0,0,0,0,0,173,2,"We’ve built data2vec, the first general high-performance self-supervised algorithm for speech, vision, and text. We applied it to different modalities and found it matches or outperforms the best self-supervised algorithms. We hope this brings us closer to a world where computers can learn to solve many different tasks without supervision. Learn more and get the code: https://ai.facebook.com/…/the-first-high-performance-self-s…",2022-01-20T22:43:35,,https://scontent-bom1-2.xx.fbcdn.net/v/t39.30808-6/s480x480/272147088_2024182621092014_6532581039236849529_n.jpg?_nc_cat=100&ccb=1-5&_nc_sid=8024bb&_nc_ohc=j4_1PAndJTIAX82OLNq&_nc_ht=scontent-bom1-2.xx&oh=00_AT9us__TvC9eYBqRyQEwEtYSit9r2UKYg0gFoRK7Efrhyw&oe=61F17B71,https://www.facebook.com/MetaAI/photos/a.360372474139712/2024182624425347/?type=3&__xts__%5B0%5D=68.ARAse4eiZmZQDOZumNZEDR0tQkE5B6g50K6S66JJPccb-KaWJWg6Yz4v19BQFSZRMd04MeBmV24VqvqMB3oyjAwMDJUtpmgkMiITtSP8HOgy8QEx_vFlq1j-UEImZkzeEgSAJYINndnR5aSQn0GUwL54L3x2BsxEqL1lElL7SnHfTVvIFUDyNfAqUWIsXrkI8X5KjoDchUj7aHRga1HB5EE0x60dZcHogUMb1sJDRmKCcx8xisRgk5XzdZKCQDDdEkUqN-Ch9_NYTMtxlchz1KfR0w9wRt8y9l7E7BNhfLrmm4qyxo-ZpA&__tn__=-R
...
```
<br>
<hr>
<br>
<h3 id="csvParameter"> Parameters for <code> scrap_to_csv(filename, directory) </code> method. </h3>
<table>
<th>
<tr>
<td> Parameter Name </td>
<td> Parameter Type </td>
<td> Description </td>
</tr>
</th>
<tr>
<td>
filename
</td>
<td>
String
</td>
<td>
Name of the CSV file where post's data will be saved
</td>
</tr>
<tr>
<td>
directory
</td>
<td>
String
</td>
<td>
Directory where the CSV file will be stored.
</td>
</tr>
</table>
<br>
<hr>
<br>
<h3 id="outputKeys">Keys of the outputs:</h3>
<table>
<th>
<tr>
<td>
Key
</td>
<td>
Type
</td>
<td>
Description
</td>
<tr>
</th>
<td>
<tr>
<td>
id
</td>
<td>
String
</td>
<td>
Post Identifier(integer casted inside string)
</td>
</tr>
</td>
<tr>
<td>
name
</td>
<td>
String
</td>
<td>
Name of the page
</td>
</tr>
<tr>
<td>
shares
</td>
<td>
Integer
</td>
<td>
Share count of post
</td>
</tr>
<tr>
<td>
reactions
</td>
<td>
Dictionary
</td>
<td>
Dictionary containing reactions as keys and their counts as values. Keys => <code> ["likes","loves","wow","cares","sad","angry","haha"] </code>
</td>
</tr>
<tr>
<td>
reaction_count
</td>
<td>
Integer
</td>
<td>
Total reaction count of post
</td>
</tr>
<tr>
<td>
comments
</td>
<td>
Integer
</td>
<td>
Comments count of post
</td>
</tr>
<tr>
<td>
content
</td>
<td>
String
</td>
<td>
Content of post as text
</td>
</tr>
<tr>
<td>
video
</td>
<td>
List
</td>
<td>
URLs of video present in that post
</td>
</tr>
<tr>
<td>
image
</td>
<td>
List
</td>
<td>
List containing URLs of all images present in the post
</td>
</tr>
<tr>
<td>
posted_on
</td>
<td>
Datetime
</td>
<td>
Time at which post was posted(in ISO 8601 format)
</td>
</tr>
<tr>
<td>
post_url
</td>
<td>
String
</td>
<td>
URL for that post
</td>
</tr>
</table>
<br>
<hr>
<h2 id="tech"> Tech </h2>
<p>This project uses different libraries to work properly.</p>
<ul>
<li> <a href="https://www.selenium.dev/" target='_blank'>Selenium</a></li>
<li> <a href="https://pypi.org/project/webdriver-manager/" target='_blank'>Webdriver Manager</a></li>
<li> <a href="https://pypi.org/project/python-dateutil/" target='_blank'>Python Dateutil</a></li>
<li> <a href="https://pypi.org/project/selenium-wire/" target='_blank'>Selenium-wire</a></li>
</ul>
<br>
<hr>
If you encounter anything unusual, please feel free to create an issue <a href='https://github.com/shaikhsajid1111/facebook_page_scraper/issues'>here</a>
<hr>
<h2 id="license"> LICENSE </h2>
MIT
| PypiClean |
/cdktf_cdktf_provider_hashicups-6.0.0-py3-none-any.whl/cdktf_cdktf_provider_hashicups/provider/__init__.py | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import cdktf as _cdktf_9a9027ec
import constructs as _constructs_77d1e7e8
class HashicupsProvider(
_cdktf_9a9027ec.TerraformProvider,
metaclass=jsii.JSIIMeta,
jsii_type="@cdktf/provider-hashicups.provider.HashicupsProvider",
):
'''Represents a {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs hashicups}.'''
def __init__(
self,
scope: _constructs_77d1e7e8.Construct,
id: builtins.str,
*,
alias: typing.Optional[builtins.str] = None,
host: typing.Optional[builtins.str] = None,
password: typing.Optional[builtins.str] = None,
username: typing.Optional[builtins.str] = None,
) -> None:
'''Create a new {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs hashicups} Resource.
:param scope: The scope in which to define this construct.
:param id: The scoped construct ID. Must be unique amongst siblings in the same scope
:param alias: Alias name. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#alias HashicupsProvider#alias}
:param host: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#host HashicupsProvider#host}.
:param password: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#password HashicupsProvider#password}.
:param username: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#username HashicupsProvider#username}.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__0ac09006b5290e236227d37eaef067e2bd7fe85933f091a023598b2fd03e23fb)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
config = HashicupsProviderConfig(
alias=alias, host=host, password=password, username=username
)
jsii.create(self.__class__, self, [scope, id, config])
@jsii.member(jsii_name="resetAlias")
def reset_alias(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetAlias", []))
@jsii.member(jsii_name="resetHost")
def reset_host(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetHost", []))
@jsii.member(jsii_name="resetPassword")
def reset_password(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetPassword", []))
@jsii.member(jsii_name="resetUsername")
def reset_username(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetUsername", []))
@jsii.member(jsii_name="synthesizeAttributes")
def _synthesize_attributes(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "synthesizeAttributes", []))
@jsii.python.classproperty
@jsii.member(jsii_name="tfResourceType")
def TF_RESOURCE_TYPE(cls) -> builtins.str:
return typing.cast(builtins.str, jsii.sget(cls, "tfResourceType"))
@builtins.property
@jsii.member(jsii_name="aliasInput")
def alias_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "aliasInput"))
@builtins.property
@jsii.member(jsii_name="hostInput")
def host_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "hostInput"))
@builtins.property
@jsii.member(jsii_name="passwordInput")
def password_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "passwordInput"))
@builtins.property
@jsii.member(jsii_name="usernameInput")
def username_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "usernameInput"))
@builtins.property
@jsii.member(jsii_name="alias")
def alias(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "alias"))
@alias.setter
def alias(self, value: typing.Optional[builtins.str]) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__13e093d64f3ec73b03318091066b530e4462d5dc166dd5a3bb99bcad5b1b97bf)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "alias", value)
@builtins.property
@jsii.member(jsii_name="host")
def host(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "host"))
@host.setter
def host(self, value: typing.Optional[builtins.str]) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__c29987345cf0743730a50ca0464c508276e9b14696438ddaab1b99fb36c1677d)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "host", value)
@builtins.property
@jsii.member(jsii_name="password")
def password(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "password"))
@password.setter
def password(self, value: typing.Optional[builtins.str]) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__b463c77c7dda0293ca041e0e71f9aed173fa10e24723b151492088fa0a9d41c8)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "password", value)
@builtins.property
@jsii.member(jsii_name="username")
def username(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "username"))
@username.setter
def username(self, value: typing.Optional[builtins.str]) -> None:
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__4f83d159fe46b5659f73c5df30412f9392f26d30320a7f7884b4cd6228167036)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "username", value)
@jsii.data_type(
jsii_type="@cdktf/provider-hashicups.provider.HashicupsProviderConfig",
jsii_struct_bases=[],
name_mapping={
"alias": "alias",
"host": "host",
"password": "password",
"username": "username",
},
)
class HashicupsProviderConfig:
def __init__(
self,
*,
alias: typing.Optional[builtins.str] = None,
host: typing.Optional[builtins.str] = None,
password: typing.Optional[builtins.str] = None,
username: typing.Optional[builtins.str] = None,
) -> None:
'''
:param alias: Alias name. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#alias HashicupsProvider#alias}
:param host: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#host HashicupsProvider#host}.
:param password: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#password HashicupsProvider#password}.
:param username: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#username HashicupsProvider#username}.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__1215b2927309adab7120a222f882fa7ad0685d1557e42737fc391243e6a4f2b5)
check_type(argname="argument alias", value=alias, expected_type=type_hints["alias"])
check_type(argname="argument host", value=host, expected_type=type_hints["host"])
check_type(argname="argument password", value=password, expected_type=type_hints["password"])
check_type(argname="argument username", value=username, expected_type=type_hints["username"])
self._values: typing.Dict[builtins.str, typing.Any] = {}
if alias is not None:
self._values["alias"] = alias
if host is not None:
self._values["host"] = host
if password is not None:
self._values["password"] = password
if username is not None:
self._values["username"] = username
@builtins.property
def alias(self) -> typing.Optional[builtins.str]:
'''Alias name.
Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#alias HashicupsProvider#alias}
'''
result = self._values.get("alias")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def host(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#host HashicupsProvider#host}.'''
result = self._values.get("host")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def password(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#password HashicupsProvider#password}.'''
result = self._values.get("password")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def username(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/hashicups/0.3.1/docs#username HashicupsProvider#username}.'''
result = self._values.get("username")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "HashicupsProviderConfig(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"HashicupsProvider",
"HashicupsProviderConfig",
]
publication.publish()
def _typecheckingstub__0ac09006b5290e236227d37eaef067e2bd7fe85933f091a023598b2fd03e23fb(
scope: _constructs_77d1e7e8.Construct,
id: builtins.str,
*,
alias: typing.Optional[builtins.str] = None,
host: typing.Optional[builtins.str] = None,
password: typing.Optional[builtins.str] = None,
username: typing.Optional[builtins.str] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__13e093d64f3ec73b03318091066b530e4462d5dc166dd5a3bb99bcad5b1b97bf(
value: typing.Optional[builtins.str],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__c29987345cf0743730a50ca0464c508276e9b14696438ddaab1b99fb36c1677d(
value: typing.Optional[builtins.str],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__b463c77c7dda0293ca041e0e71f9aed173fa10e24723b151492088fa0a9d41c8(
value: typing.Optional[builtins.str],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__4f83d159fe46b5659f73c5df30412f9392f26d30320a7f7884b4cd6228167036(
value: typing.Optional[builtins.str],
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__1215b2927309adab7120a222f882fa7ad0685d1557e42737fc391243e6a4f2b5(
*,
alias: typing.Optional[builtins.str] = None,
host: typing.Optional[builtins.str] = None,
password: typing.Optional[builtins.str] = None,
username: typing.Optional[builtins.str] = None,
) -> None:
"""Type checking stubs"""
pass | PypiClean |
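# Usage sketch (illustrative only; the host/username/password values below are
# placeholders, not defaults of the provider): inside a cdktf TerraformStack the
# provider would be configured roughly as
#
#   HashicupsProvider(self, "hashicups",
#                     host="http://localhost:19090",
#                     username="education",
#                     password="test123")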
/transia_xrpl_py-1.7.0a1.tar.gz/transia_xrpl_py-1.7.0a1/xrpl/core/binarycodec/binary_wrappers/binary_parser.py | from __future__ import annotations # Requires Python 3.7+
from typing import TYPE_CHECKING, Optional, Tuple, Type, cast
from typing_extensions import Final
from xrpl.core.binarycodec.definitions import definitions
from xrpl.core.binarycodec.definitions.field_header import FieldHeader
from xrpl.core.binarycodec.definitions.field_instance import FieldInstance
from xrpl.core.binarycodec.exceptions import XRPLBinaryCodecException
if TYPE_CHECKING:
# To prevent a circular dependency.
from xrpl.core.binarycodec.types.serialized_type import SerializedType
# Constants used in length prefix decoding:
# Max length that can be represented in a single byte per XRPL serialization encoding
_MAX_SINGLE_BYTE_LENGTH: Final[int] = 192
# Max length that can be represented in 2 bytes per XRPL serialization restrictions
_MAX_DOUBLE_BYTE_LENGTH: Final[int] = 12481
# Max value that can be used in the second byte of a length field
_MAX_SECOND_BYTE_VALUE: Final[int] = 240
# Max value that can be represented using one 8-bit byte (2^8)
_MAX_BYTE_VALUE: Final[int] = 256
# Max value that can be represented in using two 8-bit bytes (2^16)
_MAX_DOUBLE_BYTE_VALUE: Final[int] = 65536
class BinaryParser:
"""Deserializes from hex-encoded XRPL binary format to JSON fields and values."""
def __init__(self: BinaryParser, hex_bytes: str) -> None:
"""Construct a BinaryParser that will parse hex-encoded bytes."""
self.bytes = bytes.fromhex(hex_bytes)
def __len__(self: BinaryParser) -> int:
"""Return the number of bytes in this parser's buffer."""
return len(self.bytes)
def peek(self: BinaryParser) -> Optional[bytes]:
"""
Peek the first byte of the BinaryParser.
Returns:
The first byte of the BinaryParser.
"""
if len(self.bytes) > 0:
return cast(bytes, self.bytes[0])
return None
def skip(self: BinaryParser, n: int) -> None:
"""
Consume the first n bytes of the BinaryParser.
Args:
n: The number of bytes to consume.
Raises:
XRPLBinaryCodecException: If n bytes can't be skipped.
"""
if n > len(self.bytes):
raise XRPLBinaryCodecException(
f"BinaryParser can't skip {n} bytes, only contains {len(self.bytes)}."
)
self.bytes = self.bytes[n:]
def read(self: BinaryParser, n: int) -> bytes:
"""
Consume and return the first n bytes of the BinaryParser.
Args:
n: The number of bytes to read.
Returns:
The bytes read.
"""
first_n_bytes = self.bytes[:n]
self.skip(n)
return first_n_bytes
def read_uint8(self: BinaryParser) -> int:
"""
Read 1 byte from parser and return as unsigned int.
Returns:
The byte read.
"""
return int.from_bytes(self.read(1), byteorder="big", signed=False)
def read_uint16(self: BinaryParser) -> int:
"""
Read 2 bytes from parser and return as unsigned int.
Returns:
The bytes read.
"""
return int.from_bytes(self.read(2), byteorder="big", signed=False)
def read_uint32(self: BinaryParser) -> int:
"""
Read 4 bytes from parser and return as unsigned int.
Returns:
The bytes read.
"""
return int.from_bytes(self.read(4), byteorder="big", signed=False)
def is_end(self: BinaryParser, custom_end: Optional[int] = None) -> bool:
"""
Returns whether the binary parser has finished parsing (e.g. there is nothing
left in the buffer that needs to be processed).
Args:
custom_end: An ending byte-phrase.
Returns:
Whether or not it's the end.
"""
return len(self.bytes) == 0 or (
custom_end is not None and len(self.bytes) <= custom_end
)
def read_variable_length(self: BinaryParser) -> bytes:
"""
Reads and returns variable length encoded bytes.
Returns:
The bytes read.
"""
return self.read(self._read_length_prefix())
def _read_length_prefix(self: BinaryParser) -> int:
"""
Reads a variable length encoding prefix and returns the encoded length.
The formula for decoding a length prefix is described in:
`Length Prefixing <https://xrpl.org/serialization.html#length-prefixing>`_
"""
byte1 = self.read_uint8()
# If the field contains 0 to 192 bytes of data, the first byte defines
# the length of the contents
if byte1 <= _MAX_SINGLE_BYTE_LENGTH:
return byte1
# If the field contains 193 to 12480 bytes of data, the first two bytes
# indicate the length of the field with the following formula:
# 193 + ((byte1 - 193) * 256) + byte2
if byte1 <= _MAX_SECOND_BYTE_VALUE:
byte2 = self.read_uint8()
return (
(_MAX_SINGLE_BYTE_LENGTH + 1)
+ ((byte1 - (_MAX_SINGLE_BYTE_LENGTH + 1)) * _MAX_BYTE_VALUE)
+ byte2
)
# If the field contains 12481 to 918744 bytes of data, the first three
# bytes indicate the length of the field with the following formula:
# 12481 + ((byte1 - 241) * 65536) + (byte2 * 256) + byte3
if byte1 <= 254:
byte2 = self.read_uint8()
byte3 = self.read_uint8()
return (
_MAX_DOUBLE_BYTE_LENGTH
+ ((byte1 - (_MAX_SECOND_BYTE_VALUE + 1)) * _MAX_DOUBLE_BYTE_VALUE)
+ (byte2 * _MAX_BYTE_VALUE)
+ byte3
)
raise XRPLBinaryCodecException(
"Length prefix must contain between 1 and 3 bytes."
)
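    # Worked examples of the length-prefix formulas above (illustrative only):
    #   a single byte 0xC0 (192) encodes a length of 192;
    #   bytes 0xC1 0x00 (193, 0) encode 193 + (193 - 193) * 256 + 0 = 193;
    #   bytes 0xF1 0x00 0x00 (241, 0, 0) encode 12481 + 0 + 0 + 0 = 12481.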
def read_field_header(self: BinaryParser) -> FieldHeader:
"""
Reads field ID from BinaryParser and returns as a FieldHeader object.
Returns:
The field header.
Raises:
XRPLBinaryCodecException: If the field ID cannot be read.
"""
type_code = self.read_uint8()
field_code = type_code & 15
type_code >>= 4
if type_code == 0:
type_code = self.read_uint8()
if type_code == 0 or type_code < 16:
raise XRPLBinaryCodecException(
"Cannot read field ID, type_code out of range."
)
if field_code == 0:
field_code = self.read_uint8()
if field_code == 0 or field_code < 16:
raise XRPLBinaryCodecException(
"Cannot read field ID, field_code out of range."
)
return FieldHeader(type_code, field_code)
def read_field(self: BinaryParser) -> FieldInstance:
"""
Read the field ordinal at the head of the BinaryParser and return a
FieldInstance object representing information about the field contained
in the following bytes.
Returns:
The field ordinal at the head of the BinaryParser.
"""
field_header = self.read_field_header()
field_name = definitions.get_field_name_from_header(field_header)
return definitions.get_field_instance(field_name)
def read_type(
self: BinaryParser, field_type: Type[SerializedType]
) -> SerializedType:
"""
Read next bytes from BinaryParser as the given type.
Args:
field_type: The field type to read the next bytes as.
Returns:
            The bytes read, parsed as an instance of the given field type.
"""
return field_type.from_parser(self, None)
def read_field_value(self: BinaryParser, field: FieldInstance) -> SerializedType:
"""
Read value of the type specified by field from the BinaryParser.
Args:
field: The FieldInstance specifying the field to read.
Returns:
A SerializedType read from the BinaryParser.
Raises:
XRPLBinaryCodecException: If a parser cannot be constructed from field.
"""
field_type = field.associated_type
if field.is_variable_length_encoded:
size_hint = self._read_length_prefix()
value = field_type.from_parser(self, size_hint)
else:
value = field_type.from_parser(self, None)
if value is None:
raise XRPLBinaryCodecException(
f"from_parser for {field.name}, {field.type} returned None."
)
return value
def read_field_and_value(
self: BinaryParser,
) -> Tuple[FieldInstance, SerializedType]:
"""
Get the next field and value from the BinaryParser.
Returns:
A (FieldInstance, SerializedType) pair as read from the BinaryParser.
"""
field = self.read_field()
return field, self.read_field_value(field) | PypiClean |
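# Minimal usage sketch (not part of the library); the hex string below is
# arbitrary illustrative data, not a real XRPL serialized object:
#
#   parser = BinaryParser("DEADBEEF")
#   parser.read_uint16()  # -> 57005 (0xDEAD)
#   parser.read(2)        # -> b'\xbe\xef'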
/python-tripleoclient-20.0.0.tar.gz/python-tripleoclient-20.0.0/tripleoclient/v1/overcloud_profiles.py |
import logging
from osc_lib.i18n import _
from tripleoclient import command
from tripleoclient import exceptions
from tripleoclient import utils
DEPRECATION_MSG = '''
This command has been DEPRECATED and will be removed. The compute service is no
longer used on the undercloud by default, hence profile matching with compute
flavors is no longer used.
'''
class MatchProfiles(command.Command):
"""Assign and validate profiles on nodes"""
log = logging.getLogger(__name__ + ".MatchProfiles")
def get_parser(self, prog_name):
parser = super(MatchProfiles, self).get_parser(prog_name)
parser.epilog = DEPRECATION_MSG
parser.add_argument(
'--dry-run',
action='store_true',
default=False,
help=_('Only run validations, but do not apply any changes.')
)
utils.add_deployment_plan_arguments(parser)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
self.log.warning(DEPRECATION_MSG)
bm_client = self.app.client_manager.baremetal
flavors = self._collect_flavors(parsed_args)
errors, warnings = utils.assign_and_verify_profiles(
bm_client, flavors,
assign_profiles=True,
dry_run=parsed_args.dry_run
)
if errors:
raise exceptions.ProfileMatchingError(
_('Failed to validate and assign profiles.'))
def _collect_flavors(self, parsed_args):
"""Collect nova flavors in use.
:returns: dictionary flavor name -> (flavor object, scale)
"""
compute_client = self.app.client_manager.compute
flavors = {f.name: f for f in compute_client.flavors.list()}
result = {}
message = "Provided --{}-flavor, '{}', does not exist"
for target, (flavor_name, scale) in (
utils.get_roles_info(parsed_args).items()
):
if flavor_name is None or not scale:
self.log.debug("--{}-flavor not used".format(target))
continue
try:
flavor = flavors[flavor_name]
except KeyError:
raise exceptions.ProfileMatchingError(
message.format(target, flavor_name))
result[flavor_name] = (flavor, scale)
return result
POSTFIX = '_profile'
class ListProfiles(command.Lister):
"""List overcloud node profiles"""
log = logging.getLogger(__name__ + ".ListProfiles")
def get_parser(self, prog_name):
parser = super(ListProfiles, self).get_parser(prog_name)
parser.epilog = DEPRECATION_MSG
parser.add_argument(
'--all',
action='store_true',
default=False,
help=_('List all nodes, even those not available to Nova.')
)
utils.add_deployment_plan_arguments(parser)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
self.log.warning(DEPRECATION_MSG)
bm_client = self.app.client_manager.baremetal
compute_client = self.app.client_manager.compute
hypervisors = {h.hypervisor_hostname: h
for h in compute_client.hypervisors.list()
if h.hypervisor_type == 'ironic'}
result = []
maintenance = None if parsed_args.all else False
for node in bm_client.node.list(detail=True, maintenance=maintenance):
error = ''
if node.provision_state not in ('active', 'available'):
error = "Provision state %s" % node.provision_state
elif node.power_state in (None, 'error'):
error = "Power state %s" % node.power_state
elif node.maintenance:
error = "Maintenance"
else:
try:
hypervisor = hypervisors[node.uuid]
except KeyError:
error = 'No hypervisor record'
else:
if hypervisor.status != 'enabled':
error = 'Compute service disabled'
elif hypervisor.state != 'up':
error = 'Compute service down'
if error and not parsed_args.all:
continue
caps = utils.node_get_capabilities(node)
profile = caps.get('profile')
possible_profiles = [k[:-len(POSTFIX)]
for k, v in caps.items()
if k.endswith(POSTFIX) and
v.lower() in ('1', 'true')]
# sorting for convenient display and testing
possible_profiles.sort()
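            # Example (illustrative): capabilities such as
            # {'profile': 'compute', 'control_profile': 'true'} yield
            # profile == 'compute' and possible_profiles == ['control'].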
record = (node.uuid, node.name or '', node.provision_state,
profile, ', '.join(possible_profiles))
if parsed_args.all:
record += (error,)
result.append(record)
cols = ("Node UUID", "Node Name", "Provision State", "Current Profile",
"Possible Profiles")
if parsed_args.all:
cols += ('Error',)
return (cols, result) | PypiClean |
/jexp-0.1.2.tar.gz/jexp-0.1.2/README.rst | jexp
====
:synopsis: A silly little JS expression builder to let you use native Python to build Javascript expression strings.
This package only allows the creation of simple (that is, non-assignment) Javascript expressions using an intuitive Python DSL.
Logical Expressions
===================
>>> from jexp import J
>>> e = J('var_1') & J('var_2')
>>> str(e)
'(var_1&&var_2)'
>>> str(e | 'abc')
'((var_1&&var_2)||"abc")'
The argument to the J class is output verbatim as a string in the resulting JS, so ``J('my_var')`` is a good way to refer to a variable and ``J(5)`` to the number literal 5. If you need an actual string literal, you can either add the quotes yourself in the J call, as in ``J('"my string"')``, or combine the J object with a Python str, as shown above.
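For instance, quoting inside the ``J`` call should render the string literal as-is (the expected output below is inferred from the operators shown in this README, not copied from the package's test suite):
>>> str(J('"my string"') == J('name'))
'("my string"==name)'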
Mathematical Expressions
========================
>>> str(J(5) + 28)
'(5+28)'
>>> str(J('my_var') + 28)
'(my_var+28)'
Division isn't implemented, but the other arithmetic operators you'd expect are available.
Comparisons
===========
>>> e = J(5) <= 6
>>> str(e)
'(5<=6)'
>>> str(e == "test_string")
'((5<=6)=="test_string")'
Attribute Access
================
>>> e = J('my_var').attribute
>>> str(e)
'my_var.attribute'
This should work for any attribute that doesn't start with an underscore (and some that do).
Function Calling
================
>>> e = J('func')('a','b')
>>> str(e)
'func("a","b")'
You can also try this with other J objects.
>>> str(J('func')(J('arg1'),J('arg2')))
'func(arg1,arg2)'
| PypiClean |
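Putting It Together
===================
The pieces above compose, so chained expressions should build up naturally. The following sketch is inferred from the operators shown in this README rather than taken from the package's own tests:
>>> e = J('Math').max(J('a'), J(3)) <= J('limit')
>>> str(e)
'(Math.max(a,3)<=limit)'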
/torch-tensornet-1.3.3.tar.gz/torch-tensornet-1.3.3/tensornet/models/optimizer/__init__.py | import torch
import torch.optim as optim
from typing import Tuple
def sgd(
model: torch.nn.Module,
learning_rate: float = 0.01,
    momentum: float = 0,
    dampening: float = 0,
l2_factor: float = 0.0,
nesterov: bool = False,
):
"""SGD optimizer.
Args:
model (torch.nn.Module): Model Instance.
learning_rate (:obj:`float`, optional): Learning rate for the optimizer. (default: 0.01)
momentum (:obj:`float`, optional): Momentum factor. (default: 0)
dampening (:obj:`float`, optional): Dampening for momentum. (default: 0)
l2_factor (:obj:`float`, optional): Factor for L2 regularization. (default: 0)
nesterov (:obj:`bool`, optional): Enables nesterov momentum. (default: False)
Returns:
SGD optimizer.
"""
return optim.SGD(
model.parameters(),
lr=learning_rate,
momentum=momentum,
dampening=dampening,
weight_decay=l2_factor,
nesterov=nesterov
)
def adam(
model: torch.nn.Module,
learning_rate: float = 0.001,
betas: Tuple[float] = (0.9, 0.999),
eps: float = 1e-08,
l2_factor: float = 0.0,
amsgrad: bool = False,
):
"""Adam optimizer.
Args:
model (torch.nn.Module): Model Instance.
learning_rate (:obj:`float`, optional): Learning rate for the optimizer. (default: 0.001)
betas (:obj:`tuple`, optional): Coefficients used for computing running averages of
gradient and its square. (default: (0.9, 0.999))
eps (:obj:`float`, optional): Term added to the denominator to improve numerical stability.
(default: 1e-8)
l2_factor (:obj:`float`, optional): Factor for L2 regularization. (default: 0)
amsgrad (:obj:`bool`, optional): Whether to use the AMSGrad variant of this algorithm from the
paper `On the Convergence of Adam and Beyond <https://openreview.net/forum?id=ryQu7f-RZ>`_.
(default: False)
Returns:
Adam optimizer.
"""
return optim.Adam(
model.parameters(),
lr=learning_rate,
betas=betas,
eps=eps,
weight_decay=l2_factor,
amsgrad=amsgrad
) | PypiClean |
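# Usage sketch (illustrative, not part of the package): both helpers simply
# forward to the corresponding torch.optim constructors, so any nn.Module works.
#
#   import torch.nn as nn
#   model = nn.Linear(10, 2)
#   opt_sgd = sgd(model, learning_rate=0.01, momentum=0.9, nesterov=True)
#   opt_adam = adam(model, learning_rate=3e-4, l2_factor=1e-4)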
/esub-epipe-1.11.0.tar.gz/esub-epipe-1.11.0/src/esub/utils.py |
import os
import math
import sys
import shutil
import datetime
import subprocess
import shlex
import portalocker
import multiprocessing
from functools import partial
import numpy as np
import time
from ekit import logger as logger_utils
LOGGER = logger_utils.init_logger(__name__)
TIMEOUT_MESSAGE = (
"Maximum number of pending jobs reached, will sleep for 30 minutes and retry"
)
def decimal_hours_to_str(dec_hours):
"""Transforms decimal hours into the hh:mm format
:param dec_hours: decimal hours, float or int
:return: string in the format hh:mm
"""
full_hours = math.floor(dec_hours)
minutes = math.ceil((dec_hours - full_hours) * 60)
if minutes == 60:
full_hours += 1
minutes = 0
if minutes < 10:
time_str = "{}:0{}".format(full_hours, minutes)
else:
time_str = "{}:{}".format(full_hours, minutes)
return time_str
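# Examples of the conversion above (derived directly from the code):
#   decimal_hours_to_str(1.5)   -> "1:30"
#   decimal_hours_to_str(2.25)  -> "2:15"
#   decimal_hours_to_str(1.999) -> "2:00"  (minutes round up to a full hour)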
def make_resource_string(
function,
main_memory,
main_time,
main_scratch,
main_n_cores,
main_gpu,
main_gpu_memory,
watchdog_memory,
watchdog_time,
watchdog_scratch,
watchdog_n_cores,
watchdog_gpu,
watchdog_gpu_memory,
merge_memory,
merge_time,
merge_scratch,
merge_n_cores,
merge_gpu,
merge_gpu_memory,
system,
verbosity=3,
):
"""
Creates the part of the submission string which handles
    the allocation of resources
:param function: The name of the function defined
in the executable that will be submitted
:param main_memory: Memory per core to allocate for the main job
:param main_time: The Wall time requested for the main job
:param main_scratch: Scratch per core to allocate for the main job
:param main_n_cores: Number of cores to allocate for the main job
:param main_gpu: Number of GPUs to allocate for the main job
:param main_gpu_memory: Memory per GPU to allocate for the main job
:param watchdog_memory: Memory per core to allocate for the watchdog job
:param watchdog_time: The Wall time requested for the watchdog job
:param watchdog_scratch: Scratch to allocate for the watchdog job
:param watchdog_n_cores: Number of cores to allocate for the watchdog job
:param watchdog_gpu: Number of GPUs to allocate for the watchdog job
:param watchdog_gpu_memory: Memory per GPU to allocate for the watchdog job
:param merge_memory: Memory per core to allocate for the merge job
:param merge_time: The Wall time requested for the merge job
    :param merge_scratch: Scratch to allocate for the merge job
:param merge_n_cores: Number of cores to allocate for the merge job
:param merge_gpu: Number of GPUs to allocate for the merge job
:param merge_gpu_memory: Memory per GPU to allocate for the merge job
    :param system: The type of the queuing system of the cluster
:param verbosity: Verbosity level (0 - 4).
:return: A string that is part of the submission string and the gpu string that
        needs to be added to srun
"""
logger_utils.set_logger_level(LOGGER, verbosity)
gpu_cmd = ""
if function == "main":
mem = main_memory
time = main_time
scratch = main_scratch
n_cores = main_n_cores
gpu = main_gpu
gpu_memory = main_gpu_memory
elif function == "watchdog":
mem = watchdog_memory
time = watchdog_time
scratch = watchdog_scratch
n_cores = watchdog_n_cores
gpu = watchdog_gpu
gpu_memory = watchdog_gpu_memory
elif function == "merge":
mem = merge_memory
time = merge_time
scratch = merge_scratch
n_cores = merge_n_cores
gpu = merge_gpu
gpu_memory = merge_gpu_memory
elif function == "rerun_missing":
mem = main_memory
time = main_time
scratch = main_scratch
n_cores = main_n_cores
gpu = main_gpu
gpu_memory = main_gpu_memory
elif function == "merge_log_files":
mem = main_memory
time = 4
scratch = main_scratch
n_cores = 1
gpu = 0
gpu_memory = 0
if system == "bsub":
resource_string = (
"-W {} -R rusage[mem={}] "
"-R rusage[scratch={}] "
"-n {}".format(decimal_hours_to_str(time), mem, scratch, n_cores)
)
elif system == "slurm":
resource_string = (
"#SBATCH --time={}:00 \n"
"#SBATCH --mem-per-cpu={} \n"
"#SBATCH --tmp={} \n"
"#SBATCH --cpus-per-task={} \n".format(
decimal_hours_to_str(time), int(mem), int(scratch), n_cores
)
)
if gpu > 0:
resource_string += "#SBATCH --gpus={} \n".format(gpu)
gpumem_in_gb = gpu_memory / 1024
resource_string += f"#SBATCH --gres=gpumem:{gpumem_in_gb:.0f}g \n"
# slurm needs the gpus and gres argument to be passed to srun and sbatch
gpu_cmd = f"--gpus={gpu} --gres=gpumem:{gpumem_in_gb:.0f}g "
resource_string += "\n"
elif system == "daint":
resource_string = "#SBATCH --time={}:00 \n#SBATCH --mem={} \n \n".format(
decimal_hours_to_str(time), mem
)
# TODO: local scratch for slurm
if scratch > 0:
LOGGER.warning(
"Not Implemented Warning: Automatic local scratch "
"allocation not supported for DAINT system. Ignoring."
)
LOGGER.debug(f"Built resource string as {resource_string}")
return resource_string, gpu_cmd
def get_log_filenames(log_dir, job_name, function, system="bsub"):
"""
Builds the filenames of the stdout and stderr log files for a
given job name and a given function to run.
:param log_dir: directory where the logs are stored
:param job_name: Name of the job that will write to the log files
:param function: Function that will be executed
    :param system: The type of the queuing system of the cluster
:return: filenames for stdout and stderr logs
"""
job_name_ext = job_name + "_" + function
if system == "slurm" and (function == "main" or function == "main_r"):
stdout_log = os.path.join(log_dir, "{}_index%a.o".format(job_name_ext))
stderr_log = os.path.join(log_dir, "{}_index%a.e".format(job_name_ext))
else:
stdout_log = os.path.join(log_dir, "{}.o".format(job_name_ext))
stderr_log = os.path.join(log_dir, "{}.e".format(job_name_ext))
return stdout_log, stderr_log
def get_source_cmd(source_file, verbosity=3):
"""
Builds the command to source a given file if the file exists,
otherwise returns an empty string.
:param source_file: path to the (possibly non-existing) source file,
can be relative and can contain "~"
:param verbosity: Verbosity level (0 - 4).
:return: command to source the file if it exists or empty string
"""
logger_utils.set_logger_level(LOGGER, verbosity)
source_file_abs = os.path.abspath(os.path.expanduser(source_file))
if os.path.isfile(source_file_abs):
source_cmd = "source {}; ".format(source_file_abs)
LOGGER.debug(f"Running source script at {source_file_abs}")
else:
LOGGER.warning("Source file {} not found, skipping".format(source_file))
source_cmd = ""
return source_cmd
def get_dependency_string(function, jobids, ext_dependencies, system, verbosity=3):
"""
Constructs the dependency string which handles which other jobs
this job is dependent on.
:param function: The type o function to submit
:param jobids: Dictionary of the jobids for each job already submitted
:param ext_dependencies: If external dependencies are given they get
added to the dependency string
(this happens if epipe is used)
    :param system: The type of the queuing system of the cluster
:param verbosity: Verbosity level (0 - 4).
    :return: A string which is used as a substring of the submission string
        and handles the dependencies of the job
"""
logger_utils.set_logger_level(LOGGER, verbosity)
dep_string = ""
# no dependencies for main
if function == "main":
if ext_dependencies != "":
dep_string = '-w "' + ext_dependencies + '"'
else:
dep_string = ""
return dep_string
if system == "slurm":
dep_string = dep_string.replace("ended(", "afterany:")
dep_string = dep_string.replace("started(", "after:")
dep_string = dep_string.replace(") && ", ",")
dep_string = dep_string.replace(")", "")
dep_string = dep_string.replace('-w "', '--dependency="')
return dep_string
# watchdog starts along with main
elif function == "watchdog":
if "main" in jobids.keys():
for id in jobids["main"]:
dep_string += "{}({}) && ".format("started", id)
else:
LOGGER.warning(
"Function {} has not been submitted -> Skipping "
"in dependencies for {}".format("main", function)
)
# rerun missing starts after main
elif function == "rerun_missing":
if "main" in jobids.keys():
for id in jobids["main"]:
dep_string += "{}({}) && ".format("ended", id)
else:
LOGGER.warning(
"Function {} has not been submitted -> Skipping "
"in dependencies for {}".format("main", function)
)
# merge_log_files starts after main or rerun_missing
elif function == "merge_log_files":
if "rerun_missing" in jobids.keys():
for id in jobids["rerun_missing"]:
dep_string += "{}({}) && ".format("ended", id)
elif "main" in jobids.keys():
for id in jobids["main"]:
dep_string += "{}({}) && ".format("ended", id)
# merge starts after all the others
elif function == "merge":
if "main" in jobids.keys():
for id in jobids["main"]:
dep_string += "{}({}) && ".format("ended", id)
else:
LOGGER.warning(
"Function {} has not been submitted -> Skipping "
"in dependencies for {}".format("main", function)
)
if "watchdog" in jobids.keys():
for id in jobids["watchdog"]:
dep_string += "{}({}) && ".format("ended", id)
else:
LOGGER.warning(
"Function {} has not been submitted -> Skipping "
"in dependencies for {}".format("watchdog", function)
)
if "rerun_missing" in jobids.keys():
for id in jobids["rerun_missing"]:
dep_string += "{}({}) && ".format("ended", id)
else:
LOGGER.warning(
"Function {} has not been submitted -> Skipping "
"in dependencies for {}".format("rerun_missing", function)
)
else:
raise ValueError("Dependencies for function" " {} not defined".format(function))
# remove trailing &&
if len(dep_string) > 0:
dep_string = dep_string[:-4]
if ext_dependencies != "":
dep_string = dep_string + " && " + ext_dependencies
# remove leading &&
if dep_string[:4] == " && ":
dep_string = dep_string[4:]
dep_string = '-w "' + dep_string + '"'
if system == "slurm":
dep_string = dep_string.replace("ended(", "afterany:")
dep_string = dep_string.replace("started(", "after:")
dep_string = dep_string.replace(") && ", ",")
dep_string = dep_string.replace(")", "")
dep_string = dep_string.replace('-w "', '--dependency="')
if len(dep_string) > 0:
LOGGER.debug(f"Built dependency string as {dep_string}")
return dep_string
def make_cmd_string(
function,
source_file,
n_jobs,
tasks,
mode,
job_name,
function_args,
exe,
main_memory,
main_time,
main_scratch,
main_n_cores,
main_gpu,
main_gpu_memory,
watchdog_time,
watchdog_memory,
watchdog_scratch,
watchdog_n_cores,
watchdog_gpu,
watchdog_gpu_memory,
merge_memory,
merge_time,
merge_scratch,
merge_n_cores,
merge_gpu,
merge_gpu_memory,
log_dir,
dependency,
system,
main_name="main",
batchsize=100000,
max_njobs=-1,
add_args="",
add_bsub="",
discard_output=False,
verbosity=3,
main_mode="jobarray",
# main_n_cores=1,
nodes=1,
MPI_tasks_per_core=1,
MPI_tasks_per_node=1,
OpenMP_threads_per_task=1,
):
"""
Creates the submission string which gets submitted to the queing system
:param function: The name of the function defined in the
executable that will be submitted
:param source_file: A file which gets executed
before running the actual function(s)
:param n_jobs: The number of jobs that will be requested for the job
:param tasks: The task string, which will get parsed into the job indices
:param mode: The mode in which the job will be
        run (MPI job or as a jobarray)
:param job_name: The name of the job
:param function_args: The remaining arguments that
will be forwarded to the executable
:param exe: The path of the executable
:param main_memory: Memory per core to allocate for the main job
:param main_time: The Wall time requested for the main job
:param main_scratch: Scratch per core to allocate for the main job
:param main_n_cores: Number of cores to allocate for the main job
:param main_gpu: Number of GPUs to allocate for the main job
:param main_gpu_memory: Memory per GPU to allocate for the main job
:param watchdog_memory: Memory per core to allocate for the watchdog job
:param watchdog_time: The Wall time requested for the watchdog job
:param watchdog_scratch: Scratch to allocate for the watchdog job
:param watchdog_n_cores: Number of cores to allocate for the watchdog job
:param watchdog_gpu: Number of GPUs to allocate for the watchdog job
:param watchdog_gpu_memory: Memory per GPU to allocate for the watchdog job
:param merge_memory: Memory per core to allocate for the merge job
:param merge_time: The Wall time requested for the merge job
:param merge_scratch: Scratch to allocate for the merge job
:param merge_n_cores: Number of cores to allocate for the merge job
:param merge_gpu: Number of GPUs to allocate for the merge job
:param merge_gpu_memory: Memory per GPU to allocate for the merge job
    :param log_dir: The path to the log directory
:param dependency: The dependency string
    :param system: The type of the queuing system of the cluster
:param main_name: name of the main function
:param batchsize: If not zero the jobarray gets divided into batches.
:param max_njobs: Maximum number of jobs allowed to run at the same time.
:param add_args: Additional cluster-specific arguments
:param add_bsub: Additional bsub arguments to pass
:param discard_output: If True writes stdout/stderr to /dev/null
:param verbosity: Verbosity level (0 - 4).
    :return: The submission string that will get submitted to the cluster
"""
logger_utils.set_logger_level(LOGGER, verbosity)
LOGGER.info("")
if function == "main_rerun":
function = "main"
if system == "slurm":
# reruns should use different log files to avoid overwriting
log_function = "main_r"
else:
log_function = "main"
else:
log_function = function
# allocate computing resources
resource_string, gpu_cmd = make_resource_string(
function,
main_memory,
main_time,
main_scratch,
main_n_cores,
main_gpu,
main_gpu_memory,
watchdog_memory,
watchdog_time,
watchdog_scratch,
watchdog_n_cores,
watchdog_gpu,
watchdog_gpu_memory,
merge_memory,
merge_time,
merge_scratch,
merge_n_cores,
merge_gpu,
merge_gpu_memory,
system,
verbosity,
)
# get the job name for the submission system and the log files
job_name_ext = job_name + "_" + function
stdout_log, stderr_log = get_log_filenames(log_dir, job_name, log_function, system)
# construct the string of arguments passed to the executable
args_string = ""
for arg in function_args:
args_string += arg + " "
# make submission string
source_cmd = get_source_cmd(source_file, verbosity)
if mode == "mpi":
run_cmd = "mpirun python"
elif mode == "jobarray":
run_cmd = "python"
else:
raise ValueError(f"Run mode {mode} is not known")
extra_args_string = (
"--source_file={} --main_memory={} --main_time={} "
"--main_scratch={} --function={} "
"--executable={} --n_jobs={} "
"--log_dir={} --system={} "
"--main_name={} --batchsize={} --max_njobs={} "
"--main_n_cores={} --main_gpu={} --main_gpu_memory={} "
'--esub_verbosity={} --main_mode={} --mode={} {}"'.format(
source_file,
main_memory,
main_time,
main_scratch,
function,
exe,
n_jobs,
log_dir,
system,
main_name,
batchsize,
max_njobs,
main_n_cores,
main_gpu,
main_gpu_memory,
verbosity,
main_mode,
mode,
args_string,
)
)
if (function == "main") & (max_njobs > 0):
max_string = "%{}".format(max_njobs)
else:
max_string = ""
if system == "bsub":
if n_jobs <= batchsize:
cmd_string = (
"bsub -o {} -e {} -J {}[1-{}]{} "
"{} {} {}"
' "{} {} -m esub.submit --job_name={} '
"--tasks='{}' {}".format(
stdout_log,
stderr_log,
job_name_ext,
n_jobs,
max_string,
resource_string,
add_bsub,
dependency,
source_cmd,
run_cmd,
job_name,
tasks,
extra_args_string,
)
)
else:
LOGGER.warning(
"You have requested a jobarray with more "
f"than {batchsize} cores"
". Euler cannot handle this. I break down this job into "
"multiple subarrays and submit them one by one. "
"Note that this feature currently breakes the rerun "
"missing capability. Also note that"
" this process needs to keep running...."
)
n_batches = math.ceil(n_jobs / batchsize)
cmd_string = []
for rank in range(n_batches):
if rank < (n_batches - 1):
jobs = batchsize
else:
jobs = n_jobs % batchsize
if jobs == 0:
jobs = batchsize
first_task = get_indices_splitted(tasks, n_jobs, rank * batchsize)
first_task = first_task[0]
last_task = get_indices_splitted(
tasks, n_jobs, rank * batchsize + jobs - 1
)
last_task = last_task[-1]
tasks_ = f"{first_task} > {last_task + 1}"
jobname_ = f"{job_name}_{rank}"
stdout_log_ = stdout_log[:-2] + f"_{rank}.o"
stderr_log_ = stdout_log[:-2] + f"_{rank}.e"
cs = (
"bsub -o {} -e {} -J {}[1-{}]{} "
'{} {} "{} '
"{} -m esub.submit --job_name={} --tasks='{}' {}".format(
stdout_log_,
stderr_log_,
job_name_ext,
jobs,
max_string,
resource_string,
dependency,
source_cmd,
run_cmd,
jobname_,
tasks_,
extra_args_string,
)
)
cmd_string.append(cs)
if discard_output:
if isinstance(cmd_string, list):
for i in range(len(cmd_string)):
cmd_string[i] = cmd_string[i] + " --discard_output &> /dev/null"
else:
cmd_string += " --discard_output &> /dev/null"
elif system == "slurm":
# split add_args
if len(add_args) > 0:
add_args = add_args.split(",")
else:
add_args = []
cmd_string = "sbatch {} submit_{}.slurm".format(dependency, job_name_ext)
# write submission file
with open(f"submit_{job_name_ext}.slurm", "w+") as f:
f.write("#! /bin/bash \n#\n")
if discard_output:
f.write("#SBATCH --output=/dev/null \n")
f.write("#SBATCH --error=/dev/null \n")
else:
f.write("#SBATCH --output={} \n".format(stdout_log))
f.write("#SBATCH --error={} \n".format(stderr_log))
f.write("#SBATCH --job-name={} \n".format(job_name_ext))
for arg in add_args:
f.write("#SBATCH {} \n".format(arg))
f.write("#SBATCH --array=1-{}{} \n".format(n_jobs, max_string))
f.write(resource_string)
f.write(
"srun {}bash; {} {} -m esub.submit --job_name={} "
"--tasks='{}' {}".format(
gpu_cmd,
source_cmd,
run_cmd,
job_name,
tasks,
extra_args_string[:-1],
)
)
elif system == "daint":
# split add_args
if len(add_args) > 0:
add_args = add_args.split(",")
else:
add_args = []
cmd_string = "sbatch {} submit_{}.slurm".format(dependency, job_name_ext)
# write submission file
with open(f"submit_{job_name_ext}.slurm", "w+") as f:
f.write("#! /bin/bash \n#\n")
if discard_output:
f.write("#SBATCH --output=/dev/null \n")
f.write("#SBATCH --error=/dev/null \n")
else:
f.write("#SBATCH --output={} \n".format(stdout_log))
f.write("#SBATCH --error={} \n".format(stderr_log))
f.write("#SBATCH --job-name={} \n".format(job_name_ext))
f.write("#SBATCH --constraint=gpu \n")
f.write("#SBATCH --nodes={} \n".format(nodes))
f.write("#SBATCH --ntasks-per-core={} \n".format(MPI_tasks_per_core))
f.write("#SBATCH --ntasks-per-node={} \n".format(MPI_tasks_per_node))
f.write("#SBATCH --cpus-per-task={} \n".format(OpenMP_threads_per_task))
for arg in add_args:
f.write("#SBATCH {} \n".format(arg))
f.write(resource_string)
if len(source_cmd) > 0:
f.write("srun {} \n".format(source_cmd))
f.write(
"srun python -m esub.submit --job_name={} "
"--tasks='{}' {}".format(job_name, tasks, extra_args_string[:-1])
)
LOGGER.debug(f"Built total command string as {cmd_string}")
return cmd_string
def submit_job(
tasks,
mode,
exe,
log_dir,
function_args,
function="main",
source_file="",
n_jobs=1,
job_name="job",
main_memory=100,
main_time=1,
main_scratch=1000,
main_n_cores=1,
main_gpu=0,
main_gpu_memory=1000,
watchdog_memory=100,
watchdog_time=1,
watchdog_scratch=1000,
watchdog_n_cores=1,
watchdog_gpu=0,
watchdog_gpu_memory=1000,
merge_memory=100,
merge_time=1,
merge_scratch=1000,
merge_n_cores=1,
merge_gpu=0,
merge_gpu_memory=1000,
dependency="",
system="bsub",
main_name="main",
test=False,
batchsize=100000,
max_njobs=100000,
add_args="",
add_bsub="",
discard_output=False,
verbosity=3,
main_mode="jobarray",
keep_submit_files=False,
nodes=1,
MPI_tasks_per_core=1,
MPI_tasks_per_node=1,
OpenMP_threads_per_task=1,
):
"""
Based on arguments gets the submission string and submits it to the cluster
:param tasks: The task string, which will get parsed into the job indices
    :param mode: The mode in which the job will be run
        (MPI job or as a jobarray)
:param exe: The path of the executable
:param log_dir: The path to the log directory
:param function_args: The remaining arguments that will
be forwarded to the executable
:param function: The name of the function defined in the
executable that will be submitted
:param source_file: A file which gets executed before
running the actual function(s)
:param n_jobs: The number of jobs that will be requested for the job
:param job_name: The name of the job
:param main_memory: Memory per core to allocate for the main job
:param main_time: The Wall time requested for the main job
:param main_scratch: Scratch per core to allocate for the main job
:param main_n_cores: Number of cores to allocate for the main job
:param main_gpu: Number of GPUs to allocate for the main job
:param main_gpu_memory: Memory per GPU to allocate for the main job
:param watchdog_memory: Memory per core to allocate for the watchdog job
:param watchdog_time: The Wall time requested for the watchdog job
:param watchdog_scratch: Scratch to allocate for the watchdog job
:param watchdog_n_cores: Number of cores to allocate for the watchdog job
:param watchdog_gpu: Number of GPUs to allocate for the watchdog job
:param watchdog_gpu_memory: Memory per GPU to allocate for the watchdog job
:param merge_memory: Memory per core to allocate for the merge job
:param merge_time: The Wall time requested for the merge job
:param merge_scratch: Scratch to allocate for the merge job
:param merge_n_cores: Number of cores to allocate for the merge job
:param merge_gpu: Number of GPUs to allocate for the merge job
:param merge_gpu_memory: Memory per GPU to allocate for the merge job
:param dependency: The jobids of the jobs on which this job depends on
    :param system: The type of the queuing system of the cluster
:param main_name: name of the main function
:param test: If True no submission but just printing submission string to
log
:param batchsize: If number of cores requested is > batchsize, break up
jobarrays into jobarrys of size batchsize
:param max_njobs: Maximum number of jobs allowed to run at the same time
:param add_args: Additional cluster-specific arguments
:param add_bsub: Additional bsub arguments to pass
:param discard_output: If True writes stdout/stderr to /dev/null
:param verbosity: Verbosity level (0 - 4).
:param keep_submit_files: If True store SLURM submission files
    :return: The jobid(s) of the submitted job(s)
"""
logger_utils.set_logger_level(LOGGER, verbosity)
# assess if number of tasks is valid
n_tasks = len(get_indices_splitted(tasks, 1, 0))
if (n_jobs > n_tasks) & (("mpi" not in mode) & (mode != "tasks")):
raise Exception(
"You tried to request more jobs than you have tasks. "
"I assume this is a mistake. Aborting..."
)
# get submission string
cmd_string = make_cmd_string(
function,
source_file,
n_jobs,
tasks,
mode,
job_name,
function_args,
exe,
main_memory,
main_time,
main_scratch,
main_n_cores,
main_gpu,
main_gpu_memory,
watchdog_time,
watchdog_memory,
watchdog_scratch,
watchdog_n_cores,
watchdog_gpu,
watchdog_gpu_memory,
merge_memory,
merge_time,
merge_scratch,
merge_n_cores,
merge_gpu,
merge_gpu_memory,
log_dir,
dependency,
system,
main_name,
batchsize,
max_njobs,
add_bsub=add_bsub,
add_args=add_args,
discard_output=discard_output,
verbosity=verbosity,
main_mode=main_mode,
# main_n_cores=main_n_cores,
nodes=nodes,
MPI_tasks_per_core=MPI_tasks_per_core,
MPI_tasks_per_node=MPI_tasks_per_node,
OpenMP_threads_per_task=OpenMP_threads_per_task,
)
LOGGER.debug(cmd_string)
if test:
path_log = get_path_log(log_dir, job_name)
write_to_log(path_log, cmd_string)
return []
# message the system sends if the
# maximum number of pendings jobs is reached
msg_limit_reached = "Pending job threshold reached."
pipe_limit_reached = "stderr"
if isinstance(cmd_string, str):
cmd_string = [cmd_string]
jobids = []
for cs in cmd_string:
LOGGER.info("Submitting command:")
LOGGER.info(cs)
# submit
while True:
output = dict(stdout=[], stderr=[])
with subprocess.Popen(
shlex.split(cs),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
universal_newlines=True,
) as proc:
# check for limit concerning maximum number of pending jobs
if system == "bsub":
for line in getattr(proc, pipe_limit_reached):
pending_limit_reached = msg_limit_reached in line
if pending_limit_reached:
break
else:
output[pipe_limit_reached].append(line)
# if the limit has been reached, kill process and sleep
if pending_limit_reached:
proc.kill()
LOGGER.warning(TIMEOUT_MESSAGE)
time.sleep(60 * 30)
continue
# read rest of the output
for line in proc.stdout:
output["stdout"].append(line)
for line in proc.stderr:
output["stderr"].append(line)
break
# check if process terminated successfully
if proc.returncode != 0:
raise RuntimeError(
'Running the command "{}" failed with'
"exit code {}. Error: \n{}".format(
cmd_string, proc.returncode, "\n".join(output["stderr"])
)
)
# get id of submitted job
if system == "bsub":
jobid = output["stdout"][-1].split("<")[1]
jobid = jobid.split(">")[0]
elif system == "slurm":
jobid = output["stdout"][-1].split("job ")[-1]
if not keep_submit_files:
for cs in cmd_string:
os.remove(f"{cs.split(' ')[-1]}")
elif system == "daint":
jobid = output["stdout"][-1].split("job ")[-1]
if not keep_submit_files:
for cs in cmd_string:
os.remove(f"{cs.split(' ')[-1]}")
jobids.append(int(jobid))
LOGGER.info("Submitted job and got jobid(s): {}".format(jobid))
return jobids
def robust_remove(path):
"""
Remove a file or directory if existing
:param path: path to possible non-existing file or directory
"""
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
# recreate
open(path, "a").close()
def get_path_log(log_dir, job_name):
"""
Construct the path of the esub log file
:param log_dir: directory where log files are stored
:param job_name: name of the job that will be logged
:return: path of the log file
"""
path_log = os.path.join(log_dir, job_name + ".log")
return path_log
def get_path_finished_indices(log_dir, job_name):
"""
Construct the path of the file containing the finished indices
:param log_dir: directory where log files are stored
:param job_name: name of the job for which the indices will be store
:return: path of the file for the finished indices
"""
path_finished = os.path.join(log_dir, job_name + "_done.dat")
return path_finished
def import_executable(exe):
"""
Imports the functions defined in the executable file.
:param exe: path of the executable
:return: executable imported as python module
"""
sys.path.insert(0, os.path.dirname(exe))
to_import = os.path.basename(exe).replace(".py", "")
try:
executable = __import__(to_import)
except ImportError:
raise ImportError(f"Failed to import your executable {exe}")
return executable
def save_write(path, str_to_write, mode="a"):
"""
Write a string to a file, with the file being locked in the meantime.
:param path: path of file
:param str_to_write: string to be written
:param mode: mode in which file is opened
"""
with portalocker.Lock(path, mode=mode, timeout=math.inf) as f:
# write
f.write(str_to_write)
# flush and sync to filesystem
f.flush()
os.fsync(f.fileno())
def write_index(index, finished_file):
"""
Writes the index number on a new line of the
file containing the finished indices
:param index: A job index
:param finished_file: The file to which the
jobs will write that they are done
"""
save_write(finished_file, "{}\n".format(index))
def check_indices(
indices,
finished_file,
exe,
function_args,
check_indices_file=True,
verbosity=3,
):
"""
Checks which of the indices are missing in
the file containing the finished indices
:param indices: Job indices that should be checked
:param finished_file: The file from which the jobs will be read
    :param exe: Path to executable
    :param function_args: The arguments passed to the executable's
        check_missing function (if defined)
    :param check_indices_file: If True, adds indices from the index file;
        otherwise only the check_missing function is used
:param verbosity: Verbosity level (0 - 4).
:return: Returns the indices that are missing
"""
if check_indices_file:
LOGGER.debug("Checking missing file for missing indices...")
# wait for the indices file to be written
if os.path.exists(finished_file):
# first get the indices missing in the log file (crashed jobs)
done = []
with open(finished_file, "r") as f:
for line in f:
# Ignore empty lines
if line != "\n":
done.append(int(line.replace("\n", "")))
failed = list(set(indices) - set(done))
LOGGER.debug(f"Found failed indices: {failed}")
else:
LOGGER.warning(
"Did not find File {} -> None of the main functions "
"recorded its indices. "
"Not rerunning any jobs".format(finished_file)
)
failed = []
else:
failed = []
# if provided use check_missing function
# (finished jobs but created corrupted output)
if hasattr(exe, "check_missing"):
LOGGER.info("Found check_missing function in executable. Running...")
corrupted = getattr(exe, "check_missing")(indices, function_args)
LOGGER.debug(f"Found corruped indices: {corrupted}")
else:
corrupted = []
missing = failed + corrupted
missing = np.unique(np.asarray(missing))
LOGGER.debug(f"Found failed/corrputed indices: {missing}")
return missing
def write_to_log(path, line, mode="a"):
"""
Write a line to a esub log file
:param path: path of the log file
:param line: line (string) to write
:param mode: mode in which the log file will be opened
"""
extended_line = "{} {}\n".format(datetime.datetime.now(), line)
save_write(path, extended_line, mode=mode)
def cd_local_scratch(verbosity=3):
"""
Change to current working directory to the local scratch if set.
:param verbosity: Verbosity level (0 - 4).
"""
if "ESUB_LOCAL_SCRATCH" in os.environ:
if os.path.isdir(os.environ["ESUB_LOCAL_SCRATCH"]):
submit_dir = os.getcwd()
os.chdir(os.environ["ESUB_LOCAL_SCRATCH"])
os.environ["SUBMIT_DIR"] = submit_dir
LOGGER.warning(
"Changed current working directory to {} and "
"set $SUBMIT_DIR to {}".format(os.getcwd(), os.environ["SUBMIT_DIR"])
)
else:
LOGGER.error(
"$ESUB_LOCAL_SCRATCH is set to non-existing "
"directory {}, skipping...".format(os.environ["ESUB_LOCAL_SCRATCH"])
)
else:
LOGGER.debug(
"Environment variable ESUB_LOCAL_SCRATCH not set. "
"Not chaning working directory."
)
def run_local_mpi_job(
exe, n_cores, function_args, logger, main_name="main", verbosity=3
):
"""
This function runs an MPI job locally
:param exe: Path to executable
:param n_cores: Number of cores
:param function_args: A list of arguments to be passed to the executable
:param logger: logger instance for logging
:param main_name: Name of main function in executable
:param verbosity: Verbosity level (0 - 4).
"""
# construct the string of arguments passed to the executable
args_string = ""
for arg in function_args:
args_string += arg + " "
# make command string
cmd_string = (
"mpirun -np {} python -m esub.submit"
" --executable={} --tasks='0' --main_name={} "
"--esub_verbosity={} {}".format(n_cores, exe, main_name, verbosity, args_string)
)
for line in execute_local_mpi_job(cmd_string):
line = line.strip()
if len(line) > 0:
logger.info(line)
def get_indices(tasks):
"""
Parses the jobids from the tasks string.
:param tasks: The task string, which will get parsed into the job indices
:return: A list of the jobids that should be executed
"""
# parsing a list of indices from the tasks argument
if ">" in tasks:
tasks = tasks.split(">")
start = tasks[0].replace(" ", "")
stop = tasks[1].replace(" ", "")
indices = list(range(int(start), int(stop)))
elif "," in tasks:
indices = tasks.split(",")
indices = list(map(int, indices))
elif os.path.exists(tasks):
with open(tasks, "r") as f:
content = f.readline()
indices = get_indices(content)
else:
try:
indices = [int(tasks)]
except ValueError:
raise ValueError("Tasks argument is not in the correct format!")
return indices
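# Accepted task-string formats (derived from the parsing above):
#   get_indices("0 > 5") -> [0, 1, 2, 3, 4]
#   get_indices("1,3,5") -> [1, 3, 5]
#   get_indices("7")     -> [7]
# or a path to a file whose first line contains any of these formats.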
def get_indices_splitted(tasks, n_jobs, rank):
"""
Parses the jobids from the tasks string.
Performs load-balance splitting of the jobs and returns the indices
corresponding to rank. This is only used for job array submission.
:param tasks: The task string, which will get parsed into the job indices
:param n_jobs: The number of cores that will be requested for the job
:param rank: The rank of the core
:return: A list of the jobids that should
be executed by the core with number rank
"""
# Parse
indices = get_indices(tasks)
# Load-balanced splitter
steps = len(indices)
size = n_jobs
chunky = int(steps / size)
rest = steps - chunky * size
mini = chunky * rank
maxi = chunky * (rank + 1)
if rank >= (size - 1) - rest:
maxi += 2 + rank - size + rest
mini += rank - size + 1 + rest
mini = int(mini)
maxi = int(maxi)
return indices[mini:maxi]
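# Example of the load-balanced split above (computed from the formula):
# for tasks="0 > 10" and n_jobs=3 the ranks receive
#   rank 0 -> [0, 1, 2], rank 1 -> [3, 4, 5, 6], rank 2 -> [7, 8, 9].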
def function_wrapper(indices, args, func):
"""
Wrapper that converts a generator to a function.
:param generator: A generator
"""
inds = []
for ii in func(indices, args):
inds.append(ii)
return inds
def run_local_tasks(exe, n_jobs, function_args, tasks, function):
"""
    Executes an MPI job locally, running each split index list on one core.
:param exe: The executable from where the main function is imported.
:param n_jobs: The number of cores to allocate.
:param function_args: The arguments that
will get passed to the main function.
:param tasks: The indices to run on.
:param function: The function name to run
"""
LOGGER.warning(
"NotImplementedWarning: Using run-tasks creates a multiprocessing "
"worker pool with just one thread per job. "
"The n_core arguments are ignored."
)
# get executable
func = getattr(exe, function)
# Fix function arguments for all walkers
run_func = partial(function_wrapper, args=function_args, func=func)
# get splitted indices
nums = []
for rank in range(n_jobs):
nums.append(get_indices_splitted(tasks, n_jobs, rank))
    # Set up multiprocessing pool
pool = multiprocessing.Pool(processes=n_jobs)
if int(multiprocessing.cpu_count()) < n_jobs:
raise Exception(
"Number of CPUs available is smaller \
than requested number of CPUs"
)
    # run and retrieve the finished indices
out = pool.map(run_func, nums)
out = [item for sublist in out for item in sublist]
return out
def execute_local_mpi_job(cmd_string):
"""
Execution of local MPI job
:param cmd_string: The command string to run
"""
popen = subprocess.Popen(
shlex.split(cmd_string),
stdout=subprocess.PIPE,
universal_newlines=True,
)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd_string) | PypiClean |
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/sql/v20221101preview/elastic_pool.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ElasticPoolArgs', 'ElasticPool']
@pulumi.input_type
class ElasticPoolArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
server_name: pulumi.Input[str],
availability_zone: Optional[pulumi.Input[Union[str, 'AvailabilityZoneType']]] = None,
elastic_pool_name: Optional[pulumi.Input[str]] = None,
high_availability_replica_count: Optional[pulumi.Input[int]] = None,
license_type: Optional[pulumi.Input[Union[str, 'ElasticPoolLicenseType']]] = None,
location: Optional[pulumi.Input[str]] = None,
maintenance_configuration_id: Optional[pulumi.Input[str]] = None,
max_size_bytes: Optional[pulumi.Input[float]] = None,
min_capacity: Optional[pulumi.Input[float]] = None,
per_database_settings: Optional[pulumi.Input['ElasticPoolPerDatabaseSettingsArgs']] = None,
preferred_enclave_type: Optional[pulumi.Input[Union[str, 'AlwaysEncryptedEnclaveType']]] = None,
sku: Optional[pulumi.Input['SkuArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a ElasticPool resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input[Union[str, 'AvailabilityZoneType']] availability_zone: Specifies the availability zone the pool's primary replica is pinned to.
:param pulumi.Input[str] elastic_pool_name: The name of the elastic pool.
:param pulumi.Input[int] high_availability_replica_count: The number of secondary replicas associated with the elastic pool that are used to provide high availability. Applicable only to Hyperscale elastic pools.
:param pulumi.Input[Union[str, 'ElasticPoolLicenseType']] license_type: The license type to apply for this elastic pool.
:param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] maintenance_configuration_id: Maintenance configuration id assigned to the elastic pool. This configuration defines the period when the maintenance updates will occur.
:param pulumi.Input[float] max_size_bytes: The storage limit for the database elastic pool in bytes.
:param pulumi.Input[float] min_capacity: Minimal capacity that serverless pool will not shrink below, if not paused
:param pulumi.Input['ElasticPoolPerDatabaseSettingsArgs'] per_database_settings: The per database settings for the elastic pool.
:param pulumi.Input[Union[str, 'AlwaysEncryptedEnclaveType']] preferred_enclave_type: Type of enclave requested on the elastic pool.
:param pulumi.Input['SkuArgs'] sku: The elastic pool SKU.
The list of SKUs may vary by region and support offer. To determine the SKUs (including the SKU name, tier/edition, family, and capacity) that are available to your subscription in an Azure region, use the `Capabilities_ListByLocation` REST API or the following command:
```azurecli
az sql elastic-pool list-editions -l <location> -o table
               ```
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[bool] zone_redundant: Whether or not this elastic pool is zone redundant, which means the replicas of this elastic pool will be spread across multiple availability zones.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "server_name", server_name)
if availability_zone is not None:
pulumi.set(__self__, "availability_zone", availability_zone)
if elastic_pool_name is not None:
pulumi.set(__self__, "elastic_pool_name", elastic_pool_name)
if high_availability_replica_count is not None:
pulumi.set(__self__, "high_availability_replica_count", high_availability_replica_count)
if license_type is not None:
pulumi.set(__self__, "license_type", license_type)
if location is not None:
pulumi.set(__self__, "location", location)
if maintenance_configuration_id is not None:
pulumi.set(__self__, "maintenance_configuration_id", maintenance_configuration_id)
if max_size_bytes is not None:
pulumi.set(__self__, "max_size_bytes", max_size_bytes)
if min_capacity is not None:
pulumi.set(__self__, "min_capacity", min_capacity)
if per_database_settings is not None:
pulumi.set(__self__, "per_database_settings", per_database_settings)
if preferred_enclave_type is not None:
pulumi.set(__self__, "preferred_enclave_type", preferred_enclave_type)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if zone_redundant is not None:
pulumi.set(__self__, "zone_redundant", zone_redundant)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Input[str]:
"""
The name of the server.
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: pulumi.Input[str]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[pulumi.Input[Union[str, 'AvailabilityZoneType']]]:
"""
Specifies the availability zone the pool's primary replica is pinned to.
"""
return pulumi.get(self, "availability_zone")
@availability_zone.setter
def availability_zone(self, value: Optional[pulumi.Input[Union[str, 'AvailabilityZoneType']]]):
pulumi.set(self, "availability_zone", value)
@property
@pulumi.getter(name="elasticPoolName")
def elastic_pool_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the elastic pool.
"""
return pulumi.get(self, "elastic_pool_name")
@elastic_pool_name.setter
def elastic_pool_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "elastic_pool_name", value)
@property
@pulumi.getter(name="highAvailabilityReplicaCount")
def high_availability_replica_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of secondary replicas associated with the elastic pool that are used to provide high availability. Applicable only to Hyperscale elastic pools.
"""
return pulumi.get(self, "high_availability_replica_count")
@high_availability_replica_count.setter
def high_availability_replica_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "high_availability_replica_count", value)
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> Optional[pulumi.Input[Union[str, 'ElasticPoolLicenseType']]]:
"""
The license type to apply for this elastic pool.
"""
return pulumi.get(self, "license_type")
@license_type.setter
def license_type(self, value: Optional[pulumi.Input[Union[str, 'ElasticPoolLicenseType']]]):
pulumi.set(self, "license_type", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="maintenanceConfigurationId")
def maintenance_configuration_id(self) -> Optional[pulumi.Input[str]]:
"""
        Maintenance configuration id assigned to the elastic pool. This configuration defines the period when the maintenance updates will occur.
"""
return pulumi.get(self, "maintenance_configuration_id")
@maintenance_configuration_id.setter
def maintenance_configuration_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "maintenance_configuration_id", value)
@property
@pulumi.getter(name="maxSizeBytes")
def max_size_bytes(self) -> Optional[pulumi.Input[float]]:
"""
The storage limit for the database elastic pool in bytes.
"""
return pulumi.get(self, "max_size_bytes")
@max_size_bytes.setter
def max_size_bytes(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "max_size_bytes", value)
@property
@pulumi.getter(name="minCapacity")
def min_capacity(self) -> Optional[pulumi.Input[float]]:
"""
Minimal capacity that serverless pool will not shrink below, if not paused
"""
return pulumi.get(self, "min_capacity")
@min_capacity.setter
def min_capacity(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "min_capacity", value)
@property
@pulumi.getter(name="perDatabaseSettings")
def per_database_settings(self) -> Optional[pulumi.Input['ElasticPoolPerDatabaseSettingsArgs']]:
"""
The per database settings for the elastic pool.
"""
return pulumi.get(self, "per_database_settings")
@per_database_settings.setter
def per_database_settings(self, value: Optional[pulumi.Input['ElasticPoolPerDatabaseSettingsArgs']]):
pulumi.set(self, "per_database_settings", value)
@property
@pulumi.getter(name="preferredEnclaveType")
def preferred_enclave_type(self) -> Optional[pulumi.Input[Union[str, 'AlwaysEncryptedEnclaveType']]]:
"""
Type of enclave requested on the elastic pool.
"""
return pulumi.get(self, "preferred_enclave_type")
@preferred_enclave_type.setter
def preferred_enclave_type(self, value: Optional[pulumi.Input[Union[str, 'AlwaysEncryptedEnclaveType']]]):
pulumi.set(self, "preferred_enclave_type", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
"""
The elastic pool SKU.
The list of SKUs may vary by region and support offer. To determine the SKUs (including the SKU name, tier/edition, family, and capacity) that are available to your subscription in an Azure region, use the `Capabilities_ListByLocation` REST API or the following command:
```azurecli
az sql elastic-pool list-editions -l <location> -o table
        ```
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="zoneRedundant")
def zone_redundant(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not this elastic pool is zone redundant, which means the replicas of this elastic pool will be spread across multiple availability zones.
"""
return pulumi.get(self, "zone_redundant")
@zone_redundant.setter
def zone_redundant(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "zone_redundant", value)
class ElasticPool(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
availability_zone: Optional[pulumi.Input[Union[str, 'AvailabilityZoneType']]] = None,
elastic_pool_name: Optional[pulumi.Input[str]] = None,
high_availability_replica_count: Optional[pulumi.Input[int]] = None,
license_type: Optional[pulumi.Input[Union[str, 'ElasticPoolLicenseType']]] = None,
location: Optional[pulumi.Input[str]] = None,
maintenance_configuration_id: Optional[pulumi.Input[str]] = None,
max_size_bytes: Optional[pulumi.Input[float]] = None,
min_capacity: Optional[pulumi.Input[float]] = None,
per_database_settings: Optional[pulumi.Input[pulumi.InputType['ElasticPoolPerDatabaseSettingsArgs']]] = None,
preferred_enclave_type: Optional[pulumi.Input[Union[str, 'AlwaysEncryptedEnclaveType']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
An elastic pool.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'AvailabilityZoneType']] availability_zone: Specifies the availability zone the pool's primary replica is pinned to.
:param pulumi.Input[str] elastic_pool_name: The name of the elastic pool.
:param pulumi.Input[int] high_availability_replica_count: The number of secondary replicas associated with the elastic pool that are used to provide high availability. Applicable only to Hyperscale elastic pools.
:param pulumi.Input[Union[str, 'ElasticPoolLicenseType']] license_type: The license type to apply for this elastic pool.
:param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] maintenance_configuration_id: Maintenance configuration id assigned to the elastic pool. This configuration defines the period when the maintenance updates will occur.
:param pulumi.Input[float] max_size_bytes: The storage limit for the database elastic pool in bytes.
:param pulumi.Input[float] min_capacity: Minimal capacity that serverless pool will not shrink below, if not paused
:param pulumi.Input[pulumi.InputType['ElasticPoolPerDatabaseSettingsArgs']] per_database_settings: The per database settings for the elastic pool.
:param pulumi.Input[Union[str, 'AlwaysEncryptedEnclaveType']] preferred_enclave_type: Type of enclave requested on the elastic pool.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The elastic pool SKU.
The list of SKUs may vary by region and support offer. To determine the SKUs (including the SKU name, tier/edition, family, and capacity) that are available to your subscription in an Azure region, use the `Capabilities_ListByLocation` REST API or the following command:
```azurecli
az sql elastic-pool list-editions -l <location> -o table
        ```
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[bool] zone_redundant: Whether or not this elastic pool is zone redundant, which means the replicas of this elastic pool will be spread across multiple availability zones.
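        Illustrative example (a minimal sketch, not taken from the provider documentation): the
        resource group, server name, and location values below are placeholder assumptions, and
        the import path assumes the standard pulumi_azure_native package layout.

        ```python
        import pulumi_azure_native as azure_native

        example_pool = azure_native.sql.v20221101preview.ElasticPool("examplePool",
            resource_group_name="example-rg",
            server_name="example-sqlserver",
            location="westeurope")
        ```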
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ElasticPoolArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An elastic pool.
:param str resource_name: The name of the resource.
:param ElasticPoolArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ElasticPoolArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
availability_zone: Optional[pulumi.Input[Union[str, 'AvailabilityZoneType']]] = None,
elastic_pool_name: Optional[pulumi.Input[str]] = None,
high_availability_replica_count: Optional[pulumi.Input[int]] = None,
license_type: Optional[pulumi.Input[Union[str, 'ElasticPoolLicenseType']]] = None,
location: Optional[pulumi.Input[str]] = None,
maintenance_configuration_id: Optional[pulumi.Input[str]] = None,
max_size_bytes: Optional[pulumi.Input[float]] = None,
min_capacity: Optional[pulumi.Input[float]] = None,
per_database_settings: Optional[pulumi.Input[pulumi.InputType['ElasticPoolPerDatabaseSettingsArgs']]] = None,
preferred_enclave_type: Optional[pulumi.Input[Union[str, 'AlwaysEncryptedEnclaveType']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
zone_redundant: Optional[pulumi.Input[bool]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ElasticPoolArgs.__new__(ElasticPoolArgs)
__props__.__dict__["availability_zone"] = availability_zone
__props__.__dict__["elastic_pool_name"] = elastic_pool_name
__props__.__dict__["high_availability_replica_count"] = high_availability_replica_count
__props__.__dict__["license_type"] = license_type
__props__.__dict__["location"] = location
__props__.__dict__["maintenance_configuration_id"] = maintenance_configuration_id
__props__.__dict__["max_size_bytes"] = max_size_bytes
__props__.__dict__["min_capacity"] = min_capacity
__props__.__dict__["per_database_settings"] = per_database_settings
__props__.__dict__["preferred_enclave_type"] = preferred_enclave_type
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if server_name is None and not opts.urn:
raise TypeError("Missing required property 'server_name'")
__props__.__dict__["server_name"] = server_name
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["zone_redundant"] = zone_redundant
__props__.__dict__["creation_date"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:sql:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20140401:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20171001preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20200202preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20200801preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20201101preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20210201preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20210501preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20210801preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20211101:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20211101preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20220201preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20220501preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20220801preview:ElasticPool"), pulumi.Alias(type_="azure-native:sql/v20230201preview:ElasticPool")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ElasticPool, __self__).__init__(
'azure-native:sql/v20221101preview:ElasticPool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ElasticPool':
"""
Get an existing ElasticPool resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ElasticPoolArgs.__new__(ElasticPoolArgs)
__props__.__dict__["availability_zone"] = None
__props__.__dict__["creation_date"] = None
__props__.__dict__["high_availability_replica_count"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["license_type"] = None
__props__.__dict__["location"] = None
__props__.__dict__["maintenance_configuration_id"] = None
__props__.__dict__["max_size_bytes"] = None
__props__.__dict__["min_capacity"] = None
__props__.__dict__["name"] = None
__props__.__dict__["per_database_settings"] = None
__props__.__dict__["preferred_enclave_type"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["state"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["zone_redundant"] = None
return ElasticPool(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the availability zone the pool's primary replica is pinned to.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> pulumi.Output[str]:
"""
The creation date of the elastic pool (ISO8601 format).
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter(name="highAvailabilityReplicaCount")
def high_availability_replica_count(self) -> pulumi.Output[Optional[int]]:
"""
The number of secondary replicas associated with the elastic pool that are used to provide high availability. Applicable only to Hyperscale elastic pools.
"""
return pulumi.get(self, "high_availability_replica_count")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
"""
Kind of elastic pool. This is metadata used for the Azure portal experience.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> pulumi.Output[Optional[str]]:
"""
The license type to apply for this elastic pool.
"""
return pulumi.get(self, "license_type")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maintenanceConfigurationId")
def maintenance_configuration_id(self) -> pulumi.Output[Optional[str]]:
"""
        Maintenance configuration id assigned to the elastic pool. This configuration defines the period when the maintenance updates will occur.
"""
return pulumi.get(self, "maintenance_configuration_id")
@property
@pulumi.getter(name="maxSizeBytes")
def max_size_bytes(self) -> pulumi.Output[Optional[float]]:
"""
The storage limit for the database elastic pool in bytes.
"""
return pulumi.get(self, "max_size_bytes")
@property
@pulumi.getter(name="minCapacity")
def min_capacity(self) -> pulumi.Output[Optional[float]]:
"""
Minimal capacity that serverless pool will not shrink below, if not paused
"""
return pulumi.get(self, "min_capacity")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="perDatabaseSettings")
def per_database_settings(self) -> pulumi.Output[Optional['outputs.ElasticPoolPerDatabaseSettingsResponse']]:
"""
The per database settings for the elastic pool.
"""
return pulumi.get(self, "per_database_settings")
@property
@pulumi.getter(name="preferredEnclaveType")
def preferred_enclave_type(self) -> pulumi.Output[Optional[str]]:
"""
Type of enclave requested on the elastic pool.
"""
return pulumi.get(self, "preferred_enclave_type")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The elastic pool SKU.
The list of SKUs may vary by region and support offer. To determine the SKUs (including the SKU name, tier/edition, family, and capacity) that are available to your subscription in an Azure region, use the `Capabilities_ListByLocation` REST API or the following command:
```azurecli
az sql elastic-pool list-editions -l <location> -o table
        ```
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The state of the elastic pool.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="zoneRedundant")
def zone_redundant(self) -> pulumi.Output[Optional[bool]]:
"""
Whether or not this elastic pool is zone redundant, which means the replicas of this elastic pool will be spread across multiple availability zones.
"""
return pulumi.get(self, "zone_redundant") | PypiClean |
/target-parquet-0.0.1.tar.gz/target-parquet-0.0.1/target_parquet.py | import argparse
import collections.abc
import csv
from datetime import datetime
import io
import http.client
import json
from jsonschema.validators import Draft4Validator
import os
import pandas as pd
import pkg_resources
import pyarrow
import singer
import sys
import urllib
import threading
LOGGER = singer.get_logger()
def emit_state(state):
if state is not None:
line = json.dumps(state)
LOGGER.debug('Emitting state {}'.format(line))
sys.stdout.write("{}\n".format(line))
sys.stdout.flush()
def flatten(dictionary, parent_key = '', sep = '__'):
    '''Function that flattens a nested structure, using the separator given as a parameter, or '__' as default
E.g:
dictionary = {
'key_1': 1,
'key_2': {
'key_3': 2,
'key_4': {
'key_5': 3,
'key_6' : ['10', '11']
}
}
}
    By calling the function with the dictionary above as a parameter, you will get the following structure:
{
'key_1': 1,
'key_2__key_3': 2,
'key_2__key_4__key_5': 3,
'key_2__key_4__key_6': "['10', '11']"
}
'''
items = []
for k, v in dictionary.items():
new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep = sep).items())
else:
items.append((new_key, str(v) if type(v) is list else v))
return dict(items)
def persist_messages(messages, destination_path, compression_method = None):
state = None
schemas = {}
key_properties = {}
headers = {}
validators = {}
records = [] # A list of dictionaries that will contain the records that are retrieved from the tap
for message in messages:
try:
message = singer.parse_message(message).asdict()
except json.decoder.JSONDecodeError:
raise Exception("Unable to parse:\n{}".format(message))
timestamp = datetime.utcnow().strftime('%Y%m%d_%H%M%S')
message_type = message['type']
if message_type == 'STATE':
LOGGER.debug('Setting state to {}'.format(message['value']))
state = message['value']
elif message_type == 'SCHEMA':
stream = message['stream']
schemas[stream] = message['schema']
validators[stream] = Draft4Validator(message['schema'])
key_properties[stream] = message['key_properties']
elif message_type == 'RECORD':
if message['stream'] not in schemas:
raise Exception("A record for stream {} was encountered before a corresponding schema".format(message['stream']))
stream_name = message['stream']
validators[message['stream']].validate(message['record'])
flattened_record = flatten(message['record'])
            # Once the record is flattened, it is added to the final record list, which will be stored in the Parquet file.
records.append(flattened_record)
state = None
else:
LOGGER.warning("Unknown message type {} in message {}".format(message['type'], message))
if len(records) == 0:
        # If no records were retrieved, there is no need to create a file.
LOGGER.info("There were not any records retrieved.")
return state
# Create a dataframe out of the record list and store it into a parquet file with the timestamp in the name.
dataframe = pd.DataFrame(records)
filename = stream_name + '-' + timestamp + '.parquet'
filepath = os.path.expanduser(os.path.join(destination_path, filename))
if compression_method:
# The target is prepared to accept all the compression methods provided by the pandas module, with the mapping below,
        # but, at the moment, pyarrow only allows gzip compression.
extension_mapping = {
'gzip' : '.gz',
'bz2' : '.bz2',
'zip' : '.zip',
'xz' : '.xz'
}
dataframe.to_parquet(filepath + extension_mapping[compression_method], engine = 'pyarrow', compression = compression_method)
else:
dataframe.to_parquet(filepath, engine = 'pyarrow')
return state
def send_usage_stats():
try:
version = pkg_resources.get_distribution('target-parquet').version
conn = http.client.HTTPConnection('collector.singer.io', timeout = 10)
conn.connect()
params = {
'e': 'se',
'aid': 'singer',
'se_ca': 'target-parquet',
'se_ac': 'open',
'se_la': version,
}
conn.request('GET', '/i?' + urllib.parse.urlencode(params))
response = conn.getresponse()
conn.close()
except:
LOGGER.debug('Collection request failed')
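# Illustrative config file for this target. The keys mirror the config.get() lookups in
# main() below; the path and values themselves are placeholder assumptions.
#
#   {
#       "destination_path": "output/parquet",
#       "compression_method": "gzip",
#       "disable_collection": true
#   }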
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help = 'Config file')
args = parser.parse_args()
if args.config:
with open(args.config) as input_json:
config = json.load(input_json)
else:
config = {}
if not config.get('disable_collection', False):
LOGGER.info('Sending version information to singer.io. ' +
'To disable sending anonymous usage data, set ' +
'the config parameter "disable_collection" to true')
threading.Thread(target = send_usage_stats).start()
# The target expects that the tap generates UTF-8 encoded text.
input_messages = io.TextIOWrapper(sys.stdin.buffer, encoding = 'utf-8')
state = persist_messages(input_messages,
config.get('destination_path', ''),
config.get('compression_method'))
emit_state(state)
LOGGER.debug("Exiting normally")
if __name__ == '__main__':
main() | PypiClean |
/skytime-0.16.1-py3-none-any.whl/build/lib/build/lib/sktime/regression/compose/_ensemble.py | """Implements a composite Time series Forest Regressor that accepts a pipeline."""
__author__ = ["mloning", "AyushmaanSeth"]
__all__ = ["ComposableTimeSeriesForestRegressor"]
import numbers
from warnings import warn
import numpy as np
from joblib import Parallel, delayed
from sklearn.ensemble._base import _partition_estimators
from sklearn.ensemble._forest import (
_generate_unsampled_indices,
_get_n_samples_bootstrap,
)
from sklearn.metrics import r2_score
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeRegressor
from sktime.regression.base import BaseRegressor
from sktime.series_as_features.base.estimators._ensemble import BaseTimeSeriesForest
from sktime.transformations.panel.summarize import RandomIntervalFeatureExtractor
from sktime.utils.slope_and_trend import _slope
from sktime.utils.validation.panel import check_X, check_X_y
class ComposableTimeSeriesForestRegressor(BaseTimeSeriesForest, BaseRegressor):
"""Time-Series Forest Regressor.
A time series forest is a meta estimator and an adaptation of the random
forest for time-series/panel data that fits a number of decision tree
regressors on various sub-samples of a transformed dataset and uses
averaging to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original input sample size
but the samples are drawn with replacement if `bootstrap=True` (default).
Parameters
----------
estimator : Pipeline
A pipeline consisting of series-to-tabular transformations
and a decision tree regressor as final estimator.
n_estimators : integer, optional (default=100)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion and minimizes the L2 loss
using the mean of each terminal node, "friedman_mse", which uses mean
squared error with Friedman's improvement score for potential splits,
and "mae" for the mean absolute error, which minimizes the L1 loss
using the median of each terminal node.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
min_impurity_split : float, (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
    class_weight : dict, list of dicts, "balanced", "balanced_subsample" or \
        None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
class_weight: dict, list of dicts, "balanced", "balanced_subsample" or \
None, optional (default=None)
Not needed here, added in the constructor to align with base class \
sharing both Classifier and Regressor parameters.
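    Examples
    --------
    A minimal construction sketch (illustrative only; the import path assumes the class is
    re-exported from ``sktime.regression.compose``):

    >>> from sktime.regression.compose import ComposableTimeSeriesForestRegressor
    >>> regressor = ComposableTimeSeriesForestRegressor(n_estimators=5)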
"""
def __init__(
self,
estimator=None,
n_estimators=100,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features=None,
max_leaf_nodes=None,
min_impurity_decrease=0.0,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
max_samples=None,
):
self.estimator = estimator
# Assign values, even though passed on to base estimator below,
# necessary here for cloning
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.min_impurity_split = min_impurity_split
self.max_samples = max_samples
# Pass on params.
super(ComposableTimeSeriesForestRegressor, self).__init__(
base_estimator=None,
n_estimators=n_estimators,
estimator_params=None,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
max_samples=max_samples,
)
# We need to add is-fitted state when inheriting from scikit-learn
self._is_fitted = False
def _validate_estimator(self):
if not isinstance(self.n_estimators, numbers.Integral):
raise ValueError(
"n_estimators must be an integer, "
"got {0}.".format(type(self.n_estimators))
)
if self.n_estimators <= 0:
raise ValueError(
"n_estimators must be greater than zero, "
"got {0}.".format(self.n_estimators)
)
# Set base estimator
if self.estimator is None:
# Set default time series forest
features = [np.mean, np.std, _slope]
steps = [
(
"transform",
RandomIntervalFeatureExtractor(
n_intervals="sqrt",
features=features,
random_state=self.random_state,
),
),
("clf", DecisionTreeRegressor(random_state=self.random_state)),
]
self.estimator_ = Pipeline(steps)
else:
# else check given estimator is a pipeline with prior
# transformations and final decision tree
if not isinstance(self.estimator, Pipeline):
raise ValueError("`estimator` must be pipeline with transforms.")
if not isinstance(self.estimator.steps[-1][1], DecisionTreeRegressor):
raise ValueError(
"Last step in `estimator` must be DecisionTreeRegressor."
)
self.estimator_ = self.estimator
# Set parameters according to naming in pipeline
estimator_params = {
"criterion": self.criterion,
"max_depth": self.max_depth,
"min_samples_split": self.min_samples_split,
"min_samples_leaf": self.min_samples_leaf,
"min_weight_fraction_leaf": self.min_weight_fraction_leaf,
"max_features": self.max_features,
"max_leaf_nodes": self.max_leaf_nodes,
"min_impurity_decrease": self.min_impurity_decrease,
"min_impurity_split": self.min_impurity_split,
}
final_estimator = self.estimator_.steps[-1][0]
self.estimator_params = {
f"{final_estimator}__{pname}": pval
for pname, pval in estimator_params.items()
}
# Set renamed estimator parameters
for pname, pval in self.estimator_params.items():
self.__setattr__(pname, pval)
def fit(self, X, y, **kwargs):
"""Wrap BaseForest._fit.
This is a temporary measure prior to the BaseRegressor refactor.
"""
X, y = check_X_y(X, y, coerce_to_numpy=True, enforce_univariate=True)
return BaseTimeSeriesForest._fit(self, X, y, **kwargs)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
self.check_is_fitted()
# Check data
X = check_X(X, enforce_univariate=True)
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(e.predict)(X, check_input=True) for e in self.estimators_
)
return np.sum(y_hat, axis=0) / len(self.estimators_)
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores."""
X, y = check_X_y(X, y, enforce_univariate=True)
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
n_samples_bootstrap = _get_n_samples_bootstrap(n_samples, self.max_samples)
for estimator in self.estimators_:
final_estimator = estimator.steps[-1][1]
unsampled_indices = _generate_unsampled_indices(
final_estimator.random_state, n_samples, n_samples_bootstrap
)
p_estimator = estimator.predict(X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn(
"Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates."
)
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = self.oob_prediction_.reshape((n_samples,))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k], predictions[:, k])
self.oob_score_ /= self.n_outputs_
def _validate_y_class_weight(self, y):
# in regression, we don't validate class weights
# TODO remove from regression
return y, None
def _fit(self, X, y):
"""Empty method to satisfy abstract parent. Needs refactoring."""
def _predict(self, X):
"""Empty method to satisfy abstract parent. Needs refactoring."""
@classmethod
def get_test_params(cls, parameter_set="default"):
"""Return testing parameter settings for the estimator.
Parameters
----------
parameter_set : str, default="default"
Name of the set of test parameters to return, for use in tests. If no
special parameters are defined for a value, will return `"default"` set.
Returns
-------
params : dict or list of dict, default = {}
Parameters to create testing instances of the class
        Each dict contains parameters to construct an "interesting" test instance, i.e.,
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`
"""
return {"n_estimators": 3} | PypiClean |
/git-goggles-0.2.12.tar.gz/git-goggles-0.2.12/README.rst | #######################
git-goggles Readme
#######################
git-goggles is a set of git management utilities that allows you to manage your source code as
it evolves through its development lifecycle.
Overview
========
This project accomplishes two things:
* Manage the code review state of your branches
* Gives a snapshot of where your local branches are vs. origin in terms of being ahead / behind on commits
There is a nice blog post describing the features along with screenshots at http://bit.ly/git-goggles
Field Reference
===============
In the table outputted by git-goggles, each row corresponds to a branch, with the following fields:
* Status: the current status of your branch
* new: this is a branch that has never been through the review process
* review: this branch has code that needs to be reviewed
* merge: everything has been reviewed, but needs to be merged into parent (same as done for being ahead)
* done: reviewed and merged (doens't matter if you're behind but you can't be ahead)
* Branch: the branch name
* Review: how many commits have taken place since the last review
* Ahead: how many commits are in your local branch that are not in origin
* Behind: how many commits are in origin that are not in your local branch
* Pull & Push: whether your branches need to be pushed or pulled to track origin
* green checkbox: you don't need to pull
* red cross: you need to pull
* question mark: you either don't have a checked out copy of this branch or you need to prune your local tree
* Modified: the last time that HEAD was modified (NOT the last time the review happened)
Installation
============
To install from PyPi you should run one of the following commands. (If you use pip for your package installation, you should take a look!)
::
pip install git-goggles
or
::
easy_install git-goggles
Checkout the project from github http://github.com/nowells/git-goggles
::
git clone git://github.com/nowells/git-goggles.git
Run setup.py as root
::
cd git-goggles
sudo python setup.py install
**Documentation**:
To build the `Sphinx <http://sphinx.pocoo.org/>`_ documentation, go to the docs/ directory and type:
::
make html
Then open ``docs/_build/index.html``
Usage
=====
Viewing the status of your branches:
::
git goggles
Starting your review process (shows an origin diff):
::
git goggles codereview
Complete your review process (automatically pushes up):
::
git goggles codereview complete
Configuration
=============
You can set a few configuration variables to alter to way git-goggles works out of the box.
Disable automatic fetching from all remote servers.
::
git config --global gitgoggles.fetch false
Disable colorized output
::
git config --global gitgoggles.colors false
Alter the symbols used to display success, failure, unknown states
::
git config --global gitgoggles.icons.success "OK"
git config --global gitgoggles.icons.failure "FAIL"
git config --global gitgoggles.icons.unknown "N/A"
Alter the colors of branch states. The available colors are [grey, red, green, yellow, blue, magenta, cyan, white]
::
git config --global gitgoggles.colors.local cyan
git config --global gitgoggles.colors.new red
git config --global gitgoggles.colors.review red
git config --global gitgoggles.colors.merge yellow
git config --global gitgoggles.colors.done green
Alter the width of branch column to turn on wordwrap.
::
git config --global gitgoggles.table.branch-width 15
Alter the table cell padding (defaults to 0)
::
git config --global gitgoggles.table.left-padding 1
git config --global gitgoggles.table.right-padding 1
Alter the display of horizontal rule between rows of table (default false)
::
git config --global gitgoggles.table.horizontal-rule true
Internals
=========
git-goggles works by creating and managing special tags called
'codereview-<branch_name>' and tracking them against HEAD.
The first time a codereview is completed, the tag is created. Subsequent
reviews delete and re-create the tag so that it always accurately tracks HEAD.
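For illustration, the review tags can be inspected with plain git commands (the branch
name ``feature-x`` is only an example; the tag name follows the pattern described above):

::

    git tag --list 'codereview-*'
    git log codereview-feature-x..feature-x --oneline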
/waveshare_epaper-1.2.0.tar.gz/waveshare_epaper-1.2.0/epaper/e-Paper/RaspberryPi_JetsonNano/python/lib/waveshare_epd/epd2in9b_V3.py |
import logging
from . import epdconfig
# Display resolution
EPD_WIDTH = 128
EPD_HEIGHT = 296
logger = logging.getLogger(__name__)
class EPD:
def __init__(self):
self.reset_pin = epdconfig.RST_PIN
self.dc_pin = epdconfig.DC_PIN
self.busy_pin = epdconfig.BUSY_PIN
self.cs_pin = epdconfig.CS_PIN
self.width = EPD_WIDTH
self.height = EPD_HEIGHT
# Hardware reset
def reset(self):
epdconfig.digital_write(self.reset_pin, 1)
epdconfig.delay_ms(200)
epdconfig.digital_write(self.reset_pin, 0)
epdconfig.delay_ms(2)
epdconfig.digital_write(self.reset_pin, 1)
epdconfig.delay_ms(200)
def send_command(self, command):
epdconfig.digital_write(self.dc_pin, 0)
epdconfig.digital_write(self.cs_pin, 0)
epdconfig.spi_writebyte([command])
epdconfig.digital_write(self.cs_pin, 1)
def send_data(self, data):
epdconfig.digital_write(self.dc_pin, 1)
epdconfig.digital_write(self.cs_pin, 0)
epdconfig.spi_writebyte([data])
epdconfig.digital_write(self.cs_pin, 1)
# send a lot of data
def send_data2(self, data):
epdconfig.digital_write(self.dc_pin, 1)
epdconfig.digital_write(self.cs_pin, 0)
epdconfig.spi_writebyte2(data)
epdconfig.digital_write(self.cs_pin, 1)
def ReadBusy(self):
logger.debug("e-Paper busy")
self.send_command(0X71)
        while(epdconfig.digital_read(self.busy_pin) == 0): # 0: busy, 1: idle
self.send_command(0X71)
epdconfig.delay_ms(200)
logger.debug("e-Paper busy release")
def init(self):
if (epdconfig.module_init() != 0):
return -1
# EPD hardware init start
self.reset()
self.send_command(0x04)
self.ReadBusy()#waiting for the electronic paper IC to release the idle signal
self.send_command(0x00) #panel setting
self.send_data(0x0f) #LUT from OTP,128x296
self.send_data(0x89) #Temperature sensor, boost and other related timing settings
self.send_command(0x61) #resolution setting
self.send_data (0x80)
self.send_data (0x01)
self.send_data (0x28)
self.send_command(0X50) #VCOM AND DATA INTERVAL SETTING
self.send_data(0x77) #WBmode:VBDF 17|D7 VBDW 97 VBDB 57
# WBRmode:VBDF F7 VBDW 77 VBDB 37 VBDR B7
return 0
def getbuffer(self, image):
# logger.debug("bufsiz = ",int(self.width/8) * self.height)
buf = [0xFF] * (int(self.width/8) * self.height)
image_monocolor = image.convert('1')
imwidth, imheight = image_monocolor.size
pixels = image_monocolor.load()
# logger.debug("imwidth = %d, imheight = %d",imwidth,imheight)
if(imwidth == self.width and imheight == self.height):
logger.debug("Vertical")
for y in range(imheight):
for x in range(imwidth):
# Set the bits for the column of pixels at the current position.
if pixels[x, y] == 0:
buf[int((x + y * self.width) / 8)] &= ~(0x80 >> (x % 8))
elif(imwidth == self.height and imheight == self.width):
logger.debug("Horizontal")
for y in range(imheight):
for x in range(imwidth):
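                    # Rotate the landscape image 90 degrees so it maps onto the
                    # panel's portrait-oriented buffer.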
newx = y
newy = self.height - x - 1
if pixels[x, y] == 0:
buf[int((newx + newy*self.width) / 8)] &= ~(0x80 >> (y % 8))
return buf
def display(self, blackimage, ryimage): # ryimage: red or yellow image
if (blackimage != None):
self.send_command(0X10)
self.send_data2(blackimage)
if (ryimage != None):
self.send_command(0X13)
self.send_data2(ryimage)
self.send_command(0x12)
epdconfig.delay_ms(200)
self.ReadBusy()
def Clear(self):
self.send_command(0X10)
self.send_data2([0xff] * int(self.width * self.height / 8))
self.send_command(0X13)
self.send_data2([0xff] * int(self.width * self.height / 8))
self.send_command(0x12)
epdconfig.delay_ms(200)
self.ReadBusy()
def sleep(self):
self.send_command(0X02) # power off
self.ReadBusy()
self.send_command(0X07) # deep sleep
self.send_data(0xA5)
epdconfig.delay_ms(2000)
epdconfig.module_exit()
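# Illustrative usage sketch (creating the images with PIL is an assumption; only methods
# defined in this driver are called):
#
#   epd = EPD()
#   epd.init()
#   black = Image.new('1', (epd.height, epd.width), 255)  # white landscape canvas
#   red = Image.new('1', (epd.height, epd.width), 255)
#   epd.display(epd.getbuffer(black), epd.getbuffer(red))
#   epd.sleep()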
### END OF FILE ### | PypiClean |
/wts_nerdler-1.1.0-py3-none-any.whl/wts_nerdler/windows_task_scheduler.py | import os
import subprocess
import csv
from datetime import datetime
import calendar
TASK_CODECS_HEX = {
"0x00000000": "The operation completed successfully.",
"0x00000001": "Incorrect function called or unknown function called.",
"0x00000002": "File not found.",
"0x00000010": "The environment is incorrect.",
"0x00041300": "Task is ready to run at its next scheduled time.",
"0x00041301": "The task is currently running.",
"0x00041302": "The task has been disabled.",
"0x00041303": "The task has not yet run.",
"0x00041304": "There are no more runs scheduled for this task.",
"0x00041305": "One or more of the properties that are needed to run this task have not been set.",
"0x00041306": "The last run of the task was terminated by the user.",
"0x00041307": "Either the task has no triggers or the existing triggers are disabled or not set.",
"0x00041308": "Event triggers do not have set run times.",
"0x80010002": "Call was canceled by the message filter",
"0x80041309": "A task's trigger is not found.",
"0x8004130A": "One or more of the properties required to run this task have not been set.",
"0x8004130B": "There is no running instance of the task.",
"0x8004130C": "The Task Scheduler service is not installed on this computer.",
"0x8004130D": "The task object could not be opened.",
"0x8004130E": "The object is either an invalid task object or is not a task object.",
"0x8004130F": "No account information could be found in the Task Scheduler security database for the task indicated.",
"0x80041310": "Unable to establish existence of the account specified.",
"0x80041311": "Corruption was detected in the Task Scheduler security database",
"0x80041312": "Task Scheduler security services are available only on Windows NT.",
"0x80041313": "The task object version is either unsupported or invalid.",
"0x80041314": "The task has been configured with an unsupported combination of account settings and run time options.",
"0x80041315": "The Task Scheduler Service is not running.",
"0x80041316": "The task XML contains an unexpected node.",
"0x80041317": "The task XML contains an element or attribute from an unexpected namespace.",
"0x80041318": "The task XML contains a value which is incorrectly formatted or out of range.",
"0x80041319": "The task XML is missing a required element or attribute.",
"0x8004131A": "The task XML is malformed.",
"0x0004131B": "The task is registered, but not all specified triggers will start the task.",
"0x0004131C": "The task is registered, but may fail to start. Batch logon privilege needs to be enabled for the task principal.",
"0x8004131D": "The task XML contains too many nodes of the same type.",
"0x8004131E": "The task cannot be started after the trigger end boundary.",
"0x8004131F": "An instance of this task is already running.",
"0x80041320": "The task will not run because the user is not logged on.",
"0x80041321": "The task image is corrupt or has been tampered with.",
"0x80041322": "The Task Scheduler service is not available.",
"0x80041323": "The Task Scheduler service is too busy to handle your request. Please try again later.",
"0x80041324": "The Task Scheduler service attempted to run the task, but the task did not run due to one of the constraints in the task definition.",
"0x00041325": "The Task Scheduler service has asked the task to run.",
"0x80041326": "The task is disabled.",
"0x80041327": "The task has properties that are not compatible with earlier versions of Windows.",
"0x80041328": "The task settings do not allow the task to start on demand.",
"0xC000013A": "The application terminated as a result of a CTRL+C.",
"0xC0000142": "The application failed to initialize properly."
}
class windows_task_scheduler():
def __init__(self,task_root_path:str = "\\",temp_path_root:str=f"{os.getcwd()}\\temp\\"):
'''
'task_root_path': can't be empty and must end with double backslash
'''
self.task_root_path = task_root_path
self.temp_path_root = temp_path_root
'''
Create the path if not exists
'''
if not os.path.exists(self.temp_path_root):
os.mkdir(self.temp_path_root)
def get_status_desc(self,hexcode:str)->str:
try:
desc = TASK_CODECS_HEX[hexcode]
return desc
except KeyError:
return ""
def run_command(self,cmd):
completed = subprocess.run(["powershell", "-Command", cmd], capture_output=True)
return completed
def get_now_timestamp(self)->dict:
d = datetime.utcnow()
unixtime = calendar.timegm(d.utctimetuple())
return {"int": unixtime, "string": str(unixtime)}
def get_task_scheduler_status(self)->list:
'''
        Returns the status of all Windows scheduled tasks under the configured task root path
'''
path = f"'{self.task_root_path}\\*'"
csv_temp = f"{self.temp_path_root}{self.get_now_timestamp()['string']}_tasks.csv"
csv_temp_command = csv_temp.replace("\\","/")
command = f'powershell -command "Get-ScheduledTask -TaskPath {path} | Select-Object TaskName, State, '+"@{Name='LastRunTime'; Expression={(Get-ScheduledTaskInfo $_).LastRunTime}}, @{Name='LastTaskResult'; Expression={(Get-ScheduledTaskInfo $_).lastTaskResult}}, @{Name='LastTaskResultHex'; Expression={'0x{0:X8}' -f (Get-ScheduledTaskInfo $_).lastTaskResult}}, @{Name='NextRunTime'; Expression={(Get-ScheduledTaskInfo $_).NextRunTime}}"+f" | Export-Csv -Path '{csv_temp_command}' -NoTypeInformation -Delimiter ';'"+'"'
subprocess.call(command, shell=True)
last_check = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
data = []
with open(csv_temp, "r", encoding="utf-8") as csvfile:
csv_data = csv.reader(csvfile,delimiter=";")
next(csv_data)
for d in csv_data:
alert = True
if d[4] == "0x00000000":
alert = False
data.append({'task_path': self.task_root_path, 'task_full_path': self.task_root_path+"\\"+d[0],'task_name': d[0], 'state': d[1], 'last_runtime': d[2], 'last_task_result': d[3], 'last_task_result_hex': d[4], 'next_run_time': d[5], 'active_alert': alert, 'result_description': self.get_status_desc(d[4]), 'last_check': last_check})
os.remove(csv_temp)
return data
def enable_task(self,task_path:str):
'''
Enables task in task scheduler
task_path = full path of the task
'''
path = f"'{task_path}'"
command = f'powershell -command "Get-ScheduledTask -TaskPath {path} |enable-ScheduledTask"'
subprocess.call(command, shell=True)
def disable_task(self,task_path:str):
'''
Disable task in task scheduler
task_path = full path of the task
'''
path = f"'{task_path}'"
command = f'powershell -command "Get-ScheduledTask -TaskPath {path} |disable-ScheduledTask"'
subprocess.call(command, shell=True)
def start_task(self,task_path:str):
'''
        Starts task in task scheduler
task_path = full path of the task
'''
path = f"'{task_path}'"
command = f'powershell -command "Get-ScheduledTask -TaskPath {path} |start-ScheduledTask"'
subprocess.call(command, shell=True)
def stop_task(self,task_path:str):
'''
Stop task in task scheduler
task_path = full path of the task
'''
path = f"'{task_path}'"
command = f'powershell -command "Get-ScheduledTask -TaskPath {path} |stop-ScheduledTask"'
subprocess.call(command, shell=True)
def restart_task(self,task_path:str):
'''
Restart task in task scheduler
task_path = full path of the task
'''
self.stop_task(task_path)
self.start_task(task_path) | PypiClean |
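# Illustrative usage (the task path below is a placeholder assumption):
#
#   wts = windows_task_scheduler(task_root_path="\\MyTasks")
#   for task in wts.get_task_scheduler_status():
#       if task["active_alert"]:
#           wts.restart_task(task["task_full_path"])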
/opps-piston-0.2.4.tar.gz/opps-piston-0.2.4/piston/store.py | import oauth
from models import Nonce, Token, Consumer
from models import generate_random, VERIFIER_SIZE
class DataStore(oauth.OAuthDataStore):
"""Layer between Python OAuth and Django database."""
def __init__(self, oauth_request):
self.signature = oauth_request.parameters.get('oauth_signature', None)
self.timestamp = oauth_request.parameters.get('oauth_timestamp', None)
self.scope = oauth_request.parameters.get('scope', 'all')
def lookup_consumer(self, key):
try:
self.consumer = Consumer.objects.get(key=key)
return self.consumer
except Consumer.DoesNotExist:
return None
def lookup_token(self, token_type, token):
if token_type == 'request':
token_type = Token.REQUEST
elif token_type == 'access':
token_type = Token.ACCESS
try:
self.request_token = Token.objects.get(key=token,
token_type=token_type)
return self.request_token
except Token.DoesNotExist:
return None
def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
if oauth_token is None:
return None
nonce, created = Nonce.objects.get_or_create(consumer_key=oauth_consumer.key,
token_key=oauth_token.key,
key=nonce)
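        # A freshly created row means the nonce has not been seen before, so None is
        # returned and the request is treated as valid; an existing row indicates a
        # replayed nonce, and returning its key causes the request to be rejected.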
if created:
return None
else:
return nonce.key
def fetch_request_token(self, oauth_consumer, oauth_callback):
if oauth_consumer.key == self.consumer.key:
self.request_token = Token.objects.create_token(consumer=self.consumer,
token_type=Token.REQUEST,
timestamp=self.timestamp)
if oauth_callback:
self.request_token.set_callback(oauth_callback)
return self.request_token
return None
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
if oauth_consumer.key == self.consumer.key \
and oauth_token.key == self.request_token.key \
and oauth_verifier == self.request_token.verifier \
and self.request_token.is_approved:
self.access_token = Token.objects.create_token(consumer=self.consumer,
token_type=Token.ACCESS,
timestamp=self.timestamp,
user=self.request_token.user)
return self.access_token
return None
def authorize_request_token(self, oauth_token, user):
if oauth_token.key == self.request_token.key:
# authorize the request token in the store
self.request_token.is_approved = True
self.request_token.user = user
self.request_token.verifier = generate_random(VERIFIER_SIZE)
self.request_token.save()
return self.request_token
return None | PypiClean |
/MapProxy-1.16.0.tar.gz/MapProxy-1.16.0/mapproxy/script/defrag.py |
from __future__ import print_function
import glob
import optparse
import os.path
import re
import sys
from collections import OrderedDict
from mapproxy.cache.compact import CompactCacheV1, CompactCacheV2
from mapproxy.cache.tile import Tile
from mapproxy.config import local_base_config
from mapproxy.config.loader import load_configuration, ConfigurationError
import logging
log = logging.getLogger('mapproxy.defrag')
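# Illustrative invocation (configuration file and cache name are placeholders):
#
#   mapproxy-util defrag-compact -f mapproxy.yaml --min-percent 20 --caches osm_cache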
def defrag_command(args=None):
parser = optparse.OptionParser("%prog defrag-compact [options] -f mapproxy_conf")
parser.add_option("-f", "--mapproxy-conf", dest="mapproxy_conf",
help="MapProxy configuration.")
parser.add_option("--min-percent", type=float, default=10.0,
help="Only defrag if fragmentation is larger (10 means at least 10% of the file does not have to be used)")
parser.add_option("--min-mb", type=float, default=1.0,
help="Only defrag if fragmentation is larger (2 means at least 2MB the file does not have to be used)")
parser.add_option("--dry-run", "-n", action="store_true",
help="Do not de-fragment, only print output")
parser.add_option("--caches", dest="cache_names", metavar='cache1,cache2,...',
help="only defragment the named caches")
from mapproxy.script.util import setup_logging
import logging
setup_logging(logging.INFO, format="[%(asctime)s] %(msg)s")
if args:
args = args[1:] # remove script name
(options, args) = parser.parse_args(args)
if not options.mapproxy_conf:
parser.print_help()
sys.exit(1)
try:
proxy_configuration = load_configuration(options.mapproxy_conf)
except IOError as e:
print('ERROR: ', "%s: '%s'" % (e.strerror, e.filename), file=sys.stderr)
sys.exit(2)
except ConfigurationError as e:
print(e, file=sys.stderr)
print('ERROR: invalid configuration (see above)', file=sys.stderr)
sys.exit(2)
with local_base_config(proxy_configuration.base_config):
available_caches = OrderedDict()
for name, cache_conf in proxy_configuration.caches.items():
for grid, extent, tile_mgr in cache_conf.caches():
if isinstance(tile_mgr.cache, (CompactCacheV1, CompactCacheV2)):
available_caches.setdefault(name, []).append(tile_mgr.cache)
if options.cache_names:
defrag_caches = options.cache_names.split(',')
missing = set(defrag_caches).difference(available_caches.keys())
if missing:
print('unknown caches: %s' % (', '.join(missing), ))
print('available compact caches: %s' %
(', '.join(available_caches.keys()), ))
sys.exit(1)
else:
defrag_caches = None
for name, caches in available_caches.items():
if defrag_caches and name not in defrag_caches:
continue
for cache in caches:
logger = DefragLog(name)
defrag_compact_cache(cache,
min_percent=options.min_percent/100,
min_bytes=options.min_mb*1024*1024,
dry_run=options.dry_run,
log_progress=logger,
)
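# Example invocation (a sketch added for illustration; file and cache names are placeholders).
# The usage string above suggests this runs as a "defrag-compact" sub-command of MapProxy's
# utility script, e.g.:
#
#   mapproxy-util defrag-compact -f mapproxy.yaml --caches osm_cache --min-percent 20 --min-mb 2 --dry-run
#
# or programmatically, passing argv-style arguments (the first element is the script name):
#
#   defrag_command(["prog", "-f", "mapproxy.yaml", "--dry-run"])
#
# Drop --dry-run to actually rewrite fragmented bundle files.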
def bundle_offset(fname):
"""
>>> bundle_offset("path/to/R0000C0000.bundle")
(0, 0)
>>> bundle_offset("path/to/R0380C1380.bundle")
(4992, 896)
"""
match = re.search(r'R([A-F0-9]{4,})C([A-F0-9]{4,}).bundle$', fname, re.IGNORECASE)
if match:
r = int(match.group(1), 16)
c = int(match.group(2), 16)
return c, r
class DefragLog(object):
def __init__(self, cache_name):
self.cache_name = cache_name
def log(self, fname, fragmentation, fragmentation_bytes, num, total, defrag):
msg = "%s: %3d/%d (%s) fragmentation is %.1f%% (%dkb)" % (
self.cache_name, num, total, fname, fragmentation, fragmentation_bytes/1024
)
if defrag:
msg += " - defragmenting"
else:
msg += " - skipping"
log.info(msg)
def defrag_compact_cache(cache, min_percent=0.1, min_bytes=1024*1024, log_progress=None, dry_run=False):
bundles = glob.glob(os.path.join(cache.cache_dir, 'L??', 'R????C????.bundle'))
for i, bundle_file in enumerate(bundles):
offset = bundle_offset(bundle_file)
        # Strip the '.bundle' extension; rstrip() would remove characters, not the suffix.
        b = cache.bundle_class(bundle_file[:-len('.bundle')], offset)
size, file_size = b.size()
defrag = 1 - float(size) / file_size
defrag_bytes = file_size - size
skip = False
if defrag < min_percent or defrag_bytes < min_bytes:
skip = True
if log_progress:
log_progress.log(
fname=bundle_file,
fragmentation=defrag * 100,
fragmentation_bytes=defrag_bytes,
num=i+1, total=len(bundles),
defrag=not skip,
)
if skip or dry_run:
continue
tmp_bundle = os.path.join(cache.cache_dir, 'tmp_defrag')
defb = cache.bundle_class(tmp_bundle, offset)
stored_tiles = False
for y in range(128):
tiles = [Tile((x, y, 0)) for x in range(128)]
b.load_tiles(tiles)
tiles = [t for t in tiles if t.source]
if tiles:
stored_tiles = True
defb.store_tiles(tiles)
        # Remove the original files first:
        # - in case the defragmented bundle is empty
        # - Windows does not support renaming onto existing files
if os.path.exists(bundle_file):
os.remove(bundle_file)
if os.path.exists(bundle_file[:-1] + 'x'):
os.remove(bundle_file[:-1] + 'x')
if stored_tiles:
os.rename(tmp_bundle + '.bundle', bundle_file)
if os.path.exists(tmp_bundle + '.bundlx'):
os.rename(tmp_bundle + '.bundlx', bundle_file[:-1] + 'x')
if os.path.exists(tmp_bundle + '.lck'):
os.unlink(tmp_bundle + '.lck') | PypiClean |
/lytekit-0.15.3.tar.gz/lytekit-0.15.3/flytekit/models/literals.py | from datetime import datetime as _datetime
from typing import List, Optional
import pytz as _pytz
from flyteidl.core import literals_pb2 as _literals_pb2
from google.protobuf.struct_pb2 import Struct
from flytekit.exceptions import user as _user_exceptions
from flytekit.models import common as _common
from flytekit.models.core import types as _core_types
from flytekit.models.types import LiteralType as _LiteralType
from flytekit.models.types import OutputReference as _OutputReference
class RetryStrategy(_common.FlyteIdlEntity):
def __init__(self, retries):
"""
:param int retries: Number of retries to attempt on recoverable failures. If retries is 0, then
only one attempt will be made.
"""
self._retries = retries
@property
def retries(self):
"""
Number of retries to attempt on recoverable failures. If retries is 0, then only one attempt will be made.
:rtype: int
"""
return self._retries
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.RetryStrategy
"""
return _literals_pb2.RetryStrategy(retries=self.retries)
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.core.literals_pb2.RetryStrategy pb2_object:
:rtype: RetryStrategy
"""
return cls(retries=pb2_object.retries)
class Primitive(_common.FlyteIdlEntity):
def __init__(
self,
integer=None,
float_value=None,
string_value=None,
boolean=None,
datetime=None,
duration=None,
):
"""
This object proxies the primitives supported by the Flyte IDL system. Only one value can be set.
:param int integer: [Optional]
:param float float_value: [Optional]
:param Text string_value: [Optional]
:param bool boolean: [Optional]
:param datetime.timestamp datetime: [Optional]
:param datetime.timedelta duration: [Optional]
"""
self._integer = integer
self._float_value = float_value
self._string_value = string_value
self._boolean = boolean
if datetime is None:
self._datetime = None
elif isinstance(datetime, _datetime):
self._datetime = datetime
else: # TODO Check for timestamp type?
self._datetime = _datetime.utcfromtimestamp(datetime.seconds)
self._duration = duration
@property
def integer(self):
"""
:rtype: int
"""
return self._integer
@property
def float_value(self):
"""
:rtype: float
"""
return self._float_value
@property
def string_value(self):
"""
:rtype: Text
"""
return self._string_value
@property
def boolean(self):
"""
:rtype: bool
"""
return self._boolean
@property
def datetime(self):
"""
:rtype: datetime.datetime
"""
if self._datetime is None or self._datetime.tzinfo is not None:
return self._datetime
return self._datetime.replace(tzinfo=_pytz.UTC)
@property
def duration(self):
"""
:rtype: datetime.timedelta
"""
return self._duration
@property
def value(self):
"""
This returns whichever field is set.
:rtype: T
"""
for value in [
self.integer,
self.float_value,
self.string_value,
self.boolean,
self.datetime,
self.duration,
]:
if value is not None:
return value
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.Primitive
"""
primitive = _literals_pb2.Primitive(
integer=self.integer,
float_value=self.float_value,
string_value=self.string_value,
boolean=self.boolean,
)
if self.datetime is not None:
# Convert to UTC and remove timezone so protobuf behaves.
primitive.datetime.FromDatetime(self.datetime.astimezone(_pytz.UTC).replace(tzinfo=None))
if self.duration is not None:
primitive.duration.FromTimedelta(self.duration)
return primitive
@classmethod
def from_flyte_idl(cls, proto):
"""
:param flyteidl.core.literals_pb2.Primitive proto:
:rtype: Primitive
"""
return cls(
integer=proto.integer if proto.HasField("integer") else None,
float_value=proto.float_value if proto.HasField("float_value") else None,
string_value=proto.string_value if proto.HasField("string_value") else None,
boolean=proto.boolean if proto.HasField("boolean") else None,
datetime=proto.datetime.ToDatetime().replace(tzinfo=_pytz.UTC) if proto.HasField("datetime") else None,
duration=proto.duration.ToTimedelta() if proto.HasField("duration") else None,
)
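def _primitive_roundtrip_example():
    """Illustrative sketch, not part of the upstream API: shows the protobuf round trip
    for the Primitive model defined above."""
    # Wrap a plain integer, serialize it to the Flyte IDL message, and parse it back.
    p = Primitive(integer=42)
    pb = p.to_flyte_idl()  # flyteidl.core.literals_pb2.Primitive
    return Primitive.from_flyte_idl(pb).value  # -> 42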
class Binary(_common.FlyteIdlEntity):
def __init__(self, value, tag):
"""
:param bytes value:
:param Text tag:
"""
self._value = value
self._tag = tag
@property
def value(self):
"""
:rtype: bytes
"""
return self._value
@property
def tag(self):
"""
:rtype: Text
"""
return self._tag
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.Binary
"""
return _literals_pb2.Binary(value=self.value, tag=self.tag)
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.core.literals_pb2.Binary pb2_object:
:rtype: Binary
"""
return cls(value=pb2_object.value, tag=pb2_object.tag)
class BlobMetadata(_common.FlyteIdlEntity):
"""
This is metadata for the Blob literal.
"""
def __init__(self, type):
"""
:param flytekit.models.core.types.BlobType type: The type of the underlying blob
"""
self._type = type
@property
def type(self):
"""
:rtype: flytekit.models.core.types.BlobType
"""
return self._type
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.BlobMetadata
"""
return _literals_pb2.BlobMetadata(type=self.type.to_flyte_idl())
@classmethod
def from_flyte_idl(cls, proto):
"""
:param flyteidl.core.literals_pb2.BlobMetadata proto:
:rtype: BlobMetadata
"""
return cls(type=_core_types.BlobType.from_flyte_idl(proto.type))
class Blob(_common.FlyteIdlEntity):
def __init__(self, metadata, uri):
"""
This literal model is used to represent binary data offloaded to some storage location which is
identifiable with a unique string. See :py:class:`flytekit.FlyteFile` as an example.
:param BlobMetadata metadata:
:param Text uri: The location of this blob
"""
self._metadata = metadata
self._uri = uri
@property
def uri(self):
"""
:rtype: Text
"""
return self._uri
@property
def metadata(self):
"""
:rtype: BlobMetadata
"""
return self._metadata
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.Blob
"""
return _literals_pb2.Blob(metadata=self.metadata.to_flyte_idl(), uri=self.uri)
@classmethod
def from_flyte_idl(cls, proto):
"""
:param flyteidl.core.literals_pb2.Blob proto:
:rtype: Blob
"""
return cls(metadata=BlobMetadata.from_flyte_idl(proto.metadata), uri=proto.uri)
class Void(_common.FlyteIdlEntity):
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.Void
"""
return _literals_pb2.Void()
@classmethod
def from_flyte_idl(cls, proto):
"""
:param flyteidl.core.literals_pb2.Void proto:
:rtype: Void
"""
return cls()
class BindingDataMap(_common.FlyteIdlEntity):
def __init__(self, bindings):
"""
A map of BindingData items. Can be a recursive structure
:param dict[string, BindingData] bindings: Map of strings to Bindings
"""
self._bindings = bindings
@property
def bindings(self):
"""
Map of strings to Bindings
:rtype: dict[string, BindingData]
"""
return self._bindings
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.BindingDataMap
"""
return _literals_pb2.BindingDataMap(bindings={k: v.to_flyte_idl() for (k, v) in self.bindings.items()})
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.core.literals_pb2.BindingDataMap pb2_object:
:rtype: flytekit.models.literals.BindingDataMap
"""
return cls({k: BindingData.from_flyte_idl(v) for (k, v) in pb2_object.bindings.items()})
class BindingDataCollection(_common.FlyteIdlEntity):
def __init__(self, bindings):
"""
A list of BindingData items.
:param list[BindingData] bindings:
"""
self._bindings = bindings
@property
def bindings(self):
"""
:rtype: list[BindingData]
"""
return self._bindings
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.BindingDataCollection
"""
return _literals_pb2.BindingDataCollection(bindings=[b.to_flyte_idl() for b in self.bindings])
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.core.literals_pb2.BindingDataCollection pb2_object:
:rtype: flytekit.models.literals.BindingDataCollection
"""
return cls([BindingData.from_flyte_idl(b) for b in pb2_object.bindings])
class BindingDataRecordField(_common.FlyteIdlEntity):
def __init__(self, key: str, binding: "BindingData"):
self._key = key
self._binding = binding
@property
def key(self):
return self._key
@property
def binding(self):
return self._binding
def to_flyte_idl(self):
        return _literals_pb2.BindingDataRecordField(key=self.key, binding=self.binding.to_flyte_idl())
@classmethod
def from_flyte_idl(cls, pb2_object):
return cls(key=pb2_object.key, binding=BindingData.from_flyte_idl(pb2_object.binding))
class BindingDataRecord(_common.FlyteIdlEntity):
def __init__(self, fields: List[BindingDataRecordField]):
self._fields = fields
@property
def fields(self):
return self._fields
def to_flyte_idl(self):
        return _literals_pb2.BindingDataRecord(fields=[f.to_flyte_idl() for f in self.fields])
@classmethod
def from_flyte_idl(cls, pb2_object):
return cls(fields=[BindingDataRecordField.from_flyte_idl(x) for x in pb2_object.fields])
class BindingData(_common.FlyteIdlEntity):
def __init__(self, scalar=None, collection=None, record=None, promise=None, map=None):
"""
Specifies either a simple value or a reference to another output. Only one of the input arguments may be
specified.
:param Scalar scalar: [Optional] A simple scalar value.
:param BindingDataCollection collection: [Optional] A collection of binding data. This allows nesting of
binding data to any number of levels.
        :param BindingDataRecord record: [Optional] A record of named binding fields.
        :param flytekit.models.types.OutputReference promise: [Optional] References an output promised by another node.
        :param BindingDataMap map: [Optional] A map of bindings. The key is always a string.
"""
self._scalar = scalar
self._collection = collection
self._record = record
self._promise = promise
self._map = map
@property
def scalar(self):
"""
A simple scalar value.
:rtype: Scalar
"""
return self._scalar
@property
def collection(self):
"""
[Optional] A collection of binding data. This allows nesting of binding data to any number of levels.
:rtype: BindingDataCollection
"""
return self._collection
@property
def record(self):
return self._record
@property
def promise(self):
"""
[Optional] References an output promised by another node.
:rtype: flytekit.models.types.OutputReference
"""
return self._promise
@property
def map(self):
"""
[Optional] A map of bindings. The key is always a string.
:rtype: BindingDataMap
"""
return self._map
@property
def value(self):
"""
Returns whichever value is set
:rtype: T
"""
        return self.scalar or self.collection or self.record or self.promise or self.map
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.BindingData
"""
return _literals_pb2.BindingData(
scalar=self.scalar.to_flyte_idl() if self.scalar is not None else None,
collection=self.collection.to_flyte_idl() if self.collection is not None else None,
record=self.record.to_flyte_idl() if self.record is not None else None,
promise=self.promise.to_flyte_idl() if self.promise is not None else None,
map=self.map.to_flyte_idl() if self.map is not None else None,
)
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.core.literals_pb2.BindingData pb2_object:
:return: BindingData
"""
return cls(
scalar=Scalar.from_flyte_idl(pb2_object.scalar) if pb2_object.HasField("scalar") else None,
collection=BindingDataCollection.from_flyte_idl(pb2_object.collection)
if pb2_object.HasField("collection")
else None,
record=BindingDataRecord.from_flyte_idl(pb2_object.record) if pb2_object.HasField("record") else None,
promise=_OutputReference.from_flyte_idl(pb2_object.promise) if pb2_object.HasField("promise") else None,
map=BindingDataMap.from_flyte_idl(pb2_object.map) if pb2_object.HasField("map") else None,
)
def to_literal_model(self):
"""
Converts current binding data into a Literal asserting that there are no promises in the bindings.
:rtype: Literal
"""
if self.promise:
raise _user_exceptions.FlyteValueException(
self.promise,
"Cannot convert BindingData to a Literal because it has a promise.",
)
elif self.scalar:
return Literal(scalar=self.scalar)
elif self.collection:
return Literal(
collection=LiteralCollection(
literals=[binding.to_literal_model() for binding in self.collection.bindings]
)
)
elif self.map:
            return Literal(map=LiteralMap(literals={k: binding.to_literal_model() for k, binding in self.map.bindings.items()}))
elif self.record:
return Literal(
record=Record(fields=[RecordField(f.key, f.binding.to_literal_model()) for f in self.record.fields])
)
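def _binding_data_to_literal_example():
    """Illustrative sketch, not part of the upstream API: a BindingData that carries a
    static scalar collapses to a Literal via to_literal_model(); a promise-backed binding
    would raise FlyteValueException instead. Scalar, Primitive and Literal are defined
    later in this module and are resolved when the function is called."""
    bd = BindingData(scalar=Scalar(primitive=Primitive(string_value="hello")))
    return bd.to_literal_model()  # Literal(scalar=Scalar(primitive=...))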
class Binding(_common.FlyteIdlEntity):
def __init__(self, var, binding):
"""
An input/output binding of a variable to either static value or a node output.
:param Text var: A variable name, must match an input or output variable of the node.
:param BindingData binding: Data to use to bind this variable.
"""
self._var = var
self._binding = binding
@property
def var(self):
"""
A variable name, must match an input or output variable of the node.
:rtype: Text
"""
return self._var
@property
def binding(self):
"""
Data to use to bind this variable.
:rtype: BindingData
"""
return self._binding
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.Binding
"""
return _literals_pb2.Binding(var=self.var, binding=self.binding.to_flyte_idl())
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.core.literals_pb2.Binding pb2_object:
:rtype: flytekit.core.models.literals.Binding
"""
return cls(pb2_object.var, BindingData.from_flyte_idl(pb2_object.binding))
class Union(_common.FlyteIdlEntity):
def __init__(self, value, stored_type):
"""
The runtime representation of a tagged union value. See `UnionType` for more details.
:param flytekit.models.literals.Literal value:
:param flytekit.models.types.LiteralType stored_type:
"""
self._value = value
self._type = stored_type
@property
def value(self):
"""
:rtype: flytekit.models.literals.Literal
"""
return self._value
@property
def stored_type(self):
"""
:rtype: flytekit.models.types.LiteralType
"""
return self._type
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.Union
"""
return _literals_pb2.Union(value=self.value.to_flyte_idl(), type=self._type.to_flyte_idl())
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
        :param flyteidl.core.literals_pb2.Union pb2_object:
        :rtype: Union
"""
return cls(
value=Literal.from_flyte_idl(pb2_object.value), stored_type=_LiteralType.from_flyte_idl(pb2_object.type)
)
class RecordField(_common.FlyteIdlEntity):
def __init__(self, key: str, value: "Literal"):
self._key = key
self._value = value
@property
def key(self):
return self._key
@property
def value(self):
return self._value
def to_flyte_idl(self) -> _literals_pb2.RecordField:
return _literals_pb2.RecordField(key=self._key, value=self._value.to_flyte_idl())
@classmethod
def from_flyte_idl(cls, proto: _literals_pb2.RecordField):
return cls(key=proto.key, value=Literal.from_flyte_idl(proto.value))
class Record(_common.FlyteIdlEntity):
def __init__(self, fields: List[RecordField]):
self._fields = fields
@property
def fields(self):
return self._fields
def to_flyte_idl(self) -> _literals_pb2.Record:
return _literals_pb2.Record(fields=[x.to_flyte_idl() for x in self.fields])
@classmethod
def from_flyte_idl(cls, proto: _literals_pb2.Record):
return cls(fields=[RecordField.from_flyte_idl(x) for x in proto.fields])
class LiteralCollection(_common.FlyteIdlEntity):
def __init__(self, literals):
"""
:param list[Literal] literals: underlying list of literals in this collection.
"""
self._literals = literals
@property
def literals(self):
"""
:rtype: list[Literal]
"""
return self._literals
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.LiteralCollection
"""
return _literals_pb2.LiteralCollection(literals=[l.to_flyte_idl() for l in self.literals])
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.core.literals_pb2.LiteralCollection pb2_object:
:rtype: LiteralCollection
"""
return cls([Literal.from_flyte_idl(l) for l in pb2_object.literals])
class LiteralMap(_common.FlyteIdlEntity):
def __init__(self, literals):
"""
:param dict[Text, Literal] literals: A dictionary mapping Text key names to Literal objects.
"""
self._literals = literals
@property
def literals(self):
"""
A dictionary mapping Text key names to Literal objects.
:rtype: dict[Text, Literal]
"""
return self._literals
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.LiteralMap
"""
return _literals_pb2.LiteralMap(literals={k: v.to_flyte_idl() for k, v in self.literals.items()})
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.core.literals_pb2.LiteralMap pb2_object:
:rtype: LiteralMap
"""
return cls({k: Literal.from_flyte_idl(v) for k, v in pb2_object.literals.items()})
class Scalar(_common.FlyteIdlEntity):
def __init__(
self,
primitive: Primitive = None,
blob: Blob = None,
binary: Binary = None,
union: Union = None,
none_type: Void = None,
error=None,
generic: Struct = None,
):
"""
Scalar wrapper around Flyte types. Only one can be specified.
:param Primitive primitive:
:param Blob blob:
:param Binary binary:
        :param Union union:
:param Void none_type:
:param error:
:param google.protobuf.struct_pb2.Struct generic:
"""
self._primitive = primitive
self._blob = blob
self._binary = binary
self._union = union
self._none_type = none_type
self._error = error
self._generic = generic
@property
def primitive(self):
"""
:rtype: Primitive
"""
return self._primitive
@property
def blob(self):
"""
:rtype: Blob
"""
return self._blob
@property
def binary(self):
"""
:rtype: Binary
"""
return self._binary
@property
def union(self):
"""
:rtype: Union
"""
return self._union
@property
def none_type(self):
"""
:rtype: Void
"""
return self._none_type
@property
def error(self):
"""
:rtype: TODO
"""
return self._error
@property
def generic(self):
"""
:rtype: google.protobuf.struct_pb2.Struct
"""
return self._generic
@property
def value(self):
"""
Returns whichever value is set
:rtype: T
"""
return (
self.primitive
or self.blob
or self.binary
or self.union
or self.none_type
or self.error
or self.generic
)
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.Scalar
"""
return _literals_pb2.Scalar(
primitive=self.primitive.to_flyte_idl() if self.primitive is not None else None,
blob=self.blob.to_flyte_idl() if self.blob is not None else None,
binary=self.binary.to_flyte_idl() if self.binary is not None else None,
union=self.union.to_flyte_idl() if self.union is not None else None,
none_type=self.none_type.to_flyte_idl() if self.none_type is not None else None,
error=self.error if self.error is not None else None,
generic=self.generic,
)
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.core.literals_pb2.Scalar pb2_object:
:rtype: flytekit.models.literals.Scalar
"""
# todo finish
return cls(
primitive=Primitive.from_flyte_idl(pb2_object.primitive) if pb2_object.HasField("primitive") else None,
blob=Blob.from_flyte_idl(pb2_object.blob) if pb2_object.HasField("blob") else None,
binary=Binary.from_flyte_idl(pb2_object.binary) if pb2_object.HasField("binary") else None,
union=Union.from_flyte_idl(pb2_object.union) if pb2_object.HasField("union") else None,
none_type=Void.from_flyte_idl(pb2_object.none_type) if pb2_object.HasField("none_type") else None,
error=pb2_object.error if pb2_object.HasField("error") else None,
generic=pb2_object.generic if pb2_object.HasField("generic") else None,
)
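def _scalar_value_example():
    """Illustrative sketch, not part of the upstream API: a Scalar wraps exactly one of
    the supported kinds, and .value returns whichever field is populated."""
    s = Scalar(primitive=Primitive(boolean=True))
    return s.value  # -> the wrapped Primitive instance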
class Literal(_common.FlyteIdlEntity):
def __init__(
self,
scalar: Optional[Scalar] = None,
collection: Optional[LiteralCollection] = None,
map: Optional[LiteralMap] = None,
record: Optional[Record] = None,
hash: Optional[str] = None,
):
"""
This IDL message represents a literal value in the Flyte ecosystem.
:param Scalar scalar:
:param LiteralCollection collection:
:param LiteralMap map:
"""
self._scalar = scalar
self._collection = collection
self._map = map
self._record = record
self._hash = hash
@property
def scalar(self):
"""
If not None, this value holds a scalar value which can be further unpacked.
:rtype: Scalar
"""
return self._scalar
@property
def collection(self):
"""
If not None, this value holds a collection of Literal values which can be further unpacked.
:rtype: LiteralCollection
"""
return self._collection
@property
def map(self):
"""
If not None, this value holds a map of Literal values which can be further unpacked.
:rtype: LiteralMap
"""
return self._map
@property
def record(self):
return self._record
@property
def value(self):
"""
Returns one of the scalar, collection, or map properties based on which one is set.
:rtype: T
"""
return self.scalar or self.collection or self.map
@property
def hash(self):
"""
If not None, this value holds a hash that represents the literal for caching purposes.
:rtype: str
"""
return self._hash
@hash.setter
def hash(self, value):
self._hash = value
def to_flyte_idl(self):
"""
:rtype: flyteidl.core.literals_pb2.Literal
"""
return _literals_pb2.Literal(
scalar=self.scalar.to_flyte_idl() if self.scalar is not None else None,
collection=self.collection.to_flyte_idl() if self.collection is not None else None,
map=self.map.to_flyte_idl() if self.map is not None else None,
record=self.record.to_flyte_idl() if self.record is not None else None,
hash=self.hash,
)
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param flyteidl.core.literals_pb2.Literal pb2_object:
:rtype: Literal
"""
collection = None
if pb2_object.HasField("collection"):
collection = LiteralCollection.from_flyte_idl(pb2_object.collection)
return cls(
scalar=Scalar.from_flyte_idl(pb2_object.scalar) if pb2_object.HasField("scalar") else None,
collection=collection,
map=LiteralMap.from_flyte_idl(pb2_object.map) if pb2_object.HasField("map") else None,
record=Record.from_flyte_idl(pb2_object.record) if pb2_object.HasField("record") else None,
hash=pb2_object.hash if pb2_object.hash else None,
) | PypiClean |
/pulumi_aws-6.1.0a1693529760.tar.gz/pulumi_aws-6.1.0a1693529760/pulumi_aws/s3control/object_lambda_access_point.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ObjectLambdaAccessPointArgs', 'ObjectLambdaAccessPoint']
@pulumi.input_type
class ObjectLambdaAccessPointArgs:
def __init__(__self__, *,
configuration: pulumi.Input['ObjectLambdaAccessPointConfigurationArgs'],
account_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ObjectLambdaAccessPoint resource.
:param pulumi.Input['ObjectLambdaAccessPointConfigurationArgs'] configuration: A configuration block containing details about the Object Lambda Access Point. See Configuration below for more details.
:param pulumi.Input[str] account_id: The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. Defaults to automatically determined account ID of the AWS provider.
:param pulumi.Input[str] name: The name for this Object Lambda Access Point.
"""
pulumi.set(__self__, "configuration", configuration)
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def configuration(self) -> pulumi.Input['ObjectLambdaAccessPointConfigurationArgs']:
"""
A configuration block containing details about the Object Lambda Access Point. See Configuration below for more details.
"""
return pulumi.get(self, "configuration")
@configuration.setter
def configuration(self, value: pulumi.Input['ObjectLambdaAccessPointConfigurationArgs']):
pulumi.set(self, "configuration", value)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[str]]:
"""
The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. Defaults to automatically determined account ID of the AWS provider.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name for this Object Lambda Access Point.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _ObjectLambdaAccessPointState:
def __init__(__self__, *,
account_id: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
configuration: Optional[pulumi.Input['ObjectLambdaAccessPointConfigurationArgs']] = None,
name: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ObjectLambdaAccessPoint resources.
:param pulumi.Input[str] account_id: The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. Defaults to automatically determined account ID of the AWS provider.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the Object Lambda Access Point.
:param pulumi.Input['ObjectLambdaAccessPointConfigurationArgs'] configuration: A configuration block containing details about the Object Lambda Access Point. See Configuration below for more details.
:param pulumi.Input[str] name: The name for this Object Lambda Access Point.
"""
if account_id is not None:
pulumi.set(__self__, "account_id", account_id)
if arn is not None:
pulumi.set(__self__, "arn", arn)
if configuration is not None:
pulumi.set(__self__, "configuration", configuration)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> Optional[pulumi.Input[str]]:
"""
The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. Defaults to automatically determined account ID of the AWS provider.
"""
return pulumi.get(self, "account_id")
@account_id.setter
def account_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "account_id", value)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of the Object Lambda Access Point.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def configuration(self) -> Optional[pulumi.Input['ObjectLambdaAccessPointConfigurationArgs']]:
"""
A configuration block containing details about the Object Lambda Access Point. See Configuration below for more details.
"""
return pulumi.get(self, "configuration")
@configuration.setter
def configuration(self, value: Optional[pulumi.Input['ObjectLambdaAccessPointConfigurationArgs']]):
pulumi.set(self, "configuration", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name for this Object Lambda Access Point.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
class ObjectLambdaAccessPoint(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
configuration: Optional[pulumi.Input[pulumi.InputType['ObjectLambdaAccessPointConfigurationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a resource to manage an S3 Object Lambda Access Point.
An Object Lambda access point is associated with exactly one standard access point and thus one Amazon S3 bucket.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_bucket_v2 = aws.s3.BucketV2("exampleBucketV2")
example_access_point = aws.s3.AccessPoint("exampleAccessPoint", bucket=example_bucket_v2.id)
example_object_lambda_access_point = aws.s3control.ObjectLambdaAccessPoint("exampleObjectLambdaAccessPoint", configuration=aws.s3control.ObjectLambdaAccessPointConfigurationArgs(
supporting_access_point=example_access_point.arn,
transformation_configurations=[aws.s3control.ObjectLambdaAccessPointConfigurationTransformationConfigurationArgs(
actions=["GetObject"],
content_transformation=aws.s3control.ObjectLambdaAccessPointConfigurationTransformationConfigurationContentTransformationArgs(
aws_lambda=aws.s3control.ObjectLambdaAccessPointConfigurationTransformationConfigurationContentTransformationAwsLambdaArgs(
function_arn=aws_lambda_function["example"]["arn"],
),
),
)],
))
```
## Import
Using `pulumi import`, import Object Lambda Access Points using the `account_id` and `name`, separated by a colon (`:`). For example:
```sh
$ pulumi import aws:s3control/objectLambdaAccessPoint:ObjectLambdaAccessPoint example 123456789012:example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_id: The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. Defaults to automatically determined account ID of the AWS provider.
:param pulumi.Input[pulumi.InputType['ObjectLambdaAccessPointConfigurationArgs']] configuration: A configuration block containing details about the Object Lambda Access Point. See Configuration below for more details.
:param pulumi.Input[str] name: The name for this Object Lambda Access Point.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ObjectLambdaAccessPointArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a resource to manage an S3 Object Lambda Access Point.
An Object Lambda access point is associated with exactly one standard access point and thus one Amazon S3 bucket.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example_bucket_v2 = aws.s3.BucketV2("exampleBucketV2")
example_access_point = aws.s3.AccessPoint("exampleAccessPoint", bucket=example_bucket_v2.id)
example_object_lambda_access_point = aws.s3control.ObjectLambdaAccessPoint("exampleObjectLambdaAccessPoint", configuration=aws.s3control.ObjectLambdaAccessPointConfigurationArgs(
supporting_access_point=example_access_point.arn,
transformation_configurations=[aws.s3control.ObjectLambdaAccessPointConfigurationTransformationConfigurationArgs(
actions=["GetObject"],
content_transformation=aws.s3control.ObjectLambdaAccessPointConfigurationTransformationConfigurationContentTransformationArgs(
aws_lambda=aws.s3control.ObjectLambdaAccessPointConfigurationTransformationConfigurationContentTransformationAwsLambdaArgs(
function_arn=aws_lambda_function["example"]["arn"],
),
),
)],
))
```
## Import
Using `pulumi import`, import Object Lambda Access Points using the `account_id` and `name`, separated by a colon (`:`). For example:
```sh
$ pulumi import aws:s3control/objectLambdaAccessPoint:ObjectLambdaAccessPoint example 123456789012:example
```
:param str resource_name: The name of the resource.
:param ObjectLambdaAccessPointArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ObjectLambdaAccessPointArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
configuration: Optional[pulumi.Input[pulumi.InputType['ObjectLambdaAccessPointConfigurationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ObjectLambdaAccessPointArgs.__new__(ObjectLambdaAccessPointArgs)
__props__.__dict__["account_id"] = account_id
if configuration is None and not opts.urn:
raise TypeError("Missing required property 'configuration'")
__props__.__dict__["configuration"] = configuration
__props__.__dict__["name"] = name
__props__.__dict__["arn"] = None
super(ObjectLambdaAccessPoint, __self__).__init__(
'aws:s3control/objectLambdaAccessPoint:ObjectLambdaAccessPoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
account_id: Optional[pulumi.Input[str]] = None,
arn: Optional[pulumi.Input[str]] = None,
configuration: Optional[pulumi.Input[pulumi.InputType['ObjectLambdaAccessPointConfigurationArgs']]] = None,
name: Optional[pulumi.Input[str]] = None) -> 'ObjectLambdaAccessPoint':
"""
Get an existing ObjectLambdaAccessPoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_id: The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. Defaults to automatically determined account ID of the AWS provider.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN) of the Object Lambda Access Point.
:param pulumi.Input[pulumi.InputType['ObjectLambdaAccessPointConfigurationArgs']] configuration: A configuration block containing details about the Object Lambda Access Point. See Configuration below for more details.
:param pulumi.Input[str] name: The name for this Object Lambda Access Point.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ObjectLambdaAccessPointState.__new__(_ObjectLambdaAccessPointState)
__props__.__dict__["account_id"] = account_id
__props__.__dict__["arn"] = arn
__props__.__dict__["configuration"] = configuration
__props__.__dict__["name"] = name
return ObjectLambdaAccessPoint(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> pulumi.Output[str]:
"""
The AWS account ID for the owner of the bucket for which you want to create an Object Lambda Access Point. Defaults to automatically determined account ID of the AWS provider.
"""
return pulumi.get(self, "account_id")
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
Amazon Resource Name (ARN) of the Object Lambda Access Point.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def configuration(self) -> pulumi.Output['outputs.ObjectLambdaAccessPointConfiguration']:
"""
A configuration block containing details about the Object Lambda Access Point. See Configuration below for more details.
"""
return pulumi.get(self, "configuration")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name for this Object Lambda Access Point.
"""
return pulumi.get(self, "name") | PypiClean |
/dbqq-1.5.0.tar.gz/dbqq-1.5.0/license.md | MIT License
Copyright (c) 2023 Chris
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| PypiClean |
/uniohomeassistant-0.1.3.tar.gz/uniohomeassistant-0.1.3/homeassistant/components/bond/utils.py | import logging
from typing import List, Optional
from bond_api import Action, Bond
_LOGGER = logging.getLogger(__name__)
class BondDevice:
"""Helper device class to hold ID and attributes together."""
def __init__(self, device_id: str, attrs: dict, props: dict):
"""Create a helper device from ID and attributes returned by API."""
self.device_id = device_id
self.props = props
self._attrs = attrs
def __repr__(self):
"""Return readable representation of a bond device."""
return {
"device_id": self.device_id,
"props": self.props,
"attrs": self._attrs,
}.__repr__()
@property
def name(self) -> str:
"""Get the name of this device."""
return self._attrs["name"]
@property
def type(self) -> str:
"""Get the type of this device."""
return self._attrs["type"]
@property
def trust_state(self) -> bool:
"""Check if Trust State is turned on."""
return self.props.get("trust_state", False)
def supports_speed(self) -> bool:
"""Return True if this device supports any of the speed related commands."""
actions: List[str] = self._attrs["actions"]
return bool([action for action in actions if action in [Action.SET_SPEED]])
def supports_direction(self) -> bool:
"""Return True if this device supports any of the direction related commands."""
actions: List[str] = self._attrs["actions"]
return bool([action for action in actions if action in [Action.SET_DIRECTION]])
def supports_light(self) -> bool:
"""Return True if this device supports any of the light related commands."""
actions: List[str] = self._attrs["actions"]
return bool(
[
action
for action in actions
if action in [Action.TURN_LIGHT_ON, Action.TURN_LIGHT_OFF]
]
)
def supports_set_brightness(self) -> bool:
"""Return True if this device supports setting a light brightness."""
actions: List[str] = self._attrs["actions"]
return bool([action for action in actions if action in [Action.SET_BRIGHTNESS]])
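def _example_bond_device() -> BondDevice:
    """Illustrative sketch only: the attribute and property payloads below are hypothetical
    stand-ins for what the Bond API returns, shown to demonstrate the helper class above."""
    device = BondDevice(
        "virtual-device-1",
        {"name": "Living room fan", "type": "CF", "actions": [Action.SET_SPEED, Action.TURN_LIGHT_ON]},
        {"trust_state": True},
    )
    # With these actions the capability helpers report speed and light support.
    assert device.supports_speed() and device.supports_light()
    return device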
class BondHub:
"""Hub device representing Bond Bridge."""
def __init__(self, bond: Bond):
"""Initialize Bond Hub."""
self.bond: Bond = bond
self._version: Optional[dict] = None
self._devices: Optional[List[BondDevice]] = None
async def setup(self):
"""Read hub version information."""
self._version = await self.bond.version()
_LOGGER.debug("Bond reported the following version info: %s", self._version)
# Fetch all available devices using Bond API.
device_ids = await self.bond.devices()
self._devices = [
BondDevice(
device_id,
await self.bond.device(device_id),
await self.bond.device_properties(device_id),
)
for device_id in device_ids
]
_LOGGER.debug("Discovered Bond devices: %s", self._devices)
@property
def bond_id(self) -> str:
"""Return unique Bond ID for this hub."""
return self._version["bondid"]
@property
def target(self) -> str:
"""Return this hub model."""
return self._version.get("target")
@property
def fw_ver(self) -> str:
"""Return this hub firmware version."""
return self._version.get("fw_ver")
@property
def devices(self) -> List[BondDevice]:
"""Return a list of all devices controlled by this hub."""
return self._devices
@property
def is_bridge(self) -> bool:
"""Return if the Bond is a Bond Bridge."""
        # If False, this is a Smart by Bond product. Assume it is a Bridge when the model is not available.
return self._version.get("model", "BD-").startswith("BD-") | PypiClean |
/bazaar_cli-0.1.0-py3-none-any.whl/bazaar_cli/bazaarwrapper.py | from enum import Enum
from typing import Union
import requests
MB_API = "https://mb-api.abuse.ch/api/v1"
# Query types supported by the MalwareBazaar API; each value maps to the API's "query" parameter.
class QueryType(Enum):
TAG = "get_taginfo"
SIG = "get_siginfo"
FILE_TYPE = "get_file_type"
RECENT = "get_recent"
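# Usage sketch (illustrative only; the API key and hash below are placeholders; see the
# Bazaar class defined next):
#
#   bz = Bazaar(api_key="YOUR-MALWAREBAZAAR-API-KEY")
#   tagged = bz.list_samples(QueryType.TAG, "Emotet", limit=10)
#   bz.download_sample("<64-character sha256 hash>")  # writes <hash>.zip to the working directory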
class Bazaar:
"""MalwareBazaar wrapper class."""
def __init__(self, api_key: str = None):
if api_key is None:
raise Exception("No API key specified")
self.api_key = api_key
self.headers = {"API-KEY": api_key}
def __repr__(self):
return "<bazaar.bazaarwrapper.Bazaar(api_key='{}')>".format(self.api_key)
def _query(
self,
url: str,
method="GET",
raw: bool = False,
data=None,
params=None,
) -> Union[dict, requests.Response]:
"""Perform a request using the `self._req` HTTP client.
Upon requesting a non-standard URL (not returning JSON),
the `raw` flag allow to return a `requests.Response` object
instead of a dictionnary.
"""
response = requests.request(
method,
url,
            data=data,
            params=params,
            headers=self.headers,
            timeout=50,
)
if raw:
return response # type: requests.Response
return response.json() # dict
def list_samples(self, query_type: QueryType, key: str, limit: int = 100) -> dict:
"""Currently only lists by tags."""
match query_type:
case QueryType.TAG:
key_type = "tag"
case QueryType.SIG:
key_type = "signature"
case QueryType.FILE_TYPE:
key_type = "file_type"
case QueryType.RECENT:
key_type = "selector"
case _:
key_type = "tag"
samples = self._query(
MB_API,
"POST",
data={"query": query_type.value, key_type: key, "limit": limit},
)
if samples.get("data", {}) == {}:
return samples.get('query_status')
return samples
def download_sample(self, sample_hash):
"""Download a sample by its hash."""
if len(sample_hash) != 64:
raise Exception("Hash is not recognized")
sample = self._query(
MB_API,
"POST",
data={"query": "get_file", "sha256_hash": sample_hash},
raw=True,
).content
if sample == b'{\n "query_status": "file_not_found"\n}':
raise Exception("File not found")
open(f"{sample_hash}.zip", "wb").write(sample) | PypiClean |
/Twista-0.3.4b1.tar.gz/Twista-0.3.4b1/twista/navigator.py | from flask import Flask, escape, request, Response, render_template, redirect, url_for, jsonify
from py2neo import Graph
from collections import Counter
from datetime import datetime as dt
from datetime import timedelta
import json
import os
from dateutil import parser, relativedelta
import random as rand
import string
templates = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')
statics = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'static')
app = Flask("Twista navigator", template_folder=templates, static_folder=statics)
app.jinja_options['extensions'].append('jinja2.ext.do')
graph = None
@app.template_filter()
def render_tweet(tweet, of={}, ctx=[]):
return render_template('tweet_snippet.html', tweet=tweet, user=of, ctx=ctx)
@app.template_filter()
def tweetlist(tweets):
return render_template('tweet_list.html', tweets=tweets)
@app.template_filter()
def mark(content, link="", n=0):
return f"<a href='{ link }'><span class='marker'>{ content }</span><span class='frequency'>{ n }</span></a>"
@app.template_filter()
def card(content, text="", title="", media="", actions=""):
return render_template('card_snippet.html', title=title, text=content, media=media, actions=actions)
@app.template_filter()
def chip(content, data=None):
if data:
return "".join([
f'<span class="mdl-chip mdl-chip--contact">',
f'<span class="mdl-chip__contact mdl-color--teal mdl-color-text--white">{ data }</span>',
f'<span class="mdl-chip__text">{ content }</span>',
f'</span>'
])
return f'<span class="mdl-chip"><span class="mdl-chip__text">{ content }</span></span>'
@app.template_filter()
def link(content, url, classes=[]):
cs = " ".join(classes)
return f"<a class='{ cs }' href='{ url }'>{ content }</a>"
@app.template_filter()
def datetime(dt):
return parser.parse(str(dt)).strftime("%Y-%m-%d %H:%M:%S")
def filter(args):
begin = args.get("begin", default="1970-01-01")
end = args.get("end", default=dt.now().strftime("%Y-%m-%d"))
if begin == "null":
begin = "1970-01-01"
if end == "null":
end = dt.now().strftime("%Y-%m-%d")
return (begin, end)
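# Illustration (not part of the original app): a request such as
#   /tag/sometag?begin=2020-01-01&end=null
# makes filter(request.args) return ("2020-01-01", <today's date>), so the datetime()
# comparisons in the Cypher queries below always receive valid bounds.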
@app.route('/')
def index():
return render_template('index.html')
@app.route('/tag/<id>')
def tag(id):
(begin, end) = filter(request.args)
return render_template('tag.html', tag=id)
@app.route('/tag/<id>/volume')
def tag_activity(id):
(begin, end) = filter(request.args)
volume = [(r['date'], r['n']) for r in graph.run("""
MATCH (tag:Tag) <-[:HAS_TAG]- (t:Tweet)
WHERE toUpper(tag.id) = toUpper({ id }) AND
t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN date(t.created_at) AS date, count(t) AS n
ORDER BY date
""", id=id, begin=begin, end=end)]
return jsonify([{
'x': [str(d) for d, n in volume],
'y': [n for d, n in volume],
'type': 'scatter',
'name': 'posts'
}
])
@app.route('/tag/<id>/behaviour')
def tag_behaviour(id):
(begin, end) = filter(request.args)
volume = [(r['type'], r['n']) for r in graph.run("""
MATCH (tag:Tag) <-[:HAS_TAG]- (t:Tweet)
WHERE toUpper(tag.id) = toUpper({ id }) AND
t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN t.type AS type, count(t) AS n
ORDER BY type
""", id=id, begin=begin, end=end)]
return jsonify([{
'labels': [t for t, n in volume],
'values': [n for d, n in volume],
'type': 'pie'
}
])
@app.route('/tag/<id>/tags')
def tag_correlated_tags(id):
(begin, end) = filter(request.args)
tags = [(r['tag'], r['n']) for r in graph.run("""
MATCH (tag:Tag) <-[:HAS_TAG]- (t:Tweet) -[:HAS_TAG]-> (other:Tag)
WHERE toUpper(tag.id) = toUpper({ id }) AND
t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end }) AND
tag <> other
RETURN toUpper(other.id) AS tag, count(other) AS n
ORDER BY n DESCENDING
LIMIT 50
""", id=id, begin=begin, end=end)]
return " ".join(
[link(chip("#" + tag, data=n), f"/tag/{tag}", classes=['filtered']) for tag, n in tags]
)
@app.route('/tag/<id>/mentioned_users')
def tag_correlated_users(id):
(begin, end) = filter(request.args)
users = [(r['user'], r['n']) for r in graph.run("""
MATCH (tag:Tag) <-[:HAS_TAG]- (t:Tweet) -[m:MENTIONS]-> (u:User)
WHERE toUpper(tag.id) = toUpper({ id }) AND
t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN u AS user, count(u) AS n
ORDER BY n DESCENDING
LIMIT 50
""", id=id, begin=begin, end=end)]
return "\n".join(
[link(chip("@" + user['screen_name'], data=n), f"/user/{ user['id'] }", classes=['filtered']) for user, n in users]
)
@app.route('/tag/<id>/posting_users')
def tag_posting_users(id):
(begin, end) = filter(request.args)
users = [(r['user'], r['n']) for r in graph.run("""
MATCH (tag:Tag) <-[:HAS_TAG]- (t:Tweet) <-[:POSTS]- (u:User)
WHERE toUpper(tag.id) = toUpper({ id }) AND
t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN u AS user, count(u) AS n
ORDER BY n DESCENDING
LIMIT 50
""", id=id, begin=begin, end=end)]
return "\n".join(
[link(chip("@" + user['screen_name'], data=n), f"/user/{ user['id'] }", classes=['filtered']) for user, n in users]
)
@app.route('/tweet/<id>')
def tweet(id):
(begin, end) = filter(request.args)
result = graph.run("MATCH (t:Tweet{id: {id}}) <-[:POSTS]- (u:User) RETURN t, u", id=id).data()
tweet = result[0]['t']
usr = result[0]['u']
context = [{ 'tweet': ctx['tweet'], 'user': ctx['usr'] } for ctx in graph.run("""
MATCH (:Tweet{id: {id}}) -[:REFERS_TO*]-> (tweet:Tweet) <-[:POSTS]- (usr:User)
WHERE tweet.created_at >= datetime({ begin }) AND
tweet.created_at <= datetime({ end })
RETURN tweet, usr
ORDER BY tweet.created_at DESCENDING
""", id=id, begin=begin, end=end)]
return render_template('tweet.html',
tweet=tweet,
user=usr,
ctx=context
)
@app.route('/tweet/<id>/interactions')
def tweet_interactions(id):
(begin, end) = filter(request.args)
tweet = graph.run("MATCH (t:Tweet{id: { id }}) RETURN t", id=id).evaluate()
volume = [(r['date'], r['hour'], r['n']) for r in graph.run("""
MATCH (:Tweet{id: { id }}) -[:REFERS_TO*]- (t:Tweet)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN date(t.created_at) AS date, t.created_at.hour AS hour, count(t) AS n
ORDER BY date, hour
""", id=id, begin=begin, end=end)]
d0 = dt(tweet['created_at'].year, tweet['created_at'].month, tweet['created_at'].day, tweet['created_at'].hour)
return jsonify([{
'x': [str(dt(d.year, d.month, d.day, h)) for d, h, n in volume],
'y': [n for d, h, n in volume],
'type': 'scatter',
'name': 'Interactions'
}, {
'x': [str(d0), str(d0)],
'y': [0, max([n for d, h, n in volume], default=0)],
'type': 'scatter',
'name': 'Current tweet'
}])
@app.route('/tweet/<id>/interaction-types')
def tweet_interaction_types(id):
(begin, end) = filter(request.args)
volume = [(r['type'], r['n']) for r in graph.run("""
MATCH (:Tweet{id: { id }}) -[:REFERS_TO*]- (t:Tweet)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN t.type AS type, count(t) AS n
ORDER BY type
""", id=id, begin=begin, end=end)]
return jsonify([{
'labels': [t for t, n in volume],
'values': [n for t, n in volume],
'type': 'pie',
}])
@app.route('/tweet/<id>/tags')
def tweet_tags(id):
(begin, end) = filter(request.args)
tags = [(r['tag'], r['n']) for r in graph.run("""
MATCH (:Tweet{id: { id }}) -[:REFERS_TO*]- (t:Tweet) -[:HAS_TAG]-> (tag:Tag)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN tag.id AS tag, count(tag) AS n
ORDER BY n DESCENDING
LIMIT 50
""", id=id, begin=begin, end=end)]
return " ".join(
[link(chip("#" + tag, data=n), f"/tag/{ tag }", classes=['filtered']) for tag, n in tags]
)
@app.route('/tweet/<id>/users')
def tweet_users(id):
(begin, end) = filter(request.args)
users = [(r['user'], r['n']) for r in graph.run("""
MATCH (:Tweet{id: { id }}) -[:REFERS_TO*]- (t:Tweet) <-[:POSTS]- (u:User)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN u AS user, count(u) AS n
ORDER BY n DESCENDING
LIMIT 50
""", id=id, begin=begin, end=end)]
return " ".join(
[link(chip("@" + usr['screen_name'], data=n), f"/user/{ usr['id'] }", classes=['filtered']) for usr, n in users]
)
@app.route('/tweet/<id>/tweets')
def tweet_related_tweets(id):
(begin, end) = filter(request.args)
tweets = [{ 'tweet': r['t'], 'user': r['u'] } for r in graph.run("""
MATCH (:Tweet{id: { id }}) -[:REFERS_TO*]- (t:Tweet) <-[:POSTS]- (u:User)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN t, u
ORDER BY t.created_at DESCENDING
""", id=id, begin=begin, end=end)]
return tweetlist(tweets)
@app.route('/user/<id>')
def user_as_html(id):
result = graph.run("MATCH (u:User{id: {id}}) RETURN u", id=id).evaluate()
return render_template('user.html', user=result)
@app.route('/user/<id>/behaviour')
def user_behaviour(id):
(begin, end) = filter(request.args)
result = [(r['type'], r['n']) for r in graph.run("""
MATCH (u:User{id: {id}}) -[:POSTS]-> (t:Tweet)
WHERE t.created_at >= datetime({begin}) AND t.created_at <= datetime({end})
RETURN t.type AS type, count(t) AS n
""", id=id, begin=begin, end=end)]
return jsonify([{
'labels': [t for t, n in result],
'values': [n for t, n in result],
'type': 'pie'
}])
@app.route('/user/<id>/activity')
def user_activity(id):
(begin, end) = filter(request.args)
posts = [(r['date'], r['n']) for r in graph.run("""
MATCH (u:User{id: {id}}) -[:POSTS]-> (t:Tweet)
WHERE t.created_at >= datetime({begin}) AND t.created_at <= datetime({end})
RETURN date(t.created_at) AS date, count(t) AS n
ORDER BY date
""", id=id, begin=begin, end=end)]
reactions = [(r['date'], r['n']) for r in graph.run("""
MATCH (u:User{id: {id}}) -[:POSTS]-> (:Tweet) <-[:REFERS_TO*]- (o:Tweet)
WHERE o.created_at >= datetime({begin}) AND o.created_at <= datetime({end})
RETURN date(o.created_at) AS date, count(o) AS n
ORDER BY date
""", id=id, begin=begin, end=end)]
mentions = [(r['date'], r['n']) for r in graph.run("""
MATCH (u:User{id: {id}}) <-[:MENTIONS]- (t:Tweet)
WHERE t.created_at >= datetime({begin}) AND t.created_at <= datetime({end})
RETURN date(t.created_at) AS date, count(t) AS n
ORDER BY date
""", id=id, begin=begin, end=end)]
return jsonify([{
'x': [str(t) for t, n in posts],
'y': [n for t, n in posts],
'name': 'posts',
'type': 'scatter'
}, {
'x': [str(t) for t, n in reactions],
'y': [n for t, n in reactions],
'name': 'reactions',
'type': 'scatter'
}, {
'x': [str(t) for t, n in mentions],
'y': [n for t, n in mentions],
'name': 'mentions',
'type': 'scatter'
}])
@app.route('/user/<id>/interactors')
def user_interactors(id):
(begin, end) = filter(request.args)
action = request.args.get("type", default="retweet")
result = " ".join([link(chip("@" + r['user']['screen_name'], data=r['n']), f"/user/{ r['user']['id'] }", classes=['filtered']) for r in graph.run("""
MATCH (u:User{id: {id}}) -[:POSTS]-> (:Tweet) <-[:REFERS_TO]- (t:Tweet{type: {action}}) <-[:POSTS]- (user:User)
WHERE t.created_at >= datetime({begin}) AND t.created_at <= datetime({end}) AND user <> u
RETURN user, count(user) AS n
ORDER BY n DESCENDING
LIMIT 50
""", id=id, begin=begin, end=end, action=action)])
return result
@app.route('/user/<id>/tags')
def user_tags(id):
(begin, end) = filter(request.args)
result = " ".join([link(chip("#" + r['tag'], data=r['n']), f"/tag/{ r['tag'] }", classes=['filtered']) for r in graph.run("""
MATCH (u:User{id: {id}}) -[:POSTS]-> (t:Tweet) -[:HAS_TAG]-> (tag:Tag)
WHERE t.created_at >= datetime({begin}) AND t.created_at <= datetime({end})
RETURN tag.id AS tag, count(tag) AS n
ORDER BY n DESCENDING
LIMIT 50
""", id=id, begin=begin, end=end)])
return result
@app.route('/user/<id>/contents')
def user_posts(id):
of = request.args.get("of")
tweets = [{ 'tweet': r['t'], 'user': r['u'] } for r in graph.run("""
MATCH (u:User{id: {id}}) -[:POSTS]-> (t:Tweet)
WHERE date(t.created_at) = date({of})
RETURN t, u, t.favourites AS n
ORDER BY n DESCENDING
""", id=id, of=of)]
print(tweets)
return tweetlist(tweets)
@app.route('/user/<id>/info')
def user_info(id):
(begin, end) = filter(request.args)
user = graph.run("MATCH (u:User{id: {id}}) RETURN u", id=id).evaluate()
return render_template('user_info.html', user=user)
@app.route('/user/<id>/punchcard')
def user_punchcard(id):
(begin, end) = filter(request.args)
pc = { d: { h: 0 for h in range(24) } for d in range(1, 8) }
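# pc maps Neo4j ISO weekdays (1 = Monday .. 7 = Sunday) to hour-of-day counts,
# pre-filled with zeros so hours without tweets still render in the heatmap.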
for r in graph.run("""
MATCH (u:User{id: {id}}) -[:POSTS]-> (t:Tweet)
WHERE t.created_at >= datetime({begin}) AND t.created_at <= datetime({end})
RETURN t.created_at.weekday AS day, t.created_at.hour AS hour, count(t) AS n
""", id=id, begin=begin, end=end):
pc[r['day']][r['hour']] = r['n']
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
hours = range(24)
data = [{
'x': [f"{h}h" for h in hours],
'y': weekdays,
'z': [[pc[d][h] for h in hours] for d in range(1, 8)],
'colorscale': [
['0.0', '#3F51B600'],
['0.1', '#3F51B611'],
['0.2', '#3F51B622'],
['0.3', '#3F51B633'],
['0.4', '#3F51B644'],
['0.5', '#3F51B655'],
['0.6', '#3F51B677'],
['0.7', '#3F51B699'],
['0.8', '#3F51B6BB'],
['0.9', '#3F51B6DD'],
['1.0', '#3F51B6FF']
],
'type': 'heatmap'
}]
return jsonify(data)
@app.route('/user/<id>/network')
def user_network(id):
(begin, end) = filter(request.args)
user = graph.run("MATCH (u:User{id: {id}}) RETURN u", id=id).evaluate()
new = set([user['id']])
process = new.copy()
scanned = new.copy()
retweeters = []
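# Expand the retweet network in three rounds starting from the seed user:
# round 1 keeps up to 50 retweeters, later rounds keep roughly 5 per user
# found so far (the Cypher LIMIT is len(new) * n); `scanned` stops users that
# were already processed from being queried again.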
for n in [50, 5, 5]:
retweeters.extend([(r['u'], r['rt'], r['n']) for r in graph.run("""
UNWIND {uids} AS uid
MATCH (u:User{id: uid}) -[:POSTS]-> (:Tweet) <-[:REFERS_TO]- (t:Tweet{type: 'retweet'}) <-[:POSTS]- (rt:User)
WHERE t.created_at >= datetime({ begin }) AND t.created_at <= datetime({ end }) AND u <> rt
RETURN u, rt, count(rt) AS n
ORDER BY n DESCENDING
LIMIT {n}
""", uids=list(process), begin=begin, end=end, n=len(new) * n)])
new = set([r['id'] for u, r, _ in retweeters])
process = new - scanned
scanned = scanned.union(new)
nodes = [u for u, _, _ in retweeters]
nodes.extend([rt for _, rt, _ in retweeters])
mark = lambda n: 'start' if (n['id'] == user['id']) else 'follow'
network = {
'nodes': [{ 'data': { 'id': u['id'], 'screen_name': "@" + u['screen_name'], 'select': mark(u) }} for u in set(nodes)],
'edges': [{ 'data': { 'source': u['id'], 'target': rt['id'], 'directed': True, 'qty': n }} for u, rt, n in retweeters]
}
return jsonify(network)
# return render_template('network.js', user=user, elements=json.dumps(network))
@app.route('/tweets/volume')
def tweets_volume():
(begin, end) = filter(request.args)
tweets = [(r['date'], r['n']) for r in graph.run("""
MATCH (t:Tweet)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN date(t.created_at) AS date, count(t) AS n
""", begin=begin, end=end)]
users = [(r['date'], r['n']) for r in graph.run("""
MATCH (t:Tweet) <-[:POSTS]- (u:User)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN date(t.created_at) AS date, count(distinct(u)) AS n
""", begin=begin, end=end)]
return jsonify([
{
'x': [str(d) for d, n in tweets],
'y': [n for d, n in tweets],
'type': 'scatter',
'name': 'postings'
}, {
'x': [str(d) for d, n in users],
'y': [n for d, n in users],
'type': 'scatter',
'name': 'active unique users'
}
])
@app.route('/tweets/tags')
def tweets_tags_volume():
(begin, end) = filter(request.args)
volume = [(r['tag'], r['n']) for r in graph.run("""
MATCH (t:Tweet) -[:HAS_TAG]-> (tag:Tag)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN tag.id AS tag, count(tag) AS n
ORDER BY n DESCENDING
LIMIT 50
""", begin=begin, end=end)]
return " ".join(
[link(chip("#" + tag, data=n), f"/tag/{tag}", classes=["filtered"]) for tag, n in volume]
)
@app.route('/tweets/posting-users')
def tweets_most_posting_users():
(begin, end) = filter(request.args)
volume = [(r['u'], r['n']) for r in graph.run("""
MATCH (t:Tweet) <-[:POSTS]- (u:User)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN u, count(t) AS n
ORDER BY n DESCENDING
LIMIT 50
""", begin=begin, end=end)]
return " ".join(
[link(chip("@" + usr['screen_name'], data=n), f"/user/{usr['id']}", classes=["filtered"]) for usr, n in volume]
)
@app.route('/tweets/mentioned-users')
def tweets_most_mentioned_users():
(begin, end) = filter(request.args)
volume = [(r['u'], r['n']) for r in graph.run("""
MATCH (t:Tweet) -[:MENTIONS]-> (u:User)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN u, count(t) AS n
ORDER BY n DESCENDING
LIMIT 50
""", begin=begin, end=end)]
return " ".join(
[link(chip("@" + usr['screen_name'], data=n), f"/user/{usr['id']}", classes=["filtered"]) for usr, n in volume]
)
@app.route('/tweets/types')
def tweets_type_volume():
(begin, end) = filter(request.args)
volume = [(r['type'], r['n']) for r in graph.run("""
MATCH (t:Tweet)
WHERE t.created_at >= datetime({ begin }) AND
t.created_at <= datetime({ end })
RETURN t.type AS type, count(t) AS n
ORDER BY type
""", begin=begin, end=end)]
return jsonify([{
'labels': [t for t, n in volume],
'values': [n for t, n in volume],
'type': 'pie'
}])
@app.route('/search')
def search():
(begin, end) = filter(request.args)
search = request.args.get("searchterm", default="")
entity = request.args.get("type", default="users")
return render_template('search.html', searchterm=search, type=entity)
@app.route('/search/tweet')
def search_tweets():
search = request.args.get("searchterm", default="")
return f"Searching for '{search}' in tweets basically works"
@app.route('/search/user')
def search_users():
search = request.args.get("searchterm", default="")
hits = [r['user'] for r in graph.run("""
CALL db.index.fulltext.queryNodes('users', { search }) YIELD node AS user, score
MATCH (user:User) -[:POSTS]-> (t:Tweet) <-[:REFERS_TO]- (r:Tweet)
RETURN user, count(r) AS i
ORDER BY i DESCENDING
LIMIT 1000
""", search=search)]
return render_template('users_list.html', users=hits, search=search)
@app.route('/retweets/')
def get_retweets():
(begin, end) = filter(request.args)
sid = request.args.get("source")
tid = request.args.get("target")
result = [{ 'tweet': r['x'], 'user': r['u'] } for r in graph.run("""
MATCH (u:User{id: {sid}}) -[:POSTS]-> (x:Tweet) <-[:REFERS_TO]- (t:Tweet{type:'retweet'}) <-[:POSTS]- (v:User{id:{tid}})
WHERE t.created_at >= datetime({begin}) AND t.created_at <= datetime({end})
RETURN x, u
ORDER BY x.created_at
""", sid=sid, tid=tid, begin=begin, end=end)]
return render_template('tweet_list.html', tweets=result)
@app.route('/stats/postings')
def stats_for_postings():
(begin, end) = filter(request.args)
N = 10000
result = [(r['duration'], r['n']) for r in graph.run("""
MATCH (u:User) -[:POSTS]-> (t:Tweet)
WHERE t.created_at >= datetime({begin}) AND
t.created_at <= datetime({end})
RETURN duration.inDays(datetime({begin}), datetime({end})) AS duration, count(t) AS n
LIMIT { N }
""", begin=begin, end=end, N=N)]
r = Counter([d.days // n for d, n in result])
return jsonify({ f: n / N * 100 for f, n in r.items() })
def start(settings):
global graph
url = settings['neo4j_url']
usr = settings['neo4j_usr']
pwd = settings['neo4j_pwd']
graph = Graph(url, auth=(usr, pwd))
app.run() | PypiClean |
/kuda_cli-0.1.0-py3-none-any.whl/kuda_cli/savings.py | from typing import Optional
from pykuda2.utils import TransactionType
from typer import Typer
from kuda_cli.utils import get_kuda_wrapper, strip_raw, override_output, colorized_print
savings_app = Typer()
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def create_plain_savings_account(
name: str,
tracking_reference: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.create_plain_savings_account(
name=name,
tracking_reference=tracking_reference,
request_reference=request_reference,
)
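# Illustrative invocation only (the actual executable name depends on how the
# Typer app is mounted; Typer turns the function name into a kebab-case
# subcommand and keyword parameters into options):
#
#   $ kuda-cli savings create-plain-savings-account "Rainy Day" TRK-001 \
#         --request-reference REQ-001 --data-only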
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def get_plain_savings_account(
tracking_reference: str,
primary_account_number: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.get_plain_savings_account(
tracking_reference=tracking_reference,
primary_account_number=primary_account_number,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def get_plain_savings_accounts(
tracking_reference: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.get_plain_savings_accounts(
tracking_reference=tracking_reference, request_reference=request_reference
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def credit_or_debit_plain_savings_account(
amount: int,
narration: str,
transaction_type: TransactionType,
tracking_reference: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.credit_or_debit_plain_savings_account(
amount=amount,
narration=narration,
transaction_type=transaction_type,
tracking_reference=tracking_reference,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def get_plain_savings_account_transactions(
page_size: int,
page_number: int,
tracking_reference: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.get_plain_savings_account_transactions(
page_size=page_size,
page_number=page_number,
tracking_reference=tracking_reference,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def create_open_flexible_savings_account(
savings_tracking_reference: str,
name: str,
virtual_account_tracking_reference: str,
amount: int,
duration: str,
frequency: str,
start_now: bool,
start_date: Optional[str],
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.create_open_flexible_savings_account(
name=name,
virtual_account_tracking_reference=virtual_account_tracking_reference,
amount=amount,
duration=duration,
frequency=frequency,
start_now=start_now,
start_date=start_date,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def pre_create_open_flexible_savings_account(
savings_tracking_reference: str,
name: str,
virtual_account_tracking_reference: str,
amount: int,
duration: str,
frequency: str,
start_now: bool,
start_date: str,
is_interest_earning: bool,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.pre_create_open_flexible_savings_account(
savings_tracking_reference=savings_tracking_reference,
name=name,
virtual_account_tracking_reference=virtual_account_tracking_reference,
amount=amount,
duration=duration,
frequency=frequency,
start_now=start_now,
start_date=start_date,
is_interest_earning=is_interest_earning,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def get_open_flexible_savings_account(
tracking_reference: str,
primary_account_number: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.get_open_flexible_savings_account(
tracking_reference=tracking_reference,
primary_account_number=primary_account_number,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def get_open_flexible_savings_accounts(
tracking_reference: str,
primary_account_number: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.get_open_flexible_savings_accounts(
tracking_reference=tracking_reference,
primary_account_number=primary_account_number,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def withdrawal_from_flexible_savings_account(
amount: int,
tracking_reference: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.withdrawal_from_flexible_savings_account(
amount=amount,
tracking_reference=tracking_reference,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def get_flexible_savings_account_transactions(
tracking_reference: str,
page_size: int,
page_number: int,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.get_flexible_savings_account_transactions(
tracking_reference=tracking_reference,
page_size=page_size,
page_number=page_number,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def create_fixed_savings_account(
savings_tracking_reference: str,
name: str,
virtual_account_tracking_reference: str,
amount: int,
duration: str,
frequency: str,
start_now: bool,
start_date: str,
is_interest_earning: bool,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.create_fixed_savings_account(
savings_tracking_reference=savings_tracking_reference,
name=name,
virtual_account_tracking_reference=virtual_account_tracking_reference,
amount=amount,
duration=duration,
frequency=frequency,
start_now=start_now,
start_date=start_date,
is_interest_earning=is_interest_earning,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def get_fixed_savings_account(
tracking_reference: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.get_fixed_savings_account(
tracking_reference=tracking_reference, request_reference=request_reference
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def get_fixed_savings_accounts(
tracking_reference: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.get_fixed_savings_accounts(
tracking_reference=tracking_reference, request_reference=request_reference
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def close_fixed_savings_account(
amount: int,
tracking_reference: str,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.close_fixed_savings_account(
amount=amount,
tracking_reference=tracking_reference,
request_reference=request_reference,
)
@savings_app.command()
@colorized_print
@override_output
@strip_raw
def get_fixed_savings_account_transactions(
tracking_reference: str,
page_number: int,
page_size: int,
request_reference: Optional[str] = None,
data_only: bool = False,
):
return get_kuda_wrapper().savings.get_fixed_savings_account_transactions(
tracking_reference=tracking_reference,
page_number=page_number,
page_size=page_size,
request_reference=request_reference,
) | PypiClean |
/faux_data-0.0.18-py3-none-any.whl/faux_data/target.py |
import abc
import logging
import os
import time
from abc import abstractmethod
from dataclasses import dataclass, field
from typing import Optional, Tuple
import pandas as pd
from .config import settings
@dataclass(kw_only=True)
class Target(abc.ABC):
"""Base class for all targets."""
target: str
@abstractmethod
def save(self, tbl):
pass
@dataclass(kw_only=True)
class PartitionedFileTarget(Target):
"""Base class for targets that create partitioned files."""
filetype: str
partition_cols: list[str] = field(default_factory=list)
@abstractmethod
def construct_path(self, partition_path=None) -> str:
pass
def pre_save_object(self, path):
pass
def save(self, tbl):
if self.partition_cols:
partitions = tbl.df.groupby(self.partition_cols)
for partition in partitions:
if len(self.partition_cols) == 1:
# 1 partition col
partition_path = f"{self.partition_cols[0]}={partition[0]}"
else:
# multiple partition cols
partition_path = '/'.join((f"{p}={v}" for p,v in zip(self.partition_cols, partition[0])))
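# e.g. partition_cols ['dt', 'currency'] with group key ('2022-01-01', 'USD')
# gives the hive-style path fragment 'dt=2022-01-01/currency=USD'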
path = self.construct_path(partition_path)
df = partition[1].drop(self.partition_cols, axis=1)
self.save_object(df, path)
else:
path = self.construct_path()
self.save_object(tbl.df, path)
def save_object(self, df, path):
self.pre_save_object(path)
logging.debug(f"saving data to {path}")
match self.filetype:
case 'csv':
df.to_csv(path, index=False)
case 'parquet':
df.to_parquet(path, index=False)
case _:
raise Exception(f"unrecognised filetype: [{self.filetype}]")
@dataclass(kw_only=True)
class CloudStorage(PartitionedFileTarget, Target):
"""
Target that creates files in cloud storage.
Supports csv and parquet `filetype`s.
Usage:
targets:
- target: CloudStorage
filetype: csv / parquet
bucket: mybucket # the cloud storage bucket to save to
prefix: my/prefix # the path prefix to give to all objects
filename: myfile.csv # the name of the file
# Optional params
partition_cols: [col1, col2] # Optional. The columns within the dataset to partition on.
If partition_cols is specified then data will be split into separate files and loaded to cloud storage
with filepaths that follow the hive partitioning structure.
e.g. If a dataset has dt and currency columns and these are specified as partition_cols
then you might expect the following files to be created:
- gs://bucket/prefix/dt=2022-01-01/currency=USD/filename
- gs://bucket/prefix/dt=2022-01-01/currency=EUR/filename
"""
bucket: str
prefix: str
filename: str
def construct_path(self, partition_path: Optional[str] = None) -> str:
"""Constructs the cloud storage path for a file."""
if partition_path:
return f"gs://{self.bucket}/{self.prefix}/{partition_path}/{self.filename}"
else:
return f"gs://{self.bucket}/{self.prefix}/{self.filename}"
@dataclass(kw_only=True)
class LocalFile(PartitionedFileTarget, Target):
"""
Target that creates files on the local file system
Supports csv and parquet `filetype`s.
Usage:
targets:
- target: LocalFile
filetype: csv / parquet
filepath: path/to/myfile # an absolute or relative base path
filename: myfile.csv # the name of the file
# Optional params
partition_cols: [col1, col2] # Optional. The columns within the dataset to partition on.
If partition_cols is specified then data will be split into separate files and
separate files / directories will be created with filepaths that follow the hive partitioning structure.
e.g. If a dataset has dt and currency columns and these are specified as partition_cols
then you might expect the following files to be created:
- filepath/dt=2022-01-01/currency=USD/filename
- filepath/dt=2022-01-01/currency=EUR/filename
"""
filepath: str
filename: str
def construct_path(self, partition_path: Optional[str] = None) -> str:
"""Constructs the filepath for a local file."""
if partition_path:
return f"{self.filepath}/{partition_path}/{self.filename}"
else:
return f"{self.filepath}/{self.filename}"
def pre_save_object(self, path: str) -> None:
"""Before saving files check and create any dirs."""
if not os.path.exists(os.path.dirname(path)):
logging.debug(f"creating dir {os.path.dirname(path)}")
os.makedirs(os.path.dirname(path), exist_ok=True)
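# Illustrative sketch only -- `tbl` is normally supplied by the faux_data
# framework; a stand-in object with just a `.df` attribute is enough here:
#
# import pandas as pd
# target = LocalFile(target='LocalFile', filetype='csv', filepath='out',
#                    filename='data.csv', partition_cols=['dt'])
# class FakeTbl: df = pd.DataFrame({'dt': ['2022-01-01'], 'v': [1]})
# target.save(FakeTbl())  # -> out/dt=2022-01-01/data.csv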
@dataclass(kw_only=True)
class BigQuery(Target):
"""
Target that loads data to BigQuery tables.
This will create datasets / tables that don't currently exist, or load data to existing tables.
Usage:
targets:
- target: BigQuery
dataset: mydataset # the name of the dataset where the table belongs
table: mytable # the name of the table to load to
# Optional parameters
project: myproject # the GCP project where the dataset exists defaults to the system default
truncate: True # whether to clear the table before loading, defaults to False
post_generation_sql: "INSERT INTO xxx" # A query that will be run after the data has been inserted
"""
project: str | None = None
dataset: str
table: str
truncate: bool = False
post_generation_sql: str | None = None
client = None
bigquery = None
def setup(self):
"""Setup the BQ client for the target."""
from google.cloud import bigquery
self.bigquery = bigquery
if not self.project:
self.project = settings.gcp_project_id
if not self.client:
self.client = bigquery.Client(self.project)
def get_or_create_dataset(self, dataset_id: str):
"""Check whether the dataset exists or create if not."""
try:
dataset = self.client.get_dataset(dataset_id)
except Exception as e:
logging.error(e)
logging.info(f"Dataset {dataset_id} does not exist. Creating.")
dataset = self.bigquery.Dataset(dataset_id)
dataset.location = 'europe-west2'
dataset = self.client.create_dataset(dataset)
return dataset
def save(self, tbl):
"""The save method is called when this target is executed."""
self.setup()
dataset_id = f"{self.project}.{self.dataset}"
schema_table = f"{self.project}.{self.dataset}.{self.table}"
dataset = self.get_or_create_dataset(dataset_id)
job_config = None
if self.truncate:
job_config = self.bigquery.LoadJobConfig(
write_disposition=self.bigquery.WriteDisposition.WRITE_TRUNCATE)
logging.info(f"Uploading {tbl.name} data to {schema_table}")
result = self.client.load_table_from_dataframe(
tbl.df, schema_table, job_config=job_config).result()
if self.post_generation_sql and result.state == "DONE":
self.client.query(self.post_generation_sql.format(t=self),
project=self.project).result()
logging.info(
f"Result: {result.state} {result.output_rows} rows written to {result.destination}"
)
@dataclass(kw_only=True)
class StreamingTarget(Target):
"""Base class for targets that send data to streaming systems."""
@abstractmethod
def process_row(self, row, row_attrs):
pass
@abstractmethod
def setup(self):
pass
@abstractmethod
def save(self, tbl):
pass
@dataclass(kw_only=True)
class Pubsub(StreamingTarget, Target):
"""
Target that publishes data to Pubsub.
This target converts the data into json format and publishes each row as a separate pubsub message.
It expects the topic to already exist.
Usage:
targets:
- target: Pubsub
topic: mytopic # the name of the topic
# Optional parameters
project: myproject # the GCP project where the topic exists defaults to the system default
output_cols: [col1, col2] # the columns to convert to json and use for the message body
attribute_cols: [col3, col4] # the columns to pass as pubsub message attributes, these columns will be removed from the message body unless they are also specified in the output_cols
attributes: # additional attributes to add to the pubsub messages
key1: value1
key2: value2
delay: 0.01 # the time in seconds to wait between each publish, default is 0.01
date_format: iso # how timestamp fields should be formatted in the JSON, either iso or epoch
time_unit: s # the resolution to use for timestamps, s, ms, us etc.
"""
topic: str
project: Optional[str] = None
output_cols: list[str] = field(default_factory=list)
attribute_cols: list[str] = field(default_factory=list)
attributes: dict[str,str] = field(default_factory=dict)
delay: float = 0.01
date_format: str = 'iso' # or epoch
time_unit: str = 'ms'
validate_first: bool = True
client = None
def __post_init__(self):
if not self.project:
self.project = settings.gcp_project_id
@property
def topic_path(self):
return f"projects/{self.project}/topics/{self.topic}"
def setup(self):
from google.cloud import pubsub_v1
if not self.client:
self.client = pubsub_v1.PublisherClient()
def process_row(self, row, row_attrs):
return self.client.publish(self.topic_path, row.encode(), **row_attrs, **self.attributes)
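# process_df splits the frame: attribute_cols are stringified and sent as
# per-message attributes, output_cols (if given) form the message body,
# otherwise the body is every column except the attribute columns.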
def process_df(self, df) -> Tuple[pd.DataFrame, Optional[pd.DataFrame]]:
if self.attribute_cols:
attributes_df = df[self.attribute_cols].astype('string')
else:
attributes_df = None
if self.output_cols:
data_df = df[self.output_cols]
else:
data_df = df.drop(self.attribute_cols, axis=1)
return data_df, attributes_df
def save(self, tbl):
self.setup()
data_df, attributes_df = self.process_df(tbl.df)
json_data = data_df.to_json(
orient='records',
lines=True,
date_format=self.date_format,
date_unit=self.time_unit).strip().split("\n")
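# json_data is newline-delimited JSON (orient='records', lines=True), i.e. one
# JSON document per row; each entry below becomes its own Pub/Sub message.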
for i, row in enumerate(json_data):
if attributes_df is not None:
row_attrs = attributes_df.iloc[i].to_dict()
else:
row_attrs = {}
if self.validate_first:
res = self.process_row(row, row_attrs)
logging.info(f"publishing first message to topic [{self.topic_path}]" \
f" with data: [{row}]" \
f" and attributes: [{row_attrs}]" \
f"message_id: {res.result()}")
self.validate_first = False
else:
res = self.process_row(row, row_attrs)
if self.delay > 0:
time.sleep(self.delay) | PypiClean |
/django-cms_wg-3.0.0.beta2.tar.gz/django-cms_wg-3.0.0.beta2/cms/plugins/picture/models.py | from cms.utils.compat.dj import python_2_unicode_compatible
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from cms.models import CMSPlugin, Page
from os.path import basename
@python_2_unicode_compatible
class Picture(CMSPlugin):
"""
A Picture with or without a link.
"""
LEFT = "left"
RIGHT = "right"
CENTER = "center"
FLOAT_CHOICES = ((LEFT, _("left")),
(RIGHT, _("right")),
(CENTER, _("center")),
)
image = models.ImageField(_("image"), upload_to=CMSPlugin.get_media_path)
url = models.CharField(_("link"), max_length=255, blank=True, null=True,
help_text=_("If present, clicking on image will take user to link."))
page_link = models.ForeignKey(Page, verbose_name=_("page"), null=True,
limit_choices_to={'publisher_is_draft': True},
blank=True, help_text=_("If present, clicking on image will take user \
to specified page."))
alt = models.CharField(_("alternate text"), max_length=255, blank=True,
null=True, help_text=_("Specifies an alternate text for an image, if \
the image cannot be displayed.<br />Is also used by search engines to \
classify the image."))
longdesc = models.CharField(_("long description"), max_length=255,
blank=True, null=True, help_text=_("When user hovers above picture,\
this text will appear in a popup."))
float = models.CharField(_("side"), max_length=10, blank=True, null=True,
choices=FLOAT_CHOICES, help_text=_("Move image left, right or center."))
def __str__(self):
if self.alt:
return self.alt[:40]
elif self.image:
# Check the image first because an AttributeError was raised when the file wasn't defined.
try:
return u"%s" % basename(self.image.path)
except:
pass
return "<empty>"
def clean(self):
if self.url and self.page_link:
raise ValidationError(
_("You can enter a Link or a Page, but not both.")) | PypiClean |
/msl-io-0.1.0.tar.gz/msl-io-0.1.0/msl/io/readers/spreadsheet.py | import re
import string
_cell_regex = re.compile(r'^([A-Z]+)(\d*)$')
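# Matches a cell reference: group 1 is the column letters (e.g. 'AA'),
# group 2 the optional row number (e.g. '10' in 'AA10').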
class Spreadsheet(object):
def __init__(self, file):
"""Generic class for spreadsheets.
Parameters
----------
file : :class:`str`
The location of the spreadsheet on a local hard drive or on a network.
"""
self._file = file
@property
def file(self):
""":class:`str`: The location of the spreadsheet on a local hard drive or on a network."""
return self._file
def read(self, cell=None, sheet=None, as_datetime=True):
"""Read values from the spreadsheet.
Parameters
----------
cell : :class:`str`, optional
The cell(s) to read. For example, ``C9`` will return a single value
and ``C9:G20`` will return all values in the specified range. If not
specified then returns all values in the specified `sheet`.
sheet : :class:`str`, optional
The name of the sheet to read the value(s) from. If there is only
one sheet in the spreadsheet then you do not need to specify the name
of the sheet.
as_datetime : :class:`bool`, optional
Whether dates should be returned as :class:`~datetime.datetime` or
:class:`~datetime.date` objects. If :data:`False` then dates are
returned as a string.
Returns
-------
The value(s) of the requested cell(s).
"""
raise NotImplementedError
def sheet_names(self):
"""Get the names of all sheets in the spreadsheet.
Returns
-------
:class:`tuple` of :class:`str`
The names of all sheets.
"""
raise NotImplementedError
@staticmethod
def to_letters(index):
"""Convert a column index to column letters.
Parameters
----------
index : :class:`int`
The column index (zero based).
Returns
-------
:class:`str`
The corresponding spreadsheet column letter(s).
Examples
--------
.. invisible-code-block: pycon
>>> from msl.io.readers.spreadsheet import Spreadsheet
>>> to_letters = Spreadsheet.to_letters
>>> to_letters(0)
'A'
>>> to_letters(1)
'B'
>>> to_letters(26)
'AA'
>>> to_letters(702)
'AAA'
>>> to_letters(494264)
'ABCDE'
"""
letters = []
uppercase = string.ascii_uppercase
while index >= 0:
div, mod = divmod(index, 26)
letters.append(uppercase[mod])
index = div - 1
return ''.join(letters[::-1])
@staticmethod
def to_indices(cell):
"""Convert a string representation of a cell to row and column indices.
Parameters
----------
cell : :class:`str`
The cell. Can be letters only (a column) or letters and a number
(a column and a row).
Returns
-------
:class:`tuple`
The (row_index, column_index). If `cell` does not contain a row number
then the row index is :data:`None`. The row and column index are zero based.
Examples
--------
.. invisible-code-block: pycon
>>> from msl.io.readers.spreadsheet import Spreadsheet
>>> to_indices = Spreadsheet.to_indices
>>> to_indices('A')
(None, 0)
>>> to_indices('A1')
(0, 0)
>>> to_indices('AA10')
(9, 26)
>>> to_indices('AAA111')
(110, 702)
>>> to_indices('MSL123456')
(123455, 9293)
>>> to_indices('BIPM')
(None, 41664)
"""
match = _cell_regex.match(cell)
if not match:
raise ValueError('Invalid cell {!r}'.format(cell))
letters, numbers = match.groups()
row = max(0, int(numbers) - 1) if numbers else None
uppercase = string.ascii_uppercase
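# Column letters are bijective base-26: 'A' -> 1 ... 'Z' -> 26, so e.g.
# 'AA' = 26*1 + 1 = 27; the -1 below makes the index zero based.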
col = sum(
(26**i) * (1+uppercase.index(c))
for i, c in enumerate(letters[::-1])
)
return row, col-1
@staticmethod
def to_slices(cells, row_step=None, column_step=None):
"""Convert a range of cells to slices of row and column indices.
Parameters
----------
cells : :class:`str`
The cells. Can be letters only (a column) or letters and a number
(a column and a row).
row_step : :class:`int`, optional
The step-by value for the row slice.
column_step : :class:`int`, optional
The step-by value for the column slice.
Returns
-------
:class:`slice`
The row slice.
:class:`slice`
The column slice.
Examples
--------
.. invisible-code-block: pycon
>>> from msl.io.readers.spreadsheet import Spreadsheet
>>> to_slices = Spreadsheet.to_slices
>>> to_slices('A:A')
(slice(0, None, None), slice(0, 1, None))
>>> to_slices('A:H')
(slice(0, None, None), slice(0, 8, None))
>>> to_slices('B2:M10')
(slice(1, 10, None), slice(1, 13, None))
>>> to_slices('A5:M100', row_step=2, column_step=4)
(slice(4, 100, 2), slice(0, 13, 4))
"""
split = cells.split(':')
if len(split) != 2:
raise ValueError('Invalid cell range {!r}'.format(cells))
r1, c1 = Spreadsheet.to_indices(split[0])
r2, c2 = Spreadsheet.to_indices(split[1])
if r1 is None:
r1 = 0
if r2 is not None:
r2 += 1
if c2 is not None:
c2 += 1
return slice(r1, r2, row_step), slice(c1, c2, column_step) | PypiClean |
/jupyterlab_remote_contents-0.1.1.tar.gz/jupyterlab_remote_contents-0.1.1/node_modules/url-parse/dist/url-parse.min.js | !function(e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):("undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this).URLParse=e()}(function(){return function n(r,s,a){function i(o,e){if(!s[o]){if(!r[o]){var t="function"==typeof require&&require;if(!e&&t)return t(o,!0);if(p)return p(o,!0);throw(e=new Error("Cannot find module '"+o+"'")).code="MODULE_NOT_FOUND",e}t=s[o]={exports:{}},r[o][0].call(t.exports,function(e){return i(r[o][1][e]||e)},t,t.exports,n,r,s,a)}return s[o].exports}for(var p="function"==typeof require&&require,e=0;e<a.length;e++)i(a[e]);return i}({1:[function(e,t,o){!function(a){!function(){"use strict";var f=e("requires-port"),h=e("querystringify"),o=/^[\x00-\x20\u00a0\u1680\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff]+/,d=/[\n\r\t]/g,s=/^[A-Za-z][A-Za-z0-9+-.]*:\/\//,i=/:\d+$/,p=/^([a-z][a-z0-9.+-]*:)?(\/\/)?([\\/]+)?([\S\s]*)/i,v=/^[a-zA-Z]:/;function m(e){return(e||"").toString().replace(o,"")}var w=[["#","hash"],["?","query"],function(e,o){return g(o.protocol)?e.replace(/\\/g,"/"):e},["/","pathname"],["@","auth",1],[NaN,"host",void 0,1,1],[/:(\d*)$/,"port",void 0,1],[NaN,"hostname",void 0,1,1]],r={hash:1,query:1};function y(e){var o,t="undefined"!=typeof window?window:void 0!==a?a:"undefined"!=typeof self?self:{},t=t.location||{},n={},t=typeof(e=e||t);if("blob:"===e.protocol)n=new C(unescape(e.pathname),{});else if("string"==t)for(o in n=new C(e,{}),r)delete n[o];else if("object"==t){for(o in e)o in r||(n[o]=e[o]);void 0===n.slashes&&(n.slashes=s.test(e.href))}return n}function g(e){return"file:"===e||"ftp:"===e||"http:"===e||"https:"===e||"ws:"===e||"wss:"===e}function b(e,o){e=(e=m(e)).replace(d,""),o=o||{};var t,e=p.exec(e),n=e[1]?e[1].toLowerCase():"",r=!!e[2],s=!!e[3],a=0;return r?a=s?(t=e[2]+e[3]+e[4],e[2].length+e[3].length):(t=e[2]+e[4],e[2].length):s?(t=e[3]+e[4],a=e[3].length):t=e[4],"file:"===n?2<=a&&(t=t.slice(2)):g(n)?t=e[4]:n?r&&(t=t.slice(2)):2<=a&&g(o.protocol)&&(t=e[4]),{protocol:n,slashes:r||g(n),slashesCount:a,rest:t}}function C(e,o,t){if(e=(e=m(e)).replace(d,""),!(this instanceof C))return new C(e,o,t);var n,r,s,a,i,u=w.slice(),p=typeof o,c=this,l=0;for("object"!=p&&"string"!=p&&(t=o,o=null),t&&"function"!=typeof t&&(t=h.parse),n=!(p=b(e||"",o=y(o))).protocol&&!p.slashes,c.slashes=p.slashes||n&&o.slashes,c.protocol=p.protocol||o.protocol||"",e=p.rest,("file:"===p.protocol&&(2!==p.slashesCount||v.test(e))||!p.slashes&&(p.protocol||p.slashesCount<2||!g(c.protocol)))&&(u[3]=[/(.*)/,"pathname"]);l<u.length;l++)"function"!=typeof(s=u[l])?(r=s[0],i=s[1],r!=r?c[i]=e:"string"==typeof r?~(a="@"===r?e.lastIndexOf(r):e.indexOf(r))&&(e="number"==typeof s[2]?(c[i]=e.slice(0,a),e.slice(a+s[2])):(c[i]=e.slice(a),e.slice(0,a))):(a=r.exec(e))&&(c[i]=a[1],e=e.slice(0,a.index)),c[i]=c[i]||n&&s[3]&&o[i]||"",s[4]&&(c[i]=c[i].toLowerCase())):e=s(e,c);t&&(c.query=t(c.query)),n&&o.slashes&&"/"!==c.pathname.charAt(0)&&(""!==c.pathname||""!==o.pathname)&&(c.pathname=function(e,o){if(""===e)return o;for(var t=(o||"/").split("/").slice(0,-1).concat(e.split("/")),n=t.length,o=t[n-1],r=!1,s=0;n--;)"."===t[n]?t.splice(n,1):".."===t[n]?(t.splice(n,1),s++):s&&(0===n&&(r=!0),t.splice(n,1),s--);return 
r&&t.unshift(""),"."!==o&&".."!==o||t.push(""),t.join("/")}(c.pathname,o.pathname)),"/"!==c.pathname.charAt(0)&&g(c.protocol)&&(c.pathname="/"+c.pathname),f(c.port,c.protocol)||(c.host=c.hostname,c.port=""),c.username=c.password="",c.auth&&(~(a=c.auth.indexOf(":"))?(c.username=c.auth.slice(0,a),c.username=encodeURIComponent(decodeURIComponent(c.username)),c.password=c.auth.slice(a+1),c.password=encodeURIComponent(decodeURIComponent(c.password))):c.username=encodeURIComponent(decodeURIComponent(c.auth)),c.auth=c.password?c.username+":"+c.password:c.username),c.origin="file:"!==c.protocol&&g(c.protocol)&&c.host?c.protocol+"//"+c.host:"null",c.href=c.toString()}C.prototype={set:function(e,o,t){var n=this;switch(e){case"query":"string"==typeof o&&o.length&&(o=(t||h.parse)(o)),n[e]=o;break;case"port":n[e]=o,f(o,n.protocol)?o&&(n.host=n.hostname+":"+o):(n.host=n.hostname,n[e]="");break;case"hostname":n[e]=o,n.port&&(o+=":"+n.port),n.host=o;break;case"host":n[e]=o,i.test(o)?(o=o.split(":"),n.port=o.pop(),n.hostname=o.join(":")):(n.hostname=o,n.port="");break;case"protocol":n.protocol=o.toLowerCase(),n.slashes=!t;break;case"pathname":case"hash":o?(r="pathname"===e?"/":"#",n[e]=o.charAt(0)!==r?r+o:o):n[e]=o;break;case"username":case"password":n[e]=encodeURIComponent(o);break;case"auth":var r=o.indexOf(":");~r?(n.username=o.slice(0,r),n.username=encodeURIComponent(decodeURIComponent(n.username)),n.password=o.slice(r+1),n.password=encodeURIComponent(decodeURIComponent(n.password))):n.username=encodeURIComponent(decodeURIComponent(o))}for(var s=0;s<w.length;s++){var a=w[s];a[4]&&(n[a[1]]=n[a[1]].toLowerCase())}return n.auth=n.password?n.username+":"+n.password:n.username,n.origin="file:"!==n.protocol&&g(n.protocol)&&n.host?n.protocol+"//"+n.host:"null",n.href=n.toString(),n},toString:function(e){e&&"function"==typeof e||(e=h.stringify);var o=this,t=o.host,n=((n=o.protocol)&&":"!==n.charAt(n.length-1)&&(n+=":"),n+(o.protocol&&o.slashes||g(o.protocol)?"//":""));return o.username?(n+=o.username,o.password&&(n+=":"+o.password),n+="@"):o.password?n=n+(":"+o.password)+"@":"file:"!==o.protocol&&g(o.protocol)&&!t&&"/"!==o.pathname&&(n+="@"),(":"===t[t.length-1]||i.test(o.hostname)&&!o.port)&&(t+=":"),n+=t+o.pathname,(t="object"==typeof o.query?e(o.query):o.query)&&(n+="?"!==t.charAt(0)?"?"+t:t),o.hash&&(n+=o.hash),n}},C.extractProtocol=b,C.location=y,C.trimLeft=m,C.qs=h,t.exports=C}.call(this)}.call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{querystringify:2,"requires-port":3}],2:[function(e,o,t){"use strict";var s=Object.prototype.hasOwnProperty;function a(e){try{return decodeURIComponent(e.replace(/\+/g," "))}catch(e){return null}}function i(e){try{return encodeURIComponent(e)}catch(e){return null}}t.stringify=function(e,o){var t,n,r=[];for(n in"string"!=typeof(o=o||"")&&(o="?"),e)s.call(e,n)&&((t=e[n])||null!=t&&!isNaN(t)||(t=""),n=i(n),t=i(t),null!==n&&null!==t&&r.push(n+"="+t));return r.length?o+r.join("&"):""},t.parse=function(e){for(var o=/([^=?#&]+)=?([^&]*)/g,t={};r=o.exec(e);){var n=a(r[1]),r=a(r[2]);null===n||null===r||n in t||(t[n]=r)}return t}},{}],3:[function(e,o,t){"use strict";o.exports=function(e,o){if(o=o.split(":")[0],!(e=+e))return!1;switch(o){case"http":case"ws":return 80!==e;case"https":case"wss":return 443!==e;case"ftp":return 21!==e;case"gopher":return 70!==e;case"file":return!1}return 0!==e}},{}]},{},[1])(1)}); | PypiClean |
/cloudforet-console-api-v2-1.11.0.2.tar.gz/cloudforet-console-api-v2-1.11.0.2/cloudforet/console_api_v2/model/cost_analysis/cost.py | from pydantic import Field
from typing import Union, List
from datetime import datetime
from cloudforet.console_api_v2.model import BaseAPIModel
class CreateCostRequest(BaseAPIModel):
pass
class CostRequest(BaseAPIModel):
pass
class GetCostRequest(BaseAPIModel):
pass
class CostQuery(BaseAPIModel):
cost_id: Union[str, None] = Field(None)
original_currency: Union[str, None] = Field(None)
provider: Union[str, None] = Field(None)
region_code: Union[str, None] = Field(None)
region_key: Union[str, None] = Field(None)
category: Union[str, None] = Field(None)
product: Union[str, None] = Field(None)
account: Union[str, None] = Field(None)
usage_type: Union[str, None] = Field(None)
resource_group: Union[str, None] = Field(None)
resource: Union[str, None] = Field(None)
service_account_id: Union[str, None] = Field(None)
project_id: Union[str, None] = Field(None)
project_group_id: Union[str, None] = Field(None)
data_source_id: Union[str, None] = Field(None)
query: Union[dict, None] = Field(None)
domain_id: Union[str, None] = Field(None)
class CostInfo(BaseAPIModel):
cost_id: Union[str, None] = Field(None)
usd_cost: Union[str, None] = Field(None)
original_currency: Union[str, None] = Field(None)
original_cost: Union[float, None] = Field(None)
usage_quantity: Union[float, None] = Field(None)
provider: Union[str, None] = Field(None)
region_code: Union[str, None] = Field(None)
region_key: Union[str, None] = Field(None)
category: Union[str, None] = Field(None)
product: Union[str, None] = Field(None)
account: Union[str, None] = Field(None)
usage_type: Union[str, None] = Field(None)
resource_group: Union[str, None] = Field(None)
resource: Union[str, None] = Field(None)
tags: Union[dict, None] = Field(None)
additional_info: Union[dict, None] = Field(None)
service_account_id: Union[str, None] = Field(None)
project_id: Union[str, None] = Field(None)
data_source_id: Union[str, None] = Field(None)
domain_id: Union[str, None] = Field(None)
billed_at: Union[datetime, None] = Field(None)
created_at: Union[datetime, None] = Field(None)
class CostsInfo(BaseAPIModel):
results: Union[List[CostInfo], None] = Field(None)
total_count: Union[int, None] = Field(None)
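# Illustrative only: these response models are normally populated from the
# backend payload, e.g.
#
# info = CostsInfo(results=[CostInfo(cost_id='cost-123', provider='aws')],
#                  total_count=1)
# info.dict(exclude_none=True)  # pydantic v1-style serialization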
class CostAnalyzeV2Query(BaseAPIModel):
query: dict = Field(None)
domain_id: Union[str, None] = Field(None)
class CostAnalyzeInfo(BaseAPIModel):
results: Union[list, None] = Field(None)
more: Union[bool, None] = Field(None)
class CostStatQuery(BaseAPIModel):
pass | PypiClean |
/sas-esppy-7.1.16.tar.gz/sas-esppy-7.1.16/esppy/templates/template.py | from __future__ import print_function, division, absolute_import, unicode_literals
import collections
import os
import re
import requests
import six
import warnings
from six.moves import urllib
from ..base import ESPObject, attribute
from ..config import get_option
from ..mas import MASModule
from ..windows import Target, BaseWindow, get_window_class, CalculateWindow, TrainWindow, ScoreWindow, ModelReaderWindow
from ..utils import xml
from ..utils.data import gen_name
from ..utils.notebook import scale_svg
class WindowDict(collections.abc.MutableMapping):
'''
Dictionary for holding window objects
Attributes
----------
project : string
The name of the project
contquery : string
The name of the continuous query
template : string
The name of the template
session : requests.Session
The session for the windows
Parameters
----------
*args : one-or-more arguments, optional
Positional arguments to MutableMapping
**kwargs : keyword arguments, optional
Keyword arguments to MutableMapping
'''
def __init__(self, template, *args, **kwargs):
collections.abc.MutableMapping.__init__(self, *args, **kwargs)
self._data = dict()
self.template = template
self.project = None
self.project_handle = None
self.contquery = None
self.session = None
@property
def session(self):
'''
The session for the windows
Returns
-------
string
'''
return self._session
@session.setter
def session(self, value):
self._session = value
for item in self._data.values():
item.session = self._session
@property
def project(self):
'''
The project that windows are associated with
Returns
-------
string
'''
return self._project
@project.setter
def project(self, value):
self._project = getattr(value, 'name', value)
for item in self._data.values():
item.project = self._project
@property
def contquery(self):
'''
The continuous query that windows are associated with
Returns
-------
string
'''
return self._contquery
@contquery.setter
def contquery(self, value):
self._contquery = getattr(value, 'name', value)
for item in self._data.values():
item.contquery = self._contquery
if hasattr(value, 'project'):
self.project = value.project
@property
def template(self):
'''
The template that windows are associated with
Returns
-------
string
'''
return self._template
@template.setter
def template(self, value):
self._template = value
for item in self._data.values():
item.template = self._template
if hasattr(value, 'contquery'):
self.contquery = value.contquery
if hasattr(value, 'project'):
self.project = value.project
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
if not isinstance(value, BaseWindow):
raise TypeError('Only Window objects can be values '
'in a template')
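# A duplicate key is renamed (a numeric suffix is appended until the name is
# unique); a warning records the rename, and targets that still point at the
# old name are rewired further down.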
if key in self._data.keys():
oldkey = key
suffix = 0
while key in self._data.keys():
key = key.strip(str(suffix))
suffix += 1
key = key + str(suffix)
warnings.warn('%s already exists in Template %s, renamed to %s' % (oldkey, self.template.name, key),
Warning)
value._register_to_project(self.template)
oldname = value.name
value.base_name = key
value.project = self.project
value.contquery = self.contquery
value.template = self.template
value.session = self.session
self._data[key] = value
# Make sure targets get updated with new name
if oldname != value.name:
for window in self._data.values():
for target in set(window.targets):
if target.name == oldname:
role, slot = target.role, target.slot
window.targets.discard(target)
window.add_target(value, role=role, slot=slot)
def __delitem__(self, key):
del self._data[key]
for window in self._data.values():
try:
window.delete_target(key)
except ValueError:
pass
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __str__(self):
return str(self._data)
def __repr__(self):
return repr(self._data)
class Template(ESPObject, collections.abc.MutableMapping):
'''
ESP Template
Parameters
----------
name : string
Name of the template
trace : string, optional
One or more space-separated window names or IDs
index_type : string, optional
A default index type for all windows in the template that
do not explicitly specify an index type
Valid values: 'rbtree', 'hash', 'ln_hash', 'cl_hash', 'fw_hash', 'empty'
timing_threshold : int, optional
When a window in the template takes more than this number of microseconds
to compute a given event or event block, a warning message is logged
include_singletons : bool, optional
Specify whether to add unattached source windows
description : string, optional
Description of the template
Attributes
----------
project : string or Project
Name of the project the template is associated with
contquery : string or ContinuousQuery
The name of the continuous query
windows : dict
Collection of windows in the template
metadata : dict
Metadata dictionary
url : string
URL of the template
Notes
-----
All parameters are also available as instance attributes.
Returns
-------
:class:`Template`
'''
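# A minimal usage sketch (constructor arguments for the window classes are
# illustrative only -- see esppy.windows for the real signatures):
#
# tmpl = Template('training_flow')
# w_train = tmpl.add_window(TrainWindow(name='w_train'))
# w_score = tmpl.add_window(ScoreWindow(name='w_score'))
# w_train.add_target(w_score, role='data')
# tmpl.input_windows = ['w_train']
# tmpl.output_windows = ['w_score']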
trace = attribute('trace', dtype='string')
index_type = attribute('index', dtype='string',
values={'rbtree': 'pi_RBTREE', 'hash': 'pi_HASH',
'ln_hash': 'pi_LN_HASH', 'cl_hash': 'pi_CL_HASH',
'fw_hash': 'pi_FW_HASH', 'empty': 'pi_EMPTY'})
timing_threshold = attribute('timing-threshold', dtype='int')
include_singletons = attribute('include-singletons', dtype='bool')
file_dir = os.path.split(__file__)[0]
template_list = collections.defaultdict(dict)
def __init__(self, name, trace=None, index_type=None,
timing_threshold=None, include_singletons=None, description=None):
self.windows = WindowDict(self)
ESPObject.__init__(self, attrs=locals())
self.project = None
self.contquery = None
self.tag = None
self.name = name or gen_name(prefix='tp_')
self.description = description
self.mas_modules = []
self.metadata = {}
self.input_windows = list()
self.output_windows = list()
self.required_parameter_map = collections.defaultdict(list)
@property
def session(self):
'''
The requests.Session object for the continuous query
Returns
-------
string
'''
return ESPObject.session.fget(self)
@session.setter
def session(self, value):
ESPObject.session.fset(self, value)
self.windows.session = value
@property
def name(self):
'''
The name of the template
Returns
-------
string
'''
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def contquery(self):
'''
The name of the continuous query
Returns
-------
string
'''
return self._contquery
@contquery.setter
def contquery(self, value):
self._contquery = getattr(value, 'name', value)
self.windows.contquery = self._contquery
@property
def project(self):
'''
The name of the project
Returns
-------
string
'''
return self._project
@project.setter
def project(self, value):
self._project = getattr(value, 'name', value)
self.windows.project = self._project
@property
def input_windows(self):
return ','.join(self._input_windows)
@input_windows.setter
def input_windows(self, value):
if not isinstance(value, list):
raise TypeError('Please use a list to set input_windows')
self._input_windows = list()
for window in value:
self.add_input_windows(window)
def _register_to_project(self, project_handle=None):
pass
def add_input_windows(self, *windows):
'''
Add input_windows
Parameters
----------
windows : one-or-more-Windows
The Window objects to add as input_windows for Template
'''
for window in windows:
base_name = getattr(window, 'base_name', window)
if not base_name or base_name not in self.windows:
raise KeyError('%s is not a window of Template %s' %
(base_name, self.name))
elif base_name in self._input_windows:
print('%s is already one of the input_windows' % base_name)
else:
self._input_windows.append(base_name)
return self._input_windows
def delete_input_windows(self, *windows):
'''
Delete input_windows
Parameters
----------
windows : one-or-more-Windows
The Window objects to deleted from input_windows
'''
for window in windows:
base_name = getattr(window, 'base_name', window)
if base_name not in self.windows:
raise KeyError('%s is not a window of Template %s' %
(base_name, self.name))
elif base_name not in self._input_windows:
print('%s is not one of the input_windows' % base_name)
else:
self._input_windows.remove(base_name)
return self._input_windows
@property
def output_windows(self):
return ','.join(self._output_windows)
@output_windows.setter
def output_windows(self, value):
if not isinstance(value, list):
raise TypeError('Please use a list to set output_windows')
self._output_windows = list()
for window in value:
self.add_output_windows(window)
def add_output_windows(self, *windows):
'''
Add output_windows
Parameters
----------
windows : one-or-more-Windows
The Window objects to add as output_windows for Template
'''
for window in set(windows):
base_name = getattr(window, 'base_name', window)
if base_name not in self.windows:
raise KeyError('%s is not a window of Template %s' %
(base_name, self.name))
elif base_name in self._output_windows:
print('%s is already one of the output_windows' % base_name)
else:
self._output_windows.append(base_name)
return self._output_windows
def delete_output_windows(self, *windows):
'''
Delete output_windows
Parameters
----------
windows : one-or-more-Windows
The Window objects to deleted from output_windows
'''
for window in set(windows):
base_name = getattr(window, 'base_name', window)
if base_name not in self.windows:
raise KeyError('%s is not a window of Template %s' %
(base_name, self.name))
elif base_name not in self._output_windows:
print('%s is not one of the output_windows' % base_name)
else:
self._output_windows.remove(base_name)
return self._output_windows
def create_mas_module(self, language, module, func_names, mas_store=None,
mas_store_version=None, description=None,
code_file=None, code=None):
'''
Create a MAS module object
Parameters
----------
language : string
The name of the programming language
module : string, optional
Name of the MAS module
func_names : string or list-of-strings, optional
The function names exported by the module
Returns
-------
:class:`MASModule`
'''
out = MASModule(language, module, func_names, mas_store=mas_store,
mas_store_version=mas_store_version,
description=description, code_file=code_file,
code=code)
out.project = self.project
out.session = self.session
return out
def set_parameters(self, window, **parameters):
'''
Set parameters
Parameters
----------
window : Window
The Window object to set parameters
**parameters : keyword-arguments, optional
The parameters to set
'''
base_name = getattr(window, 'base_name', window)
try:
window = self.windows[base_name]
except KeyError:
raise KeyError('%s is not a window of Template %s' %
(base_name, self.name))
if not isinstance(window, (CalculateWindow, TrainWindow, ModelReaderWindow)):
raise TypeError('Only CalculateWindow, TrainWindow and ModelReaderWindow objects support this method')
return window.set_parameters(**parameters)
def set_inputs(self, window=None, model=None, **input_map):
'''
Set inputs
Parameters
----------
window : Window, optional
The Window object to set inputs, default value is None
model : string
The name / URL of the model
**input_map : keyword-arguments, optional
The parameters to set
'''
if window is None:
try:
window = self._input_windows[0]
print("INFO: window is not specified, defaulting to first input window %s" % window)
except IndexError:
raise IndexError("Please specify input_windows for Template %s first" % self.name)
base_name = getattr(window, 'base_name', window)
try:
window = self.windows[base_name]
except KeyError:
raise ValueError('%s is not a window of Template %s' %
(base_name, self.name))
if isinstance(window, (TrainWindow, CalculateWindow)):
return window.set_inputs(**input_map)
elif isinstance(window, ScoreWindow):
return window.set_inputs(model, **input_map)
else:
raise TypeError('Only CalculateWindow, TrainWindow and ScoreWindow objects support this method')
def set_outputs(self, window=None, model=None, **output_map):
'''
Set outputs
Parameters
----------
window : Window, optional
The Window object to set outputs, default value is None
model : string
The name / URL of the model
**output_map : keyword-arguments, optional
The parameters to set
'''
if window is None:
try:
window = self._output_windows[0]
print("INFO: window is not specified, defaulting to first output window %s" % window)
except IndexError:
raise IndexError("Please specify output_windows for Template %s first" % self.name)
base_name = getattr(window, 'base_name', window)
try:
window = self.windows[base_name]
except KeyError:
raise ValueError('%s is not a window of Template %s' %
(base_name, self.name))
if isinstance(window, CalculateWindow):
return window.set_outputs(**output_map)
elif isinstance(window, ScoreWindow):
return window.set_outputs(model, **output_map)
else:
raise TypeError('Only CalculateWindow and ScoreWindow objects support this method')
def set_mas_window_map(self, window, **mas_map):
'''
Set outputs
Parameters
----------
window : Window, optional
The Window object to set mas_map, default value is None
**mas_map : keyword-arguments, optional
The parameters to set
'''
base_name = getattr(window, 'base_name', window)
try:
window = self.windows[base_name]
except KeyError:
raise ValueError('%s is not a window of Template %s' %
(base_name, self.name))
return window.update_mas_window_map(old_key=None, **mas_map)
def add_target(self, obj, **kwargs):
'''
Add target for Template
Parameters
----------
obj : Window or Template
The Window or Template object to add as targets
role : string, optional
The role of the connection
slot : string, optional
Indicates the slot number to use from the splitting
function for the window.
Returns
-------
``self``
'''
try:
window_name = self._output_windows[0]
except IndexError:
raise IndexError("Please specify output_windows for Template %s first" % self.name)
window = self.windows[window_name]
if isinstance(obj, BaseWindow):
window.add_targets(obj, **kwargs)
elif isinstance(obj, Template):
try:
target_window_name = obj._input_windows[0]
except IndexError:
raise IndexError("Please specify input_windows for Template %s first" % obj.name)
window.add_targets(obj.windows[target_window_name], **kwargs)
def delete_target(self, *objs):
'''
Delete targets for Template
Parameters
----------
obj : Window or Template
The Window or Template object to deleted from targets
Returns
-------
``self``
'''
try:
window_name = self._output_windows[0]
except IndexError:
raise IndexError("There is no output_windows for Template %s" % self.name)
window = self.windows[window_name]
target_set = set(target.name for target in window.targets)
for obj in objs:
if isinstance(obj, BaseWindow):
window.delete_targets(obj)
elif isinstance(obj, Template):
for possible_target in obj._input_windows:
if obj.windows[possible_target].name in target_set:
window.delete_targets(obj.windows[possible_target])
def add_window(self, window):
'''
Add a window to the template
Parameters
----------
window : Window
The Window object to add
Returns
-------
:class:`Window`
'''
if not window.base_name:
window.base_name = gen_name(prefix='w_')
self.windows[window.base_name] = window
return window
def add_windows(self, *windows):
'''
Add one or more windows to the template
Parameters
----------
windows : one-or-more-Windows
The Window objects to add
Returns
-------
tuple of :class:`Window`s
'''
for item in windows:
self.add_window(item)
return windows
def import_template(self, template, internal_only=True):
'''
import a template object
Parameters
----------
template : Template
A Template object to be imported to current template
internal_only: bool, optional
Only includes the internal edges or not, default value is True
Returns
-------
:class:`Template`
'''
if template is self:
warnings.warn('You are importing this template into itself; a copy will be imported instead.', Warning)
return self.import_template(self.copy(None), internal_only=internal_only)
ref_dict = {}
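# ref_dict maps each original window key to the (possibly renamed) key its
# copy received in this template, so the internal edges can be re-created
# against the copied windows below.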
for key, window in sorted(six.iteritems(template.windows)):
copied_win = window.copy(deep=True)
self.windows[key] = copied_win
ref_dict[key] = copied_win.base_name
for old_name, new_name in ref_dict.items():
copied_win = self.windows[new_name]
win = template.windows[old_name]
copied_win.targets = set()
for target in set(win.targets):
if target.base_name in ref_dict:
copied_win.targets.add(Target(name=ref_dict[target.base_name], template=self,
role=target.role, slot=target.slot))
elif not internal_only:
copied_win.targets.add(target)
return
def copy(self, name, deep=True, internal_only=True):
'''
Return a copy of the template
Parameters
----------
name : string
Name of the copied template
deep : bool, optional
Copy the sub-objects or not, default value is True
internal_only: bool, optional
Only includes the internal edges or not, default value is True
Returns
-------
:class:`Template`
'''
out = type(self)(name)
out.session = self.session
out.contquery = self.contquery
out.project = self.project
for key, value in self._get_attributes(use_xml_values=False).items():
if key != 'name': # do NOT copy the old name
setattr(out, key, value)
if deep:
for k, win in self.windows.items():
out.windows[k] = win.copy(deep=deep)
out.windows[k].targets = set()
for target in set(win.targets):
if target.template.name == self.name:
out.windows[k].targets.add(Target(name=target.base_name, template=out,
role=target.role, slot=target.slot))
elif not internal_only:
out.windows[k].targets.add(target)
else:
out.windows.update(self.windows)
out.input_windows = self._input_windows
out.output_windows = self._output_windows
out.required_parameter_map = self.required_parameter_map
return out
def __copy__(self):
return self.copy(name=None, deep=False)
def __deepcopy__(self, memo):
return self.copy(name=None, deep=True)
@property
def fullname(self):
return '%s.%s.%s' % (self.project, self.contquery, self.name)
@property
def url(self):
'''
URL of the Template
Returns
-------
string
'''
self._verify_project()
return urllib.parse.urljoin(self.base_url, '%s/%s/%s/' %
(self.project, self.contquery, self.name))
@classmethod
def from_xml(cls, data, template_name, tag=None, contquery=None, project=None, session=None):
'''
Create template from XML definition
Parameters
----------
data : xml-string or ElementTree.Element
XML template definition
template_name: string
The name for the newly created Template object
tag: string, optional
Type of imported template
contquery : string, optional
The name of Continuous Query
project : string, optional
The name of Project
session : requests.Session, optionals
The session object
Returns
-------
:class:`Template`
'''
out = cls(template_name)
out.session = session
out.project = project
out.contquery = contquery
if isinstance(data, six.string_types):
if re.match(r'^\s*<', data):
data = data
elif os.path.isfile(data):
data = open(data, 'r').read()
else:
data = urllib.request.urlopen(data).read().decode('utf-8')
data = xml.from_xml(data)
        try:
            del data.attrib['name']
        except KeyError:
            pass
        try:
            out.tag = data.attrib['tag']
        except KeyError:
            out.tag = tag
out._set_attributes(data.attrib)
for desc in data.findall('./description'):
out.description = desc.text
for item in data.findall('./mas-modules/mas-module'):
out.mas_modules.append(MASModule.from_xml(item, session=session))
for item in data.findall('./windows/*'):
try:
wcls = get_window_class(item.tag)
except KeyError:
raise TypeError('Unknown window type: %s' % item.tag)
window = wcls.from_xml(item, session=session)
out.windows[window.base_name] = window
for item in data.findall('./edges/*'):
for target in re.split(r'\s+', item.attrib.get('target', '').strip()):
if not target or target not in out.windows:
continue
for source in re.split(r'\s+', item.attrib.get('source', '').strip()):
if not source or source not in out.windows:
continue
out.windows[source].add_target(out.windows[target], role=item.get('role'),
slot=item.get('slot'))
        try:
            out.input_windows = re.sub(r"[^\w]", " ", data.attrib['input-windows']).split()
        except KeyError:
            pass
        try:
            out.output_windows = re.sub(r"[^\w]", " ", data.attrib['output-windows']).split()
        except KeyError:
            pass
for item in data.findall('./metadata/meta'):
if 'id' in item.attrib.keys():
out.metadata[item.attrib['id']] = item.text
elif 'name' in item.attrib.keys():
out.metadata[item.attrib['name']] = item.text
for item in data.findall('./required-parameter-map/properties/property'):
field = item.text.split(',')
out.required_parameter_map[item.attrib['name']] = field
return out
from_element = from_xml
def to_element(self, template=False):
'''
Export template definition to ElementTree.Element
Returns
-------
:class:`ElementTree.Element`
'''
extra_attrs = [item for item in ['tag', 'input_windows', 'output_windows'] if getattr(self, item)]
out = xml.new_elem('template', xml.get_attrs(self, exclude=['name', 'project', 'contquery'], extra=extra_attrs))
if self.description:
xml.add_elem(out, 'description', text_content=self.description)
if self.metadata:
metadata = xml.add_elem(out, 'metadata')
for key, value in sorted(six.iteritems(self.metadata)):
xml.add_elem(metadata, 'meta', attrib=dict(id=key),
text_content=value)
if self.mas_modules:
mods = xml.add_elem(out, 'mas-modules')
for item in self.mas_modules:
xml.add_elem(mods, item.to_element())
windows = xml.add_elem(out, 'windows')
sources = {}
if self.windows:
edges = []
for name, window in sorted(six.iteritems(self.windows)):
win_out = window.to_element()
if not template:
win_out.attrib['name'] = window.base_name
xml.add_elem(windows, win_out)
for target in window.targets:
sources.setdefault(target.name, []).append(window.name)
attrib = dict(source=window.base_name, target=target.base_name)
if target.role:
attrib['role'] = target.role
if target.slot:
attrib['slot'] = target.slot
edges.append((target._index, attrib))
if edges:
elem = xml.add_elem(out, 'edges')
for i, attrib in sorted(edges):
xml.add_elem(elem, 'edge', attrib=attrib)
if self.required_parameter_map:
mappings = xml.add_elem(out, 'required-parameter-map')
xml.add_properties(mappings, self.required_parameter_map, bool_as_int=True)
# Replace "inherit" data types with the real data type
n_inherit = -1
while True:
inherit = out.findall('./windows/*/schema/fields/field[@type="inherit"]')
if len(inherit) == n_inherit:
break
n_inherit = len(inherit)
for window in out.findall('./windows/*'):
for field in window.findall('./schema/fields/field[@type="inherit"]'):
for source in sources[window.attrib['name']]:
fname = field.attrib['name']
if source not in self.windows:
raise ValueError("Could not determine data type of "
"field '%s' on window '%s'" % (fname, source))
win = self.windows[source]
if hasattr(win, 'schema') and fname in win.schema:
dtype = win.schema[fname].type
field.set('type', dtype)
# return windows, elem
return out
def to_xml(self, pretty=False, template=False):
'''
Export template definition to XML
Parameters
----------
pretty : bool, optional
Should the output embed whitespaced for readability or not, default value is False
template : bool, optional
To include template name or not, default value is False
Returns
-------
string
'''
return xml.to_xml(self.to_element(template=template), pretty=pretty)
def save_xml(self, dest, mode='w', pretty=True, **kwargs):
'''
Save the template XML to a file
Parameters
----------
dest : string or file-like
The destination of the XML content
mode : string, optional
The write mode for the output file (only used if `dest` is a string)
pretty : boolean, optional
Should the XML include whitespace for readability or not, default value is True
'''
if isinstance(dest, six.string_types):
with open(dest, mode=mode, **kwargs) as output:
output.write(self.to_xml(pretty=pretty))
else:
dest.write(self.to_xml(pretty=pretty))
def export_to(self, type='xml', pretty=False):
if type == 'xml':
return self.to_xml(pretty=pretty)
return
def import_from(self, type='xml'):
if type == 'xml':
return self.from_xml()
return
def _persist_metadata(self):
if self.metadata:
self._set_metadata(self.metadata)
def _clear_metadata(self):
self.metadata.clear()
def _set_metadata(self, data):
for key, value in six.iteritems(data):
self._put(urllib.parse.urljoin(self.base_url,
'projectMetadata/%s/%s/%s' %
(self.project, self.name, key)),
data='%s' % value)
def _del_metadata(self, *data):
for key in data:
self._delete(urllib.parse.urljoin(self.base_url,
'projectMetadata/%s/%s/%s' %
(self.project, self.name, key)))
def to_graph(self, graph=None, schema=False, detail=False):
'''
Export template definition to graphviz.Digraph
Parameters
----------
graph : graphviz.Graph, optional
The parent graph to add to
schema : bool, optional
Include window schemas or not, default value is False
detail : bool, optional
Show template detail or not, default value is False
Returns
-------
:class:`graphviz.Digraph`
'''
try:
import graphviz as gv
except ImportError:
raise ImportError('The graphviz module is required for exporting to graphs.')
if graph is None:
graph = gv.Digraph(format='svg')
graph.attr('node', shape='rect')
graph.attr('graph', rankdir='LR', center='false')
graph.attr('edge', fontname='times-italic')
label = 'Template-%s: ' % self.tag if self.tag else 'Template: '
label = label + self.name
if self.windows and detail:
tgraph = gv.Digraph(format='svg', name='cluster_%s' % self.fullname.replace('.', '_'))
tgraph.attr('node', shape='rect')
tgraph.attr('graph', fontname='helvetica')
tgraph.attr('edge', fontname='times-italic')
tgraph.attr(label=label, labeljust='l', style='rounded,bold,dashed', color='blue', fontcolor='black')
for wkey, window in sorted(self.windows.items()):
window.to_graph(graph=tgraph,
schema=schema or get_option('display.show_schema'))
for target in window.targets:
if target.base_name in self.windows and target.template and target.template.name == self.name:
graph.edge(window.fullname, self.windows[target.base_name].fullname, label=target.role or '')
graph.subgraph(tgraph)
else:
graph.node(self.fullname, label=label, labeljust='l',
style='bold,filled', color='blue', fillcolor='#f0f0f0',
fontcolor='blue', margin='.25,.17', fontname='helvetica')
return graph
def _repr_svg_(self):
try:
return scale_svg(self.to_graph()._repr_svg_())
except ImportError:
raise AttributeError('_repr_svg_')
def __str__(self):
return '%s(name=%s, project=%s)' % (type(self).__name__,
repr(self.name),
repr(self.project))
def __repr__(self):
return str(self)
def rename_window(self, window, newname):
'''
Rename a window and update targets
Parameters
----------
window : string or Window
The window to rename
newname : string
The new name of the Window object
'''
oldname = getattr(window, 'base_name', window)
self.windows[newname] = self.windows[oldname]
del self.windows[oldname]
def delete_windows(self, *windows):
'''
Delete windows and update targets
Parameters
----------
windows : one-or-more strings or Window objects
The window to delete
'''
for item in windows:
to_delete = self.windows[getattr(item, 'base_name', item)]
to_delete.template = None
del self.windows[getattr(item, 'base_name', item)]
delete_window = delete_windows
def subscribe(self, mode='streaming', pagesize=50, filter=None,
sort=None, interval=None, limit=None, horizon=None, reset=True):
'''
Subscribe to events
Parameters
----------
mode : string, optional
The mode of subscriber: 'updating' or 'streaming'
pagesize : int, optional
The maximum number of events in a page
filter : string, optional
Functional filter to subset events
sort : string, optional
Sort order for the events (updating mode only)
interval : int, optional
Interval between event sends in milliseconds
limit : int, optional
The maximum number of rows of data to keep in the internal
DataFrame object.
horizon : int or datetime.datetime or string, optional
Specifies a condition that stops the subscriber.
If an int, the subscriber stops after than many events.
If a datetime.datetime, the subscriber stops after the specified
date and time. If a string, the string is an expression
applied to the event using the :meth:`DataFrame.query`
method. If that query returns any number of rows, the
subscriber is stopped.
reset : bool, optional
If True, the internal data is reset on subsequent calls
to the :meth:`subscribe` method.
See Also
--------
:meth:`unsubscribe`
:class:`Subscriber`
'''
for k, win in self.windows.items():
win.subscribe(mode, pagesize, filter, sort, interval, limit, horizon, reset)
def unsubscribe(self):
'''
Stop event processing
See Also
--------
:meth:`subscribe`
'''
for k, win in self.windows.items():
win.unsubscribe()
#
# MutableMapping methods
#
def __getitem__(self, key):
return self.windows[key]
def __setitem__(self, key, value):
self.windows[key] = value
def __delitem__(self, key):
del self.windows[key]
def __iter__(self):
return iter(self.windows)
def __len__(self):
return len(self.windows)
def __contains__(self, value):
        return value in self.windows
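# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original source); it
# only calls methods defined above, and the XML file names are hypothetical.
#
#     t = Template.from_xml('score_template.xml', template_name='score')
#     t.to_graph(detail=True)               # inspect as a graphviz.Digraph
#     t2 = t.copy('score_copy', deep=True)  # deep copy keeps internal edges
#     t2.save_xml('score_copy.xml', pretty=True)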
/KaKa-0.1.1.tar.gz/KaKa-0.1.1/kaka/middlewares.py
import time
from abc import ABCMeta, abstractmethod
from .errors import EntryPatternError
from .response import BaseResponse
class MWManager(object):
def __init__(self):
self._priority_set = set()
self._middleware_cls_set = set()
self._mw_list = list()
def register(self, entry_list):
"""
中间件注册接口,可以多次注册中间件,将会根据中间件的优先级排列。
"""
for entry in entry_list:
self._check_pattern(entry)
priority, middleware_cls = entry
self._add(priority, middleware_cls())
def _check_pattern(self, entry):
"""
entry的格式是(priority, middleware_cls)
"""
def check_entry(entry):
if not isinstance(entry, (tuple, list)):
raise EntryPatternError('the type of entry must be tuple or list')
valid_length = len(entry) == 2
if not valid_length:
raise EntryPatternError('length of route entry must be 2')
else:
priority, middleware_cls = entry
return priority, middleware_cls
def check_priority(priority):
if not isinstance(priority, int):
raise EntryPatternError('the type of priority must be int.')
valid_range = 0 <= priority
if not valid_range:
raise EntryPatternError('the range of priority must >=0.')
if priority in self._priority_set:
raise EntryPatternError(f"the priority: '{priority}' is already set, please change.")
else:
self._priority_set.add(priority)
def check_mw_cls(mw_cls):
if not issubclass(mw_cls, AbstractMiddleWare):
raise EntryPatternError('the middleware cls must be subclass of AbstractMiddleWare.')
if mw_cls in self._middleware_cls_set:
raise EntryPatternError(f"the middleware: '{mw_cls}' is already register.")
else:
self._middleware_cls_set.add(mw_cls)
priority, middleware_cls = check_entry(entry)
check_priority(priority)
check_mw_cls(middleware_cls)
def _add(self, priority, mw):
is_empty = len(self._mw_list) == 0
if is_empty:
self._mw_list.append((priority, mw))
return None
        # If the priority is lower than that of the first middleware
first_mw_priority = self._mw_list[0][0]
if priority < first_mw_priority:
self._mw_list.insert(0, (priority, mw))
return None
        # If the priority is higher than that of the last middleware
last_mw_priority = self._mw_list[-1][0]
if priority > last_mw_priority:
self._mw_list.append((priority, mw))
return None
        # If the priority falls somewhere in the middle
insert_index = None
for index, _entry in enumerate(self._mw_list):
_priority = _entry[0]
if priority < _priority:
insert_index = index
break
self._mw_list.insert(insert_index, (priority, mw))
def pre_process(self, request):
"""
仅修改request而不伪造
绝大部分情况下应该返回None
如果要截断请求,则返回response对象
"""
for _entry in self._mw_list:
mw = _entry[1]
            interrupt_response = mw.pre_process(request)
            if interrupt_response is not None:  # special handling for an interrupting response object
                if isinstance(interrupt_response, BaseResponse):
                    return interrupt_response
                else:
                    raise TypeError(f'the type of response object must be BaseResponse or its subclass. current type: "{type(interrupt_response)}".')
else:
pass
return None
def after_process(self, request, response):
"""
仅修改response而不伪造,必须返回None
"""
for _entry in reversed(self._mw_list):
mw = _entry[1]
mw.after_process(request, response)
return None
class AbstractMiddleWare(object, metaclass=ABCMeta):
"""抽象中间件类,所有中间件类的定义都要继承此抽象类作为接口约束"""
@abstractmethod
def pre_process(self, request):
pass
@abstractmethod
def after_process(self, request, response):
        pass
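# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module).
# TimingMiddleware is a hypothetical example middleware; it shows the two
# hooks a concrete middleware must implement and how it is registered.
class TimingMiddleware(AbstractMiddleWare):
    """Record the start time on the way in and report elapsed time on the way out."""
    def pre_process(self, request):
        # Returning None lets the request continue through the chain.
        request.start_time = time.time()
        return None
    def after_process(self, request, response):
        # Only the response may be adjusted here; always return None.
        elapsed = time.time() - getattr(request, 'start_time', time.time())
        print('request handled in %.4f seconds' % elapsed)
        return None
if __name__ == '__main__':
    manager = MWManager()
    # Entries are (priority, middleware_class); lower priority values run first.
    manager.register([(10, TimingMiddleware)])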
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/gauges/GlossyCircularGaugeBase.js.uncompressed.js
define("dojox/gauges/GlossyCircularGaugeBase", ["dojo/_base/declare","dojo/_base/lang","dojo/_base/connect","dojox/gfx","./AnalogGauge","./AnalogCircleIndicator","./TextIndicator","./GlossyCircularGaugeNeedle"],
function(declare, lang, connect, gfx, AnalogGauge, AnalogCircleIndicator, TextIndicator, GlossyCircularGaugeNeedle) {
/*=====
AnalogGauge = dojox.gauges.AnalogGauge;
=====*/
return declare("dojox.gauges.GlossyCircularGaugeBase", [AnalogGauge], {
// summary:
// The base class for GlossyCircularGauge and GlossySemiCircularGauge.
//_defaultIndicator : _Indicator
// the type of default indicator to create
_defaultIndicator: AnalogCircleIndicator,
// _needle: dojox.gauges.GlossyCircularGaugeNeedle
// the needle of this circular gauge
_needle: null,
// _textIndicator: dojox.gauges.TextIndicator
// the text displaying the gauge's value
_textIndicator: null,
_textIndicatorAdded: false,
// _range: Object
// the range of this gauge
_range: null,
// value: Number
// The value of the gauge.
value: 0,
// color: String
// The main color of the gauge.
color: 'black',
// needleColor: Color
// The main color of the needle.
needleColor: '#c4c4c4',
// textIndicatorFont: String
// The font of the text indicator
textIndicatorFont: "normal normal normal 20pt serif",
// textIndicatorVisible: Boolean
// Indicates if the text indicator is visible
textIndicatorVisible: true,
// textIndicatorColor: Color
// The color of the text indicator
textIndicatorColor: '#c4c4c4',
// _majorTicksOffset: Number
// Distance, at design, from gauge's center to major ticks
_majorTicksOffset: 130,
// majorTicksInterval: Number
// Interval between major ticks
majorTicksInterval: 10,
// _majorTicksLength: Number
// Major tick size, at design
_majorTicksLength: 5,
// majorTicksColor: Color
// Color of major tick marks
majorTicksColor: '#c4c4c4',
// majorTicksLabelPlacement: String
// Placement of major tick labels
majorTicksLabelPlacement: 'inside',
// _minorTicksOffset: Number
// Distance, at design, from gauge's center to minor ticks
_minorTicksOffset: 130,
// minorTicksInterval: Number
// Interval between minor ticks
minorTicksInterval: 5,
// _minorTicksLength: Number
// Minor tick size, at design
_minorTicksLength: 3,
// minorTicksColor: Color
// Color of minor tick marks
minorTicksColor: '#c4c4c4',
// noChange: Boolean
// Indicates if the gauge reacts to touch events
noChange: false,
// title: String
// The title displayed in the needle's tooltip
title: "",
// font: Object
// The font of the gauge
font: "normal normal normal 10pt serif",
// scalePrecision: Number
// The precision for the formatting of numbers in the scale (default is 0)
scalePrecision: 0,
// textIndicatorPrecision: Number
// The precision for the formatting of numbers in the text indicator (default is 0)
textIndicatorPrecision: 0,
_font: null,
constructor: function(){
this.startAngle = -135;
this.endAngle = 135;
this.min = 0;
this.max = 100;
},
startup: function(){
// summary:
// Overrides AnalogGauge.startup
this.inherited(arguments);
//just in case someone calls the startup twice.
if (this._needle) return;
var scale = Math.min((this.width / this._designWidth), (this.height / this._designHeight));
this.cx = scale * this._designCx + (this.width - scale * this._designWidth) / 2;
this.cy = scale * this._designCy + (this.height - scale * this._designHeight) / 2;
this._range = {
low: this.min ? this.min : 0,
high: this.max ? this.max : 100,
color: [255, 255, 255, 0]
};
this.addRange(this._range);
this._majorTicksOffset = this._minorTicksOffset = scale * this._majorTicksOffset;
this._majorTicksLength = scale * this._majorTicksLength;
this._minorTicksLength = scale * this._minorTicksLength;
// creates and add the major ticks
this.setMajorTicks({
fixedPrecision: true,
precision: this.scalePrecision,
font: this._font,
offset: this._majorTicksOffset,
interval: this.majorTicksInterval,
length: this._majorTicksLength,
color: this.majorTicksColor,
labelPlacement: this.majorTicksLabelPlacement
});
// creates and add the minor ticks
this.setMinorTicks({
offset: this._minorTicksOffset,
interval: this.minorTicksInterval,
length: this._minorTicksLength,
color: this.minorTicksColor
});
// creates and adds the needle
this._needle = new GlossyCircularGaugeNeedle({
hideValue: true,
title: this.title,
noChange: this.noChange,
color: this.needleColor,
value: this.value
});
this.addIndicator(this._needle);
// creates and add the text indicator
this._textIndicator = new TextIndicator({
x: scale * this._designTextIndicatorX + (this.width - scale * this._designWidth) / 2,
y: scale * this._designTextIndicatorY + (this.height - scale * this._designHeight) / 2,
fixedPrecision: true,
precision: this.textIndicatorPrecision,
color: this.textIndicatorColor,
value: this.value ? this.value : this.min,
align: "middle",
font: this._textIndicatorFont
});
if (this.textIndicatorVisible){
this.addIndicator(this._textIndicator);
this._textIndicatorAdded = true;
}
// connect needle and text
connect.connect(this._needle, "valueChanged", lang.hitch(this, function(){
this.value = this._needle.value;
this._textIndicator.update(this._needle.value);
this.onValueChanged();
}));
},
onValueChanged: function(){
// summary:
// Invoked when the value of the gauge has changed.
},
//*******************************************************************************************
//* Property getters and setters
//*******************************************************************************************
_setColorAttr: function(color){
// summary:
// Sets the main color of the gauge
// color: String
// The color
this.color = color ? color : 'black';
if (this._gaugeBackground && this._gaugeBackground.parent)
this._gaugeBackground.parent.remove(this._gaugeBackground);
if (this._foreground && this._foreground.parent)
this._foreground.parent.remove(this._foreground);
this._gaugeBackground = null;
this._foreground = null;
this.draw();
},
_setNeedleColorAttr: function(color){
// summary:
// Sets the main color of the needle
// color: String
// The color
this.needleColor = color;
if (this._needle){
this.removeIndicator(this._needle);
this._needle.color = this.needleColor;
this._needle.shape = null;
this.addIndicator(this._needle);
}
},
_setTextIndicatorColorAttr: function(color){
// summary:
// Sets the color of text indicator display the gauge's value
// color: String
// The color
this.textIndicatorColor = color;
if (this._textIndicator){
this._textIndicator.color = this.textIndicatorColor;
this.draw();
}
},
_setTextIndicatorFontAttr: function(font){
// summary:
// Sets the font of the text indicator
// font: String
// An string representing the font such as 'normal normal normal 10pt Helvetica,Arial,sans-serif'
//
this.textIndicatorFont = font;
this._textIndicatorFont = gfx.splitFontString(font);
if (this._textIndicator){
this._textIndicator.font = this._textIndicatorFont;
this.draw();
}
},
setMajorTicksOffset: function(offset){
// summary:
// Sets the distance from gauge's center to major ticks
this._majorTicksOffset = offset;
this._setMajorTicksProperty({
'offset': this._majorTicksOffset
});
return this;
},
getMajorTicksOffset: function(){
// summary:
// Return the distance from gauge's center to major ticks
return this._majorTicksOffset;
},
_setMajorTicksIntervalAttr: function(interval){
// summary:
// Sets the interval between major ticks
this.majorTicksInterval = interval;
this._setMajorTicksProperty({
'interval': this.majorTicksInterval
});
},
setMajorTicksLength: function(length){
// summary:
// Sets the size of the major ticks.
this._majorTicksLength = length;
this._setMajorTicksProperty({
'length': this._majorTicksLength
});
return this;
},
getMajorTicksLength: function(){
// summary:
// Returns the size of the major ticks.
return this._majorTicksLength;
},
_setMajorTicksColorAttr: function(color){
// summary:
// Sets the color of the major ticks.
this.majorTicksColor = color;
this._setMajorTicksProperty({
'color': this.majorTicksColor
});
},
_setMajorTicksLabelPlacementAttr: function(placement){
// summary:
// Sets the placement of labels relatively to major ticks.
// placement: String
// 'inside' or 'outside'
this.majorTicksLabelPlacement = placement;
this._setMajorTicksProperty({
'labelPlacement': this.majorTicksLabelPlacement
});
},
_setMajorTicksProperty: function(prop){
if (this.majorTicks){
lang.mixin(this.majorTicks, prop);
this.setMajorTicks(this.majorTicks);
}
},
setMinorTicksOffset: function(offset){
// summary:
// Sets the distance from gauge's center to minor ticks
this._minorTicksOffset = offset;
this._setMinorTicksProperty({
'offset': this._minorTicksOffset
});
return this;
},
getMinorTicksOffset: function(){
// summary:
// Returns the distance from gauge's center to minor ticks
return this._minorTicksOffset;
},
_setMinorTicksIntervalAttr: function(interval){
// summary:
// Sets the interval between minor ticks
this.minorTicksInterval = interval;
this._setMinorTicksProperty({
'interval': this.minorTicksInterval
});
},
setMinorTicksLength: function(length){
// summary:
// Sets the size of the minor ticks.
this._minorTicksLength = length;
this._setMinorTicksProperty({
'length': this._minorTicksLength
});
return this;
},
getMinorTicksLength: function(){
// summary:
// Return the size of the minor ticks.
return this._minorTicksLength;
},
_setMinorTicksColorAttr: function(color){
// summary:
// Sets the color of the minor ticks.
this.minorTicksColor = color;
this._setMinorTicksProperty({
'color': this.minorTicksColor
});
},
_setMinorTicksProperty: function(prop){
if (this.minorTicks){
lang.mixin(this.minorTicks, prop);
this.setMinorTicks(this.minorTicks);
}
},
_setMinAttr: function(min){
this.min = min;
if (this.majorTicks != null)
this.setMajorTicks(this.majorTicks);
if (this.minorTicks != null)
this.setMinorTicks(this.minorTicks);
this.draw();
this._updateNeedle();
},
_setMaxAttr: function(max){
this.max = max;
if (this.majorTicks != null)
this.setMajorTicks(this.majorTicks);
if (this.minorTicks != null)
this.setMinorTicks(this.minorTicks);
this.draw();
this._updateNeedle();
},
_setScalePrecisionAttr: function(value){
// summary:
// Changes precision of the numbers in the scale of the gauge
// value: Number
// The new value
this.scalePrecision = value;
this._setMajorTicksProperty({
'precision': value
});
},
_setTextIndicatorPrecisionAttr: function(value){
// summary:
// Changes precision of the numbers in the text indicator
// value: Number
// The new value
this.textIndicatorPrecision = value;
this._setMajorTicksProperty({
'precision': value
});
},
_setValueAttr: function(value){
// summary:
// Changes the value of the gauge
// value: Number
// The new value for the gauge.
value = Math.min(this.max, value);
value = Math.max(this.min, value);
this.value = value;
if (this._needle){
// update will not work if noChange is true.
var noChange = this._needle.noChange;
this._needle.noChange = false;
this._needle.update(value);
this._needle.noChange = noChange;
}
},
_setNoChangeAttr: function(value){
// summary:
// Indicates if the value of the gauge can be changed or not
// value: boolean
// true indicates that the gauge's value cannot be changed
this.noChange = value;
if (this._needle)
this._needle.noChange = this.noChange;
},
_setTextIndicatorVisibleAttr: function(value){
// summary:
// Changes the visibility of the text indicator displaying the gauge's value.
// value: boolean
// true to show the indicator, false to hide.
this.textIndicatorVisible = value;
if (this._textIndicator && this._needle){
if (this.textIndicatorVisible && !this._textIndicatorAdded){
this.addIndicator(this._textIndicator);
this._textIndicatorAdded = true;
this.moveIndicatorToFront(this._needle);
}
else
if (!this.textIndicatorVisible && this._textIndicatorAdded){
this.removeIndicator(this._textIndicator);
this._textIndicatorAdded = false;
}
}
},
_setTitleAttr: function(value){
// summary:
// Sets the title displayed by the needle's tooltip .
// value: String
// the title
this.title = value;
if (this._needle){
this._needle.title = this.title;
}
},
_setOrientationAttr: function(orientation){
// summary:
// Sets the orientation of the gauge
// orientation: String
// Either "clockwise" or "cclockwise"
this.orientation = orientation;
if (this.majorTicks != null)
this.setMajorTicks(this.majorTicks);
if (this.minorTicks != null)
this.setMinorTicks(this.minorTicks);
this.draw();
this._updateNeedle();
},
_updateNeedle: function(){
// updates the needle with no animation
this.value = Math.max(this.min, this.value);
this.value = Math.min(this.max, this.value);
if (this._needle){
// update will not work if noChange is true.
var noChange = this._needle.noChange;
this._needle.noChange = false;
this._needle.update(this.value, false);
this._needle.noChange = noChange;
} // to redraw the needle
},
_setFontAttr: function(font){
// summary:
// Sets the font of the gauge
// font: String
// An string representing the font such as 'normal normal normal 10pt Helvetica,Arial,sans-serif'
//
this.font = font;
this._font = gfx.splitFontString(font);
this._setMajorTicksProperty({
'font': this._font
});
}});
}); | PypiClean |
/gas_dynamics-0.4.2-py3-none-any.whl/gas_dynamics/fanno/fanno.py
from gas_dynamics.fluids import fluid, air
from numpy import log
from scipy.optimize import fsolve
#==================================================
#stagnation enthalpy
#==================================================
def stagnation_enthalpy(enthalpy: float, gas=air) -> float:
"""Return the stagnation enthalpy
Notes
-----
Given the fluid state and a given enthalpy, return its stagnation
enthalpy
Parameters
----------
enthalpy : `float`
The enthalpy of the fluid\n
gas : `fluid`
A user defined fluid object\n
Returns
-------
float
Stagnation enthalpy\n
Examples
--------
"""
ht = enthalpy + gas.mass_velocity**2 / (gas.rho**2 * 2 * gas.gc)
return ht
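# Illustrative use of stagnation_enthalpy (added sketch, not from the package
# docs; the numbers are made up). The function reads mass_velocity, rho and gc
# from the fluid object, so those attributes must be set on the fluid first:
#
#     air.mass_velocity, air.rho, air.gc = 500, 1.2, 1
#     ht = stagnation_enthalpy(enthalpy=300e3, gas=air)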
#==================================================
#fanno temperature
#==================================================
#==================================================
#fanno mach from temperature
#==================================================
#==================================================
#fanno pressure
#==================================================
#==================================================
#fanno mach from pressure
#==================================================
#==================================================
#fanno temperature ratio
#==================================================
def fanno_temperature_ratio(mach_initial: float, mach_final: float, gas=air) -> float:
"""Return the temperature ratio for a fanno flow given two Mach numbers
Notes
-----
Given the two Mach numbers of a constant area adiabatic duct under the influence of
friction alone, return the temperature ratio of region two over region one.
Default fluid is air.
Parameters
----------
    mach_initial : `float`
The mach number at region 1 \n
mach_final : `float`
The mach number at region 2 \n
gas : `fluid`
A user defined fluid object. Default is air \n
Returns
-------
float
the fanno Temperature ratio T2 / T1\n
Examples
--------
>>> import gas_dynamics as gd
    >>> mach_initial, mach_final = 1, 1.2
    >>> T2_T1 = gd.fanno_temperature_ratio(mach_initial, mach_final)
>>> T2_T1
0.9316770186335405
>>>
"""
gamma = gas.gamma
T2_T1 = ( 1+ (gamma-1)/2 * mach_initial**2)/( 1+ (gamma-1)/2 * mach_final**2)
return T2_T1
#==================================================
#fanno pressure ratio
#==================================================
def fanno_pressure_ratio(mach_initial: float, mach_final: float, gas=air) -> float:
"""Return the pressure ratio for a fanno flow given two Mach numbers
Notes
-----
Given the two Mach numbers of a constant area adiabatic duct under the influence of
friction alone, return the pressure ratio of region two over region one.
Default fluid is air.
Parameters
----------
    mach_initial : `float`
The mach number at region 1 \n
mach_final : `float`
The mach number at region 2 \n
gas : `fluid`
A user defined fluid object. Default is air \n
Returns
-------
float
the fanno pressure ratio p2 / p1\n
Examples
--------
>>> import gas_dynamics as gd
>>> mach_initial, mach_final = 1.2, 1
>>> p2_p1 = gd.fanno_pressure_ratio(mach_initial,mach_final)
>>> p2_p1
1.243221621433604
>>>
"""
gamma = gas.gamma
p2_p1 = mach_initial/mach_final * (( 1+ (gamma-1)/2 * mach_initial**2)/( 1+ (gamma-1)/2 * mach_final**2))**.5
return p2_p1
#==================================================
#fanno density ratio
#==================================================
def fanno_density_ratio(mach_initial: float, mach_final: float, gas=air) -> float:
"""Return the density ratio for a fanno flow given two Mach numbers
Notes
-----
Given the two Mach numbers of a constant area adiabatic duct under the influence of
friction alone, return the density ratio of region two over region one.
Default fluid is air.
Parameters
----------
    mach_initial : `float`
The mach number at region 1 \n
mach_final : `float`
The mach number at region 2 \n
gas : `fluid`
A user defined fluid object. Default is air \n
Returns
-------
float
The fanno density ratio rho2 / rho1\n
Examples
--------
>>> import gas_dynamics as gd
    >>> mach_initial, mach_final = 1, 1.2
    >>> rho2_rho1 = gd.fanno_density_ratio(mach_initial, mach_final)
>>> rho2_rho1
0.8633483482177806
>>>
"""
gamma = gas.gamma
rho2_rho1 = mach_initial/mach_final * ((1+(gamma-1)/2*mach_final**2)/(1+(gamma-1)/2*mach_initial**2))**.5
return rho2_rho1
#==================================================
#fanno stagnation star ratio
#==================================================
def fanno_stagnation_pressure_ratio(mach_initial: float, mach_final: float, gas=air) -> float:
"""Return the stagnation pressure ratio pt2/pt1 for a fanno flow given two mach numbers
Notes
-----
Given the two Mach numbers of a constant area adiabatic duct under the influence of
friction alone, return the stagnation pressure ratio of region two over region one.
Default fluid is air.
Parameters
----------
    mach_initial : `float`
The mach number at region 1 \n
mach_final : `float`
The mach number at region 2 \n
gas : `fluid`
A user defined fluid object. Default is air \n
Returns
-------
float
The fanno stagnation pressure ratio pt2 / pt1\n
Examples
--------
>>> import gas_dynamics as gd
>>> mach_initial, mach_final = 1.2, 1
>>> pt2_pt1 = gd.fanno_stagnation_pressure_ratio(mach_final,mach_initial)
>>> pt2_pt1
1.0304397530864196
>>>
"""
gamma = gas.gamma
pt2_pt1 = mach_initial/mach_final * (( 1+ (gamma-1)/2 * mach_final**2)/( 1+ (gamma-1)/2 * mach_initial**2))**((gamma+1)/(2*(gamma-1)))
return pt2_pt1
#==================================================
#fanno temperature star ratio
#==================================================
def fanno_temperature_star_ratio(mach: float, gas=air) -> float:
"""Return the ratio of temperature over temperature where Mach equals one
Notes
-----
Given a Mach number of a constant area adiabatic duct under the influence of
friction alone, return the temperature ratio of region two over region one where
Mach in region one equals one. Default fluid is air.
Parameters
----------
mach : `float`
The mach number\n
gas : `fluid`
The user defined fluid object\n
Returns
-------
float
The fanno temperature ratio T / T*\n
Examples
--------
>>> import gas_dynamics as gd
>>> M = 1.2
>>> gd.fanno_temperature_star_ratio(M)
0.9316770186335405
>>>
"""
gamma = gas.gamma
T_Tstar = ((gamma+1)/2)/(1 + (gamma-1)/2 * mach**2)
return T_Tstar
#==================================================
#fanno pressure star ratio
#==================================================
def fanno_pressure_star_ratio(mach: float, gas=air) -> float:
"""Return the ratio of pressure over pressure where Mach equals one
Notes
-----
Given a Mach number of a constant area adiabatic duct under the influence of
friction alone, return the pressure ratio of region two over region one where
Mach in region one equals one. Default fluid is air.
Parameters
----------
mach : `float`
The mach number\n
gas : `fluid`
The user defined fluid object \n
Returns
-------
float
The fanno pressure ratio p / p* \n
Examples
--------
>>> import gas_dynamics as gd
>>> M = 1.2
>>> gd.fanno_pressure_star_ratio(M)
0.8043618151097336
>>>
"""
gamma = gas.gamma
p_pstar = 1/mach * (((gamma+1)/2)/(1 + (gamma-1)/2 * mach**2))**.5
return p_pstar
#==================================================
#fanno density star ratio
#==================================================
def fanno_density_star_ratio(mach: float, gas=air) -> float:
"""Return the ratio of density over density where Mach equals one
Notes
-----
Given a Mach number of a constant area adiabatic duct under the influence of
friction alone, return the density ratio of region two over region one where
Mach in region one equals one. Default fluid is air.
Parameters
----------
mach : `float`
The mach number\n
gas : `fluid`
The user defined fluid object \n
Returns
-------
float
The fanno density ratio rho / rho* \n
Examples
--------
>>> import gas_dynamics as gd
>>> M = 1.2
>>> gd.fanno_density_star_ratio(M)
0.8633483482177806
>>>
"""
gamma = gas.gamma
rho_rhostar = 1/mach * ((1 + (gamma-1)/2 * mach**2)/((gamma+1)/2))**.5
return rho_rhostar
#==================================================
#fanno velocity choked ratio
#==================================================
def fanno_velocity_star_ratio(mach: float, gas=air) -> float:
"""Return the ratio of velocity over velocity where Mach equals one
Notes
-----
Given a Mach number of a constant area adiabatic duct under the influence of
    friction alone, return the velocity ratio of region two over region one where
    Mach in region one equals one. Default fluid is air.
Parameters
----------
mach : `float`
The mach number\n
gas : `fluid`
The user defined fluid object\n
Returns
-------
float
The fanno velocity ratio V / V* \n
Examples
--------
>>> import gas_dynamics as gd
>>> M = 1.2
>>> v_vstar = gd.fanno_velocity_star_ratio(M)
>>> v_vstar
1.1582810137580164
>>>
"""
gamma = gas.gamma
v_vstar = mach/1 * (((gamma+1)/2)/(1 + (gamma-1)/2 * mach**2))**.5
return v_vstar
#==================================================
#fanno
#==================================================
def fanno_parameter(mach_initial: float, mach_final: float, gas=air) -> float:
"""Return the product of friction factor and length divided by diameter for two Mach numbers
Notes
-----
Given the two Mach numbers of a constant area adiabatic duct under the influence of
friction alone, return the fanno parameter that describes that system where fanno
parameter is the product of friction factor and length over diameter.
Default fluid is air.
Parameters
----------
    mach_initial : `float`
The mach number at region 1\n
mach_final : `float`
The mach number at region 2\n
gas : `fluid`
The user defined fluid object\n
Returns
-------
float
The fanno parameter f(x2-x1)/D \n
Examples
--------
>>> import gas_dynamics as gd
>>> mach_initial, mach_final = 3, 2
>>> fanno = gd.fanno_parameter(mach_initial, mach_final)
>>> fanno
0.21716290559704166
>>>
"""
gamma = gas.gamma
fanno = (gamma+1)/(2*gamma) * log((( 1+ (gamma-1)/2 * mach_final**2)/( 1+ (gamma-1)/2 * mach_initial**2))) - 1/gamma * (1/(mach_final**2) - 1/(mach_initial**2)) - (gamma+1)/(2*gamma) * log((mach_final**2)/(mach_initial**2))
return fanno
#==================================================
#fanno parameter max
#==================================================
def fanno_parameter_max(mach: float, gas=air) -> float:
"""Return the maximum product of friction factor and length divided by diameter
Notes
-----
Given a Mach number of a constant area adiabatic duct under the influence of
friction alone, determine the maximum length to diameter ratio for a fluid to
reach a Mach number of 1. Default fluid is air.
Parameters
----------
    mach : `float`
        The starting Mach number \n
gas : `fluid`
The user defined fluid object \n
Returns
-------
float
The maximum fanno parameter f(x*-x)/D \n
Examples
--------
>>> import gas_dynamics as gd
>>> M = 2
>>> fanno_max = gd.fanno_parameter_max(M)
>>> fanno_max
0.3049965025814798
>>>
"""
gamma = gas.gamma
fanno_ratio_max = (gamma + 1)/(2*gamma) * log(( (gamma+1)/2 * mach**2) / (1 + (gamma-1)/2 * mach**2)) + 1/gamma * (1/(mach**2)-1)
return fanno_ratio_max
#==================================================
#mach from fanno parameter
#==================================================
def mach_from_fanno(fanno: float, mach_initial: float, gas=air) -> float:
"""Return the Mach number that would result from the fanno parameter and initial mach number
Notes
-----
Given the Mach number and fanno parameter that describes that system, return the resulting
mach number. Default fluid is air.
Parameters
----------
fanno : `float`
The fanno parameter for the system \n
mach_initial : `float`
The starting Mach number \n
gas : `fluid`
The user defined fluid object \n
Returns
-------
Float
The resulting Mach number \n
Examples
--------
>>> import gas_dynamics as gd
>>> fanno, mach_initial = .3, 2.64
>>> mach_final = gd.mach_from_fanno(fanno=fanno, mach_initial=mach_initial)
>>> mach_final
1.567008305615555
>>>
"""
def mach_solve(M, mach_initial=mach_initial, fanno=fanno, gas=gas):
zero = fanno_parameter(mach_initial=mach_initial, mach_final=M, gas=gas) - fanno
return zero
if mach_initial < 1:
x0 = .5
elif mach_initial > 1:
x0 = 1.5
else:
x0=1
sol = fsolve(mach_solve, args=(mach_initial, fanno, gas), x0=x0)
    return sol[0]
/django_dans_notifications-1.1.15-py3-none-any.whl/django_dans_notifications/migrations/0001_initial.py
import uuid
import django.db.models.deletion
from django.db import migrations, models
import django_dans_notifications.models.email
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="NotificationBasic",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("datetime_created", models.DateTimeField(auto_now_add=True)),
("datetime_modified", models.DateTimeField(auto_now=True)),
("datetime_sent", models.DateTimeField(blank=True, null=True)),
("sent_successfully", models.BooleanField(default=False)),
(
"sender",
models.CharField(
help_text="This should be the sending users email.",
max_length=300,
),
),
(
"recipients",
models.CharField(
help_text="Comma separated list of email recipients.",
max_length=900,
),
),
("read", models.BooleanField(default=False)),
("message", models.CharField(max_length=600)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="NotificationEmailTemplate",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("datetime_created", models.DateTimeField(auto_now_add=True)),
("datetime_modified", models.DateTimeField(auto_now=True)),
("path", models.CharField(max_length=300)),
("nickname", models.CharField(max_length=300)),
],
options={
"abstract": False,
},
managers=[
(
"objects",
django_dans_notifications.models.email.NotificationEmailTemplateManager(),
),
],
),
migrations.CreateModel(
name="NotificationPush",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("datetime_created", models.DateTimeField(auto_now_add=True)),
("datetime_modified", models.DateTimeField(auto_now=True)),
("datetime_sent", models.DateTimeField(blank=True, null=True)),
("sent_successfully", models.BooleanField(default=False)),
(
"sender",
models.CharField(
help_text="This should be the sending users email.",
max_length=300,
),
),
(
"recipients",
models.CharField(
help_text="Comma separated list of email recipients.",
max_length=900,
),
),
("message", models.CharField(max_length=300)),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="NotificationEmail",
fields=[
(
"id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("datetime_created", models.DateTimeField(auto_now_add=True)),
("datetime_modified", models.DateTimeField(auto_now=True)),
("datetime_sent", models.DateTimeField(blank=True, null=True)),
("sent_successfully", models.BooleanField(default=False)),
(
"sender",
models.CharField(
help_text="This should be the sending users email.",
max_length=300,
),
),
(
"recipients",
models.CharField(
help_text="Comma separated list of email recipients.",
max_length=900,
),
),
("subject", models.CharField(max_length=300)),
("context", models.JSONField(blank=True, null=True)),
(
"template",
models.ForeignKey(
default=django_dans_notifications.models.email.get_default_template,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="template",
to="django_dans_notifications.notificationemailtemplate",
),
),
],
options={
"abstract": False,
},
managers=[
(
"objects",
django_dans_notifications.models.email.NotificationEmailManager(),
),
],
),
    ]
/omnidata_tools-0.0.23-py3-none-any.whl/omnidata_tools/torch/modules/channel_attention.py
import torch
from torch import nn
class ECALayer(nn.Module):
"""Constructs a ECA module.
Args:
channel: Number of channels of the input feature map
k_size: Adaptive selection of kernel size
"""
def __init__(self, channel, k_size=3):
super(ECALayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
# x: input features with shape [b, c, h, w]
b, c, h, w = x.size()
# feature descriptor on the global spatial information
y = self.avg_pool(x)
# Two different branches of ECA module
y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
# Multi-scale information fusion
y = self.sigmoid(y)
return x * y.expand_as(x)
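# Quick sanity check for ECALayer (illustrative addition, not from the
# original file): the module only re-weights channels, so the output shape
# matches the input shape.
#
#     x = torch.randn(2, 64, 32, 32)          # [batch, channels, H, W]
#     y = ECALayer(channel=64, k_size=3)(x)   # y.shape == x.shape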
class ChannelAttention(nn.Module):
def __init__(self, num_features, reduction):
super(ChannelAttention, self).__init__()
self.module = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(num_features, num_features // reduction, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(num_features // reduction, num_features, kernel_size=1),
nn.Sigmoid()
)
def forward(self, x):
return x * self.module(x)
class RCAB(nn.Module):
def __init__(self, num_features, reduction):
super(RCAB, self).__init__()
self.module = nn.Sequential(
nn.Conv2d(num_features, num_features, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(num_features, num_features, kernel_size=3, padding=1),
ChannelAttention(num_features, reduction)
)
def forward(self, x):
return x + self.module(x)
class RG(nn.Module):
def __init__(self, num_features, num_rcab, reduction):
super(RG, self).__init__()
self.module = [RCAB(num_features, reduction) for _ in range(num_rcab)]
self.module.append(nn.Conv2d(num_features, num_features, kernel_size=3, padding=1))
self.module = nn.Sequential(*self.module)
def forward(self, x):
return x + self.module(x)
class RCAN(nn.Module):
def __init__(self, scale, num_features, num_rg, num_rcab, reduction):
super(RCAN, self).__init__()
self.sf = nn.Conv2d(3, num_features, kernel_size=3, padding=1)
self.rgs = nn.Sequential(*[RG(num_features, num_rcab, reduction) for _ in range(num_rg)])
self.conv1 = nn.Conv2d(num_features, num_features, kernel_size=3, padding=1)
self.upscale = nn.Sequential(
nn.Conv2d(num_features, num_features * (scale ** 2), kernel_size=3, padding=1),
nn.PixelShuffle(scale)
)
self.conv2 = nn.Conv2d(num_features, 3, kernel_size=3, padding=1)
def forward(self, x):
x = self.sf(x)
residual = x
x = self.rgs(x)
x = self.conv1(x)
x += residual
x = self.upscale(x)
x = self.conv2(x)
return x
class CBAMChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=16):
super(CBAMChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
out = avg_out + max_out
return self.sigmoid(out)
class CBAMSpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(CBAMSpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
        return self.sigmoid(x)
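# How the two CBAM blocks above are typically composed (illustrative addition,
# not from the original file): both modules return attention maps, so the
# caller multiplies them back onto the features, channel gate first, then
# spatial gate.
#
#     x = torch.randn(2, 64, 32, 32)
#     x = CBAMChannelAttention(in_planes=64)(x) * x   # re-weight channels
#     x = CBAMSpatialAttention(kernel_size=7)(x) * x  # re-weight positions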
/discord-py-interactions-5.9.2.tar.gz/discord-py-interactions-5.9.2/interactions/models/internal/__init__.py
from .annotations import (
slash_attachment_option,
slash_bool_option,
slash_channel_option,
slash_float_option,
slash_int_option,
slash_mentionable_option,
slash_role_option,
slash_str_option,
slash_user_option,
)
from .callback import CallbackObject
from .active_voice_state import ActiveVoiceState
from .auto_defer import AutoDefer # purposely out of order to make sure auto_defer comes out as the deco
from .application_commands import (
application_commands_to_dict,
auto_defer,
CallbackType,
component_callback,
ComponentCommand,
context_menu,
user_context_menu,
message_context_menu,
ContextMenu,
global_autocomplete,
GlobalAutoComplete,
InteractionCommand,
LocalisedDesc,
LocalisedName,
LocalizedDesc,
LocalizedName,
modal_callback,
ModalCommand,
OptionType,
slash_command,
slash_default_member_permission,
slash_option,
SlashCommand,
SlashCommandChoice,
SlashCommandOption,
SlashCommandParameter,
subcommand,
sync_needed,
)
from .checks import dm_only, guild_only, has_any_role, has_id, has_role, is_owner
from .command import BaseCommand, check, cooldown, max_concurrency
from .context import (
AutocompleteContext,
BaseContext,
BaseInteractionContext,
ComponentContext,
ContextMenuContext,
InteractionContext,
ModalContext,
Resolved,
SlashContext,
)
from .converters import (
BaseChannelConverter,
ChannelConverter,
ConsumeRest,
CustomEmojiConverter,
DMChannelConverter,
DMConverter,
DMGroupConverter,
Greedy,
GuildCategoryConverter,
GuildChannelConverter,
GuildConverter,
GuildNewsConverter,
GuildNewsThreadConverter,
GuildPrivateThreadConverter,
GuildPublicThreadConverter,
GuildStageVoiceConverter,
GuildTextConverter,
GuildVoiceConverter,
IDConverter,
MemberConverter,
MessageableChannelConverter,
MessageConverter,
MODEL_TO_CONVERTER,
NoArgumentConverter,
PartialEmojiConverter,
RoleConverter,
SnowflakeConverter,
ThreadChannelConverter,
UserConverter,
VoiceChannelConverter,
)
from .cooldowns import (
Buckets,
Cooldown,
CooldownSystem,
MaxConcurrency,
SlidingWindowSystem,
ExponentialBackoffSystem,
LeakyBucketSystem,
TokenBucketSystem,
)
from .listener import listen, Listener
from .protocols import Converter
from .extension import Extension
from .wait import Wait
from .tasks import BaseTrigger, DateTrigger, IntervalTrigger, OrTrigger, Task, TimeTrigger
__all__ = (
"ActiveVoiceState",
"application_commands_to_dict",
"auto_defer",
"AutocompleteContext",
"AutoDefer",
"BaseChannelConverter",
"BaseCommand",
"BaseContext",
"BaseInteractionContext",
"BaseTrigger",
"Buckets",
"CallbackObject",
"CallbackType",
"ChannelConverter",
"check",
"component_callback",
"ComponentCommand",
"ComponentContext",
"context_menu",
"user_context_menu",
"message_context_menu",
"ConsumeRest",
"ContextMenu",
"ContextMenuContext",
"Converter",
"cooldown",
"Cooldown",
"CooldownSystem",
"SlidingWindowSystem",
"ExponentialBackoffSystem",
"LeakyBucketSystem",
"TokenBucketSystem",
"CustomEmojiConverter",
"DateTrigger",
"dm_only",
"DMChannelConverter",
"DMConverter",
"DMGroupConverter",
"Extension",
"global_autocomplete",
"GlobalAutoComplete",
"Greedy",
"guild_only",
"GuildCategoryConverter",
"GuildChannelConverter",
"GuildConverter",
"GuildNewsConverter",
"GuildNewsThreadConverter",
"GuildPrivateThreadConverter",
"GuildPublicThreadConverter",
"GuildStageVoiceConverter",
"GuildTextConverter",
"GuildVoiceConverter",
"has_any_role",
"has_id",
"has_role",
"IDConverter",
"InteractionCommand",
"InteractionContext",
"IntervalTrigger",
"is_owner",
"listen",
"Listener",
"LocalisedDesc",
"LocalisedName",
"LocalizedDesc",
"LocalizedName",
"max_concurrency",
"MaxConcurrency",
"MemberConverter",
"MessageableChannelConverter",
"MessageConverter",
"modal_callback",
"ModalCommand",
"ModalContext",
"MODEL_TO_CONVERTER",
"NoArgumentConverter",
"OptionType",
"OrTrigger",
"PartialEmojiConverter",
"Resolved",
"RoleConverter",
"slash_attachment_option",
"slash_bool_option",
"slash_channel_option",
"slash_command",
"slash_default_member_permission",
"slash_float_option",
"slash_int_option",
"slash_mentionable_option",
"slash_option",
"slash_role_option",
"slash_str_option",
"slash_user_option",
"SlashCommand",
"SlashCommandChoice",
"SlashCommandOption",
"SlashCommandParameter",
"SlashContext",
"SnowflakeConverter",
"subcommand",
"sync_needed",
"Task",
"ThreadChannelConverter",
"TimeTrigger",
"UserConverter",
"VoiceChannelConverter",
"Wait",
)
/zope.catalog-5.0.tar.gz/zope.catalog-5.0/src/zope/catalog/catalog.py
"""Catalog
"""
import BTrees
import zope.index.interfaces
from zope.annotation.interfaces import IAttributeAnnotatable
from zope.container.btree import BTreeContainer
from zope.interface import implementer
from zope.intid.interfaces import IIntIdAddedEvent
from zope.intid.interfaces import IIntIdRemovedEvent
from zope.intid.interfaces import IIntIds
from zope.lifecycleevent import IObjectModifiedEvent
from zope.lifecycleevent.interfaces import IObjectAddedEvent
from zope.location import location
from zope.location.interfaces import ILocationInfo
from zope import component
from zope.catalog.interfaces import ICatalog
from zope.catalog.interfaces import ICatalogIndex
from zope.catalog.interfaces import INoAutoIndex
from zope.catalog.interfaces import INoAutoReindex
class ResultSet:
"""Lazily accessed set of objects."""
def __init__(self, uids, uidutil):
self.uids = uids
self.uidutil = uidutil
def __len__(self):
return len(self.uids)
def __iter__(self):
for uid in self.uids:
obj = self.uidutil.getObject(uid)
yield obj
@implementer(ICatalog,
IAttributeAnnotatable,
zope.index.interfaces.IIndexSearch,
)
class Catalog(BTreeContainer):
family = BTrees.family32
def __init__(self, family=None):
super().__init__()
if family is not None:
self.family = family
def clear(self):
for index in self.values():
index.clear()
def index_doc(self, docid, texts):
"""Register the data in indexes of this catalog."""
for index in self.values():
index.index_doc(docid, texts)
def unindex_doc(self, docid):
"""Unregister the data from indexes of this catalog."""
for index in self.values():
index.unindex_doc(docid)
def _visitSublocations(self):
"""Restricts the access to the objects that live within
the nearest site if the catalog itself is locatable.
"""
uidutil = None
locatable = ILocationInfo(self, None)
if locatable is not None:
site = locatable.getNearestSite()
sm = site.getSiteManager()
uidutil = sm.queryUtility(IIntIds)
if uidutil not in [c.component for c in sm.registeredUtilities()]:
# we do not have a local inits utility
uidutil = component.getUtility(IIntIds, context=self)
for uid in uidutil:
obj = uidutil.getObject(uid)
if location.inside(obj, site):
yield uid, obj
return
if uidutil is None:
uidutil = component.getUtility(IIntIds)
for uid in uidutil:
yield uid, uidutil.getObject(uid)
def updateIndex(self, index):
for uid, obj in self._visitSublocations():
index.index_doc(uid, obj)
def updateIndexes(self):
for uid, obj in self._visitSublocations():
for index in self.values():
index.index_doc(uid, obj)
def apply(self, query):
results = []
for index_name, index_query in query.items():
index = self[index_name]
r = index.apply(index_query)
if r is None:
continue
if not r:
# empty results
return r
results.append((len(r), r))
if not results:
# no applicable indexes, so catalog was not applicable
return None
results.sort(key=lambda x: x[0]) # order from smallest to largest
_, result = results.pop(0)
for _, r in results:
_, result = self.family.IF.weightedIntersection(result, r)
return result
def searchResults(self, **searchterms):
sort_index = searchterms.pop('_sort_index', None)
limit = searchterms.pop('_limit', None)
reverse = searchterms.pop('_reverse', False)
results = self.apply(searchterms)
if results is not None:
if sort_index is not None:
index = self[sort_index]
if not zope.index.interfaces.IIndexSort.providedBy(index):
raise ValueError(
'Index %s does not support sorting.' %
sort_index)
results = list(
index.sort(
results,
limit=limit,
reverse=reverse))
else:
if reverse or limit:
results = list(results)
if reverse:
results.reverse()
if limit:
del results[limit:]
uidutil = component.getUtility(IIntIds)
results = ResultSet(results, uidutil)
return results
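# Illustrative query sketch (added; 'title' and the query value are
# hypothetical, and each index type defines its own query format):
#
#     results = catalog.searchResults(title=some_query,
#                                     _sort_index='title',
#                                     _limit=10, _reverse=True)
#
# searchResults returns a lazily resolved ResultSet backed by the IIntIds
# utility, or None when no registered index applied to the query.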
@component.adapter(ICatalogIndex, IObjectAddedEvent)
def indexAdded(index, event):
"""When an index is added to a catalog, we have to index existing objects
When an index is added, we tell it's parent to index it:
>>> class FauxCatalog:
... updated = None
... def updateIndex(self, index):
... self.updated = index
>>> class FauxIndex:
... pass
>>> index = FauxIndex()
>>> index.__parent__ = FauxCatalog()
>>> from zope.catalog.catalog import indexAdded
>>> indexAdded(index, None)
>>> index.__parent__.updated is index
True
"""
index.__parent__.updateIndex(index)
@component.adapter(IIntIdAddedEvent)
def indexDocSubscriber(event):
"""A subscriber to IntIdAddedEvent"""
ob = event.object
if INoAutoIndex.providedBy(ob):
return
for cat in component.getAllUtilitiesRegisteredFor(ICatalog, context=ob):
id = component.getUtility(IIntIds, context=cat).getId(ob)
cat.index_doc(id, ob)
@component.adapter(IObjectModifiedEvent)
def reindexDocSubscriber(event):
"""A subscriber to ObjectModifiedEvent"""
ob = event.object
if INoAutoReindex.providedBy(ob):
return
for cat in component.getAllUtilitiesRegisteredFor(ICatalog, context=ob):
id = component.getUtility(IIntIds, context=cat).queryId(ob)
if id is not None:
cat.index_doc(id, ob)
@component.adapter(IIntIdRemovedEvent)
def unindexDocSubscriber(event):
"""A subscriber to IntIdRemovedEvent"""
ob = event.object
for cat in component.getAllUtilitiesRegisteredFor(ICatalog, context=ob):
id = component.getUtility(IIntIds, context=cat).queryId(ob)
if id is not None:
cat.unindex_doc(id) | PypiClean |
/drypatrick-2021.7.5.tar.gz/drypatrick-2021.7.5/homeassistant/components/satel_integra/__init__.py | import collections
import logging
from satel_integra.satel_integra import AsyncSatel
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
DEFAULT_ALARM_NAME = "satel_integra"
DEFAULT_PORT = 7094
DEFAULT_CONF_ARM_HOME_MODE = 1
DEFAULT_DEVICE_PARTITION = 1
DEFAULT_ZONE_TYPE = "motion"
_LOGGER = logging.getLogger(__name__)
DOMAIN = "satel_integra"
DATA_SATEL = "satel_integra"
CONF_DEVICE_CODE = "code"
CONF_DEVICE_PARTITIONS = "partitions"
CONF_ARM_HOME_MODE = "arm_home_mode"
CONF_ZONE_NAME = "name"
CONF_ZONE_TYPE = "type"
CONF_ZONES = "zones"
CONF_OUTPUTS = "outputs"
CONF_SWITCHABLE_OUTPUTS = "switchable_outputs"
ZONES = "zones"
SIGNAL_PANEL_MESSAGE = "satel_integra.panel_message"
SIGNAL_PANEL_ARM_AWAY = "satel_integra.panel_arm_away"
SIGNAL_PANEL_ARM_HOME = "satel_integra.panel_arm_home"
SIGNAL_PANEL_DISARM = "satel_integra.panel_disarm"
SIGNAL_ZONES_UPDATED = "satel_integra.zones_updated"
SIGNAL_OUTPUTS_UPDATED = "satel_integra.outputs_updated"
ZONE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONE_NAME): cv.string,
vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): cv.string,
}
)
EDITABLE_OUTPUT_SCHEMA = vol.Schema({vol.Required(CONF_ZONE_NAME): cv.string})
PARTITION_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONE_NAME): cv.string,
vol.Optional(CONF_ARM_HOME_MODE, default=DEFAULT_CONF_ARM_HOME_MODE): vol.In(
[1, 2, 3]
),
}
)
def is_alarm_code_necessary(value):
"""Check if alarm code must be configured."""
if value.get(CONF_SWITCHABLE_OUTPUTS) and CONF_DEVICE_CODE not in value:
raise vol.Invalid("You need to specify alarm code to use switchable_outputs")
return value
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DEVICE_CODE): cv.string,
vol.Optional(CONF_DEVICE_PARTITIONS, default={}): {
vol.Coerce(int): PARTITION_SCHEMA
},
vol.Optional(CONF_ZONES, default={}): {vol.Coerce(int): ZONE_SCHEMA},
vol.Optional(CONF_OUTPUTS, default={}): {vol.Coerce(int): ZONE_SCHEMA},
vol.Optional(CONF_SWITCHABLE_OUTPUTS, default={}): {
vol.Coerce(int): EDITABLE_OUTPUT_SCHEMA
},
},
is_alarm_code_necessary,
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Satel Integra component."""
conf = config.get(DOMAIN)
zones = conf.get(CONF_ZONES)
outputs = conf.get(CONF_OUTPUTS)
switchable_outputs = conf.get(CONF_SWITCHABLE_OUTPUTS)
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
partitions = conf.get(CONF_DEVICE_PARTITIONS)
monitored_outputs = collections.OrderedDict(
list(outputs.items()) + list(switchable_outputs.items())
)
controller = AsyncSatel(host, port, hass.loop, zones, monitored_outputs, partitions)
hass.data[DATA_SATEL] = controller
result = await controller.connect()
if not result:
return False
@callback
def _close(*_):
controller.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close)
_LOGGER.debug("Arm home config: %s, mode: %s ", conf, conf.get(CONF_ARM_HOME_MODE))
hass.async_create_task(
async_load_platform(hass, "alarm_control_panel", DOMAIN, conf, config)
)
hass.async_create_task(
async_load_platform(
hass,
"binary_sensor",
DOMAIN,
{CONF_ZONES: zones, CONF_OUTPUTS: outputs},
config,
)
)
hass.async_create_task(
async_load_platform(
hass,
"switch",
DOMAIN,
{
CONF_SWITCHABLE_OUTPUTS: switchable_outputs,
CONF_DEVICE_CODE: conf.get(CONF_DEVICE_CODE),
},
config,
)
)
@callback
def alarm_status_update_callback():
"""Send status update received from alarm to Home Assistant."""
_LOGGER.debug("Sending request to update panel state")
async_dispatcher_send(hass, SIGNAL_PANEL_MESSAGE)
@callback
def zones_update_callback(status):
"""Update zone objects as per notification from the alarm."""
_LOGGER.debug("Zones callback, status: %s", status)
async_dispatcher_send(hass, SIGNAL_ZONES_UPDATED, status[ZONES])
@callback
def outputs_update_callback(status):
"""Update zone objects as per notification from the alarm."""
_LOGGER.debug("Outputs updated callback , status: %s", status)
async_dispatcher_send(hass, SIGNAL_OUTPUTS_UPDATED, status["outputs"])
# Create a task instead of adding a tracking job, since this task will
# run until the connection to satel_integra is closed.
hass.loop.create_task(controller.keep_alive())
hass.loop.create_task(
controller.monitor_status(
alarm_status_update_callback, zones_update_callback, outputs_update_callback
)
)
return True | PypiClean |
/goatherd-0.1.0.tar.gz/goatherd-0.1.0/README.md | # Goat Herd
[![PyPI](https://img.shields.io/pypi/v/goatherd.svg)](https://pypi.python.org/pypi/goatherd/#history)
Partially-observed visual reinforcement learning domain.
## Play Yourself
You can play the game yourself with an interactive window and keyboard input.
The mapping from keys to actions, health level, and inventory state are printed
to the terminal.
```sh
# Install with GUI
pip3 install 'goatherd[gui]'
# Start the game
goatherd
# Alternative way to start the game
python3 -m goatherd.run_gui
```
![Goat Herd Video](https://github.com/danijar/goatherd/raw/main/media/video.gif)
The following optional command line flags are available:
| Flag | Default | Description |
| :--- | :-----: | :---------- |
| `--window <width> <height>` | 800 800 | Window size in pixels, used as width and height. |
| `--fps <integer>` | 5 | How many times to update the environment per second. |
| `--record <filename>.mp4` | None | Record a video of the trajectory. |
| `--num_cows` | 3 | The number of cows in the environment. |
| `--view <width> <height>` | 7 7 | The layout size in cells; determines view distance. |
| `--length <integer>` | None | Time limit for the episode. |
| `--seed <integer>` | None | Determines world generation and creatures. |
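For example, a possible invocation combining several of the flags above (the video filename is just a placeholder):
```sh
# Bigger window, faster updates, more cows, a fixed seed, and a recorded video
goatherd --window 1024 1024 --fps 10 --num_cows 5 --seed 42 --record episode.mp4
```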
## Training Agents
Installation: `pip3 install -U goatherd`
The environment follows the [OpenAI Gym][gym] interface:
```py
import goatherd
env = goatherd.Env(seed=0)
obs = env.reset()
assert obs.shape == (64, 64, 3)
done = False
while not done:
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
```
[gym]: https://github.com/openai/gym
## Environment Details
### Reward
A reward of +1 is given every time the player milks one of the cows.
### Termination
Episodes terminate after 1000 steps.
### Observation Space
Each observation is an RGB image that shows a local view of the world around
the player, as well as the inventory state of the agent.
### Action Space
The action space is categorical. Each action is an integer index representing
one of the possible actions:
| Integer | Name | Description |
| :-----: | :--- | :---------- |
| 0 | `noop` | Do nothing. |
| 1 | `move_left` | Walk to the left. |
| 2 | `move_right` | Walk to the right. |
| 3 | `move_up` | Walk upwards. |
| 4 | `move_down` | Walk downwards. |
| 5 | `do` | Pick up a placed fence or milk a cow. |
| 6 | `place_fence` | Place a fence from the inventory. |
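For example, assuming the integer indices follow the table above in order, an agent can issue a named action like this:
```py
import goatherd
# Index i in this list corresponds to action i in the table above.
ACTIONS = ['noop', 'move_left', 'move_right', 'move_up', 'move_down', 'do', 'place_fence']
env = goatherd.Env(seed=0)
obs = env.reset()
obs, reward, done, info = env.step(ACTIONS.index('do'))  # try to milk a nearby cow
```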
## Questions
Please [open an issue][issues] on Github.
[issues]: https://github.com/danijar/goatherd/issues
| PypiClean |
/discord.py_fork-2.0.0a0-py3-none-any.whl/discord/http.py | from __future__ import annotations
import asyncio
import json
import logging
import sys
from typing import (
Any,
ClassVar,
Coroutine,
Dict,
Iterable,
List,
Optional,
Sequence,
TYPE_CHECKING,
Tuple,
Type,
TypeVar,
Union,
)
from urllib.parse import quote as _uriquote
import weakref
import aiohttp
from .errors import HTTPException, Forbidden, NotFound, LoginFailure, DiscordServerError, GatewayNotFound, InvalidArgument
from .gateway import DiscordClientWebSocketResponse
from . import __version__, utils
from .utils import MISSING
_log = logging.getLogger(__name__)
if TYPE_CHECKING:
from .file import File
from .enums import (
AuditLogAction,
InteractionResponseType,
)
from .types import (
appinfo,
audit_log,
channel,
components,
emoji,
embed,
guild,
integration,
interactions,
invite,
member,
message,
template,
role,
user,
webhook,
channel,
widget,
threads,
voice,
sticker,
)
from .types.snowflake import Snowflake, SnowflakeList
from types import TracebackType
T = TypeVar('T')
BE = TypeVar('BE', bound=BaseException)
MU = TypeVar('MU', bound='MaybeUnlock')
Response = Coroutine[Any, Any, T]
async def json_or_text(response: aiohttp.ClientResponse) -> Union[Dict[str, Any], str]:
text = await response.text(encoding='utf-8')
try:
if response.headers['content-type'] == 'application/json':
return utils._from_json(text)
except KeyError:
# Thanks Cloudflare
pass
return text
class Route:
BASE: ClassVar[str] = 'https://discord.com/api/v8'
def __init__(self, method: str, path: str, **parameters: Any) -> None:
self.path: str = path
self.method: str = method
url = self.BASE + self.path
if parameters:
url = url.format_map({k: _uriquote(v) if isinstance(v, str) else v for k, v in parameters.items()})
self.url: str = url
# major parameters:
self.channel_id: Optional[Snowflake] = parameters.get('channel_id')
self.guild_id: Optional[Snowflake] = parameters.get('guild_id')
self.webhook_id: Optional[Snowflake] = parameters.get('webhook_id')
self.webhook_token: Optional[str] = parameters.get('webhook_token')
@property
def bucket(self) -> str:
# the bucket is just method + path w/ major parameters
return f'{self.channel_id}:{self.guild_id}:{self.path}'
class MaybeUnlock:
def __init__(self, lock: asyncio.Lock) -> None:
self.lock: asyncio.Lock = lock
self._unlock: bool = True
def __enter__(self: MU) -> MU:
return self
def defer(self) -> None:
self._unlock = False
def __exit__(
self,
exc_type: Optional[Type[BE]],
exc: Optional[BE],
traceback: Optional[TracebackType],
) -> None:
if self._unlock:
self.lock.release()
# For some reason, the Discord voice websocket expects this header to be
# completely lowercase while aiohttp respects spec and does it as case-insensitive
aiohttp.hdrs.WEBSOCKET = 'websocket' # type: ignore
class HTTPClient:
"""Represents an HTTP client sending HTTP requests to the Discord API."""
def __init__(
self,
connector: Optional[aiohttp.BaseConnector] = None,
*,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
unsync_clock: bool = True,
) -> None:
self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop() if loop is None else loop
self.connector = connector
self.__session: aiohttp.ClientSession = MISSING # filled in static_login
self._locks: weakref.WeakValueDictionary = weakref.WeakValueDictionary()
self._global_over: asyncio.Event = asyncio.Event()
self._global_over.set()
self.token: Optional[str] = None
self.bot_token: bool = False
self.proxy: Optional[str] = proxy
self.proxy_auth: Optional[aiohttp.BasicAuth] = proxy_auth
self.use_clock: bool = not unsync_clock
user_agent = 'DiscordBot (https://github.com/veni-vidi-code/discord.py {0}) Python/{1[0]}.{1[1]} aiohttp/{2}'
self.user_agent: str = user_agent.format(__version__, sys.version_info, aiohttp.__version__)
def recreate(self) -> None:
if self.__session.closed:
self.__session = aiohttp.ClientSession(
connector=self.connector, ws_response_class=DiscordClientWebSocketResponse
)
async def ws_connect(self, url: str, *, compress: int = 0) -> Any:
kwargs = {
'proxy_auth': self.proxy_auth,
'proxy': self.proxy,
'max_msg_size': 0,
'timeout': 30.0,
'autoclose': False,
'headers': {
'User-Agent': self.user_agent,
},
'compress': compress,
}
return await self.__session.ws_connect(url, **kwargs)
async def request(
self,
route: Route,
*,
files: Optional[Sequence[File]] = None,
form: Optional[Iterable[Dict[str, Any]]] = None,
**kwargs: Any,
) -> Any:
bucket = route.bucket
method = route.method
url = route.url
lock = self._locks.get(bucket)
if lock is None:
lock = asyncio.Lock()
if bucket is not None:
self._locks[bucket] = lock
# header creation
headers: Dict[str, str] = {
'User-Agent': self.user_agent,
}
if self.token is not None:
headers['Authorization'] = 'Bot ' + self.token
# some checking if it's a JSON request
if 'json' in kwargs:
headers['Content-Type'] = 'application/json'
kwargs['data'] = utils._to_json(kwargs.pop('json'))
try:
reason = kwargs.pop('reason')
except KeyError:
pass
else:
if reason:
headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
kwargs['headers'] = headers
# Proxy support
if self.proxy is not None:
kwargs['proxy'] = self.proxy
if self.proxy_auth is not None:
kwargs['proxy_auth'] = self.proxy_auth
if not self._global_over.is_set():
# wait until the global lock is complete
await self._global_over.wait()
response: Optional[aiohttp.ClientResponse] = None
data: Optional[Union[Dict[str, Any], str]] = None
await lock.acquire()
with MaybeUnlock(lock) as maybe_lock:
for tries in range(5):
if files:
for f in files:
f.reset(seek=tries)
if form:
form_data = aiohttp.FormData()
for params in form:
form_data.add_field(**params)
kwargs['data'] = form_data
try:
async with self.__session.request(method, url, **kwargs) as response:
_log.debug('%s %s with %s has returned %s', method, url, kwargs.get('data'), response.status)
# even errors have text involved in them so this is safe to call
data = await json_or_text(response)
# check if we have rate limit header information
remaining = response.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and response.status != 429:
# we've depleted our current bucket
delta = utils._parse_ratelimit_header(response, use_clock=self.use_clock)
_log.debug('A rate limit bucket has been exhausted (bucket: %s, retry: %s).', bucket, delta)
maybe_lock.defer()
self.loop.call_later(delta, lock.release)
# the request was successful so just return the text/json
if 300 > response.status >= 200:
_log.debug('%s %s has received %s', method, url, data)
return data
# we are being rate limited
if response.status == 429:
if not response.headers.get('Via') or isinstance(data, str):
# Banned by Cloudflare more than likely.
raise HTTPException(response, data)
fmt = 'We are being rate limited. Retrying in %.2f seconds. Handled under the bucket "%s"'
# sleep a bit
retry_after: float = data['retry_after']
_log.warning(fmt, retry_after, bucket)
# check if it's a global rate limit
is_global = data.get('global', False)
if is_global:
_log.warning('Global rate limit has been hit. Retrying in %.2f seconds.', retry_after)
self._global_over.clear()
await asyncio.sleep(retry_after)
_log.debug('Done sleeping for the rate limit. Retrying...')
# release the global lock now that the
# global rate limit has passed
if is_global:
self._global_over.set()
_log.debug('Global rate limit is now over.')
continue
# we've received a 500, 502, or 504, unconditional retry
if response.status in {500, 502, 504}:
await asyncio.sleep(1 + tries * 2)
continue
# the usual error cases
if response.status == 403:
raise Forbidden(response, data)
elif response.status == 404:
raise NotFound(response, data)
elif response.status >= 500:
raise DiscordServerError(response, data)
else:
raise HTTPException(response, data)
# This is handling exceptions from the request
except OSError as e:
# Connection reset by peer
if tries < 4 and e.errno in (54, 10054):
await asyncio.sleep(1 + tries * 2)
continue
raise
if response is not None:
# We've run out of retries, raise.
if response.status >= 500:
raise DiscordServerError(response, data)
raise HTTPException(response, data)
raise RuntimeError('Unreachable code in HTTP handling')
async def get_from_cdn(self, url: str) -> bytes:
async with self.__session.get(url) as resp:
if resp.status == 200:
return await resp.read()
elif resp.status == 404:
raise NotFound(resp, 'asset not found')
elif resp.status == 403:
raise Forbidden(resp, 'cannot retrieve asset')
else:
raise HTTPException(resp, 'failed to get asset')
# state management
async def close(self) -> None:
if self.__session:
await self.__session.close()
# login management
async def static_login(self, token: str) -> user.User:
# Necessary to get aiohttp to stop complaining about session creation
self.__session = aiohttp.ClientSession(connector=self.connector, ws_response_class=DiscordClientWebSocketResponse)
old_token = self.token
self.token = token
try:
data = await self.request(Route('GET', '/users/@me'))
except HTTPException as exc:
self.token = old_token
if exc.status == 401:
raise LoginFailure('Improper token has been passed.') from exc
raise
return data
def logout(self) -> Response[None]:
return self.request(Route('POST', '/auth/logout'))
# Group functionality
def start_group(self, user_id: Snowflake, recipients: List[int]) -> Response[channel.GroupDMChannel]:
payload = {
'recipients': recipients,
}
return self.request(Route('POST', '/users/{user_id}/channels', user_id=user_id), json=payload)
def leave_group(self, channel_id) -> Response[None]:
return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id))
# Message management
def start_private_message(self, user_id: Snowflake) -> Response[channel.DMChannel]:
payload = {
'recipient_id': user_id,
}
return self.request(Route('POST', '/users/@me/channels'), json=payload)
def send_message(
self,
channel_id: Snowflake,
content: Optional[str],
*,
tts: bool = False,
embed: Optional[embed.Embed] = None,
embeds: Optional[List[embed.Embed]] = None,
nonce: Optional[str] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
message_reference: Optional[message.MessageReference] = None,
stickers: Optional[List[sticker.StickerItem]] = None,
components: Optional[List[components.Component]] = None,
) -> Response[message.Message]:
r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
payload = {}
if content:
payload['content'] = content
if tts:
payload['tts'] = True
if embed:
payload['embeds'] = [embed]
if embeds:
payload['embeds'] = embeds
if nonce:
payload['nonce'] = nonce
if allowed_mentions:
payload['allowed_mentions'] = allowed_mentions
if message_reference:
payload['message_reference'] = message_reference
if components:
payload['components'] = components
if stickers:
payload['sticker_ids'] = stickers
return self.request(r, json=payload)
def send_typing(self, channel_id: Snowflake) -> Response[None]:
return self.request(Route('POST', '/channels/{channel_id}/typing', channel_id=channel_id))
def send_multipart_helper(
self,
route: Route,
*,
files: Sequence[File],
content: Optional[str] = None,
tts: bool = False,
embed: Optional[embed.Embed] = None,
embeds: Optional[Iterable[Optional[embed.Embed]]] = None,
nonce: Optional[str] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
message_reference: Optional[message.MessageReference] = None,
stickers: Optional[List[sticker.StickerItem]] = None,
components: Optional[List[components.Component]] = None,
) -> Response[message.Message]:
form = []
payload: Dict[str, Any] = {'tts': tts}
if content:
payload['content'] = content
if embed:
payload['embeds'] = [embed]
if embeds:
payload['embeds'] = embeds
if nonce:
payload['nonce'] = nonce
if allowed_mentions:
payload['allowed_mentions'] = allowed_mentions
if message_reference:
payload['message_reference'] = message_reference
if components:
payload['components'] = components
if stickers:
payload['sticker_ids'] = stickers
form.append({'name': 'payload_json', 'value': utils._to_json(payload)})
if len(files) == 1:
file = files[0]
form.append(
{
'name': 'file',
'value': file.fp,
'filename': file.filename,
'content_type': 'application/octet-stream',
}
)
else:
for index, file in enumerate(files):
form.append(
{
'name': f'file{index}',
'value': file.fp,
'filename': file.filename,
'content_type': 'application/octet-stream',
}
)
return self.request(route, form=form, files=files)
def send_files(
self,
channel_id: Snowflake,
*,
files: Sequence[File],
content: Optional[str] = None,
tts: bool = False,
embed: Optional[embed.Embed] = None,
embeds: Optional[List[embed.Embed]] = None,
nonce: Optional[str] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
message_reference: Optional[message.MessageReference] = None,
stickers: Optional[List[sticker.StickerItem]] = None,
components: Optional[List[components.Component]] = None,
) -> Response[message.Message]:
r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
return self.send_multipart_helper(
r,
files=files,
content=content,
tts=tts,
embed=embed,
embeds=embeds,
nonce=nonce,
allowed_mentions=allowed_mentions,
message_reference=message_reference,
stickers=stickers,
components=components,
)
def delete_message(
self, channel_id: Snowflake, message_id: Snowflake, *, reason: Optional[str] = None
) -> Response[None]:
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r, reason=reason)
def delete_messages(
self, channel_id: Snowflake, message_ids: SnowflakeList, *, reason: Optional[str] = None
) -> Response[None]:
r = Route('POST', '/channels/{channel_id}/messages/bulk-delete', channel_id=channel_id)
payload = {
'messages': message_ids,
}
return self.request(r, json=payload, reason=reason)
def edit_message(self, channel_id: Snowflake, message_id: Snowflake, **fields: Any) -> Response[message.Message]:
r = Route('PATCH', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r, json=fields)
def add_reaction(self, channel_id: Snowflake, message_id: Snowflake, emoji: str) -> Response[None]:
r = Route(
'PUT',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
return self.request(r)
def remove_reaction(
self, channel_id: Snowflake, message_id: Snowflake, emoji: str, member_id: Snowflake
) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{member_id}',
channel_id=channel_id,
message_id=message_id,
member_id=member_id,
emoji=emoji,
)
return self.request(r)
def remove_own_reaction(self, channel_id: Snowflake, message_id: Snowflake, emoji: str) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
return self.request(r)
def get_reaction_users(
self,
channel_id: Snowflake,
message_id: Snowflake,
emoji: str,
limit: int,
after: Optional[Snowflake] = None,
) -> Response[List[user.User]]:
r = Route(
'GET',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
params: Dict[str, Any] = {
'limit': limit,
}
if after:
params['after'] = after
return self.request(r, params=params)
def clear_reactions(self, channel_id: Snowflake, message_id: Snowflake) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions',
channel_id=channel_id,
message_id=message_id,
)
return self.request(r)
def clear_single_reaction(self, channel_id: Snowflake, message_id: Snowflake, emoji: str) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
return self.request(r)
def get_message(self, channel_id: Snowflake, message_id: Snowflake) -> Response[message.Message]:
r = Route('GET', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r)
def get_channel(self, channel_id: Snowflake) -> Response[channel.Channel]:
r = Route('GET', '/channels/{channel_id}', channel_id=channel_id)
return self.request(r)
def logs_from(
self,
channel_id: Snowflake,
limit: int,
before: Optional[Snowflake] = None,
after: Optional[Snowflake] = None,
around: Optional[Snowflake] = None,
) -> Response[List[message.Message]]:
params: Dict[str, Any] = {
'limit': limit,
}
if before is not None:
params['before'] = before
if after is not None:
params['after'] = after
if around is not None:
params['around'] = around
return self.request(Route('GET', '/channels/{channel_id}/messages', channel_id=channel_id), params=params)
def publish_message(self, channel_id: Snowflake, message_id: Snowflake) -> Response[message.Message]:
return self.request(
Route(
'POST',
'/channels/{channel_id}/messages/{message_id}/crosspost',
channel_id=channel_id,
message_id=message_id,
)
)
def pin_message(self, channel_id: Snowflake, message_id: Snowflake, reason: Optional[str] = None) -> Response[None]:
r = Route(
'PUT',
'/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id,
message_id=message_id,
)
return self.request(r, reason=reason)
def unpin_message(self, channel_id: Snowflake, message_id: Snowflake, reason: Optional[str] = None) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id,
message_id=message_id,
)
return self.request(r, reason=reason)
def pins_from(self, channel_id: Snowflake) -> Response[List[message.Message]]:
return self.request(Route('GET', '/channels/{channel_id}/pins', channel_id=channel_id))
# Member management
def kick(self, user_id: Snowflake, guild_id: Snowflake, reason: Optional[str] = None) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
if reason:
# thanks aiohttp
r.url = f'{r.url}?reason={_uriquote(reason)}'
return self.request(r)
def ban(
self,
user_id: Snowflake,
guild_id: Snowflake,
delete_message_days: int = 1,
reason: Optional[str] = None,
) -> Response[None]:
r = Route('PUT', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id)
params = {
'delete_message_days': delete_message_days,
}
return self.request(r, params=params, reason=reason)
def unban(self, user_id: Snowflake, guild_id: Snowflake, *, reason: Optional[str] = None) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, reason=reason)
def guild_voice_state(
self,
user_id: Snowflake,
guild_id: Snowflake,
*,
mute: Optional[bool] = None,
deafen: Optional[bool] = None,
reason: Optional[str] = None,
) -> Response[member.Member]:
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
payload = {}
if mute is not None:
payload['mute'] = mute
if deafen is not None:
payload['deaf'] = deafen
return self.request(r, json=payload, reason=reason)
def edit_profile(self, payload: Dict[str, Any]) -> Response[user.User]:
return self.request(Route('PATCH', '/users/@me'), json=payload)
def change_my_nickname(
self,
guild_id: Snowflake,
nickname: str,
*,
reason: Optional[str] = None,
) -> Response[member.Nickname]:
r = Route('PATCH', '/guilds/{guild_id}/members/@me/nick', guild_id=guild_id)
payload = {
'nick': nickname,
}
return self.request(r, json=payload, reason=reason)
def change_nickname(
self,
guild_id: Snowflake,
user_id: Snowflake,
nickname: str,
*,
reason: Optional[str] = None,
) -> Response[member.Member]:
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
payload = {
'nick': nickname,
}
return self.request(r, json=payload, reason=reason)
def edit_my_voice_state(self, guild_id: Snowflake, payload: Dict[str, Any]) -> Response[None]:
r = Route('PATCH', '/guilds/{guild_id}/voice-states/@me', guild_id=guild_id)
return self.request(r, json=payload)
def edit_voice_state(self, guild_id: Snowflake, user_id: Snowflake, payload: Dict[str, Any]) -> Response[None]:
r = Route('PATCH', '/guilds/{guild_id}/voice-states/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, json=payload)
def edit_member(
self,
guild_id: Snowflake,
user_id: Snowflake,
*,
reason: Optional[str] = None,
**fields: Any,
) -> Response[member.MemberWithUser]:
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, json=fields, reason=reason)
# Channel management
def edit_channel(
self,
channel_id: Snowflake,
*,
reason: Optional[str] = None,
**options: Any,
) -> Response[channel.Channel]:
r = Route('PATCH', '/channels/{channel_id}', channel_id=channel_id)
valid_keys = (
'name',
'parent_id',
'topic',
'bitrate',
'nsfw',
'user_limit',
'position',
'permission_overwrites',
'rate_limit_per_user',
'type',
'rtc_region',
'video_quality_mode',
'archived',
'auto_archive_duration',
'locked',
'invitable',
'default_auto_archive_duration',
)
payload = {k: v for k, v in options.items() if k in valid_keys}
return self.request(r, reason=reason, json=payload)
def bulk_channel_update(
self,
guild_id: Snowflake,
data: List[guild.ChannelPositionUpdate],
*,
reason: Optional[str] = None,
) -> Response[None]:
r = Route('PATCH', '/guilds/{guild_id}/channels', guild_id=guild_id)
return self.request(r, json=data, reason=reason)
def create_channel(
self,
guild_id: Snowflake,
channel_type: channel.ChannelType,
*,
reason: Optional[str] = None,
**options: Any,
) -> Response[channel.GuildChannel]:
payload = {
'type': channel_type,
}
valid_keys = (
'name',
'parent_id',
'topic',
'bitrate',
'nsfw',
'user_limit',
'position',
'permission_overwrites',
'rate_limit_per_user',
'rtc_region',
'video_quality_mode',
'auto_archive_duration',
)
payload.update({k: v for k, v in options.items() if k in valid_keys and v is not None})
return self.request(Route('POST', '/guilds/{guild_id}/channels', guild_id=guild_id), json=payload, reason=reason)
def delete_channel(
self,
channel_id: Snowflake,
*,
reason: Optional[str] = None,
) -> Response[None]:
return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id), reason=reason)
# Thread management
def start_thread_with_message(
self,
channel_id: Snowflake,
message_id: Snowflake,
*,
name: str,
auto_archive_duration: threads.ThreadArchiveDuration,
reason: Optional[str] = None,
) -> Response[threads.Thread]:
payload = {
'name': name,
'auto_archive_duration': auto_archive_duration,
}
route = Route(
'POST', '/channels/{channel_id}/messages/{message_id}/threads', channel_id=channel_id, message_id=message_id
)
return self.request(route, json=payload, reason=reason)
def start_thread_without_message(
self,
channel_id: Snowflake,
*,
name: str,
auto_archive_duration: threads.ThreadArchiveDuration,
type: threads.ThreadType,
invitable: bool = True,
reason: Optional[str] = None,
) -> Response[threads.Thread]:
payload = {
'name': name,
'auto_archive_duration': auto_archive_duration,
'type': type,
'invitable': invitable,
}
route = Route('POST', '/channels/{channel_id}/threads', channel_id=channel_id)
return self.request(route, json=payload, reason=reason)
def join_thread(self, channel_id: Snowflake) -> Response[None]:
return self.request(Route('POST', '/channels/{channel_id}/thread-members/@me', channel_id=channel_id))
def add_user_to_thread(self, channel_id: Snowflake, user_id: Snowflake) -> Response[None]:
return self.request(
Route('PUT', '/channels/{channel_id}/thread-members/{user_id}', channel_id=channel_id, user_id=user_id)
)
def leave_thread(self, channel_id: Snowflake) -> Response[None]:
return self.request(Route('DELETE', '/channels/{channel_id}/thread-members/@me', channel_id=channel_id))
def remove_user_from_thread(self, channel_id: Snowflake, user_id: Snowflake) -> Response[None]:
route = Route('DELETE', '/channels/{channel_id}/thread-members/{user_id}', channel_id=channel_id, user_id=user_id)
return self.request(route)
def get_public_archived_threads(
self, channel_id: Snowflake, before: Optional[Snowflake] = None, limit: int = 50
) -> Response[threads.ThreadPaginationPayload]:
route = Route('GET', '/channels/{channel_id}/threads/archived/public', channel_id=channel_id)
params = {}
if before:
params['before'] = before
params['limit'] = limit
return self.request(route, params=params)
def get_private_archived_threads(
self, channel_id: Snowflake, before: Optional[Snowflake] = None, limit: int = 50
) -> Response[threads.ThreadPaginationPayload]:
route = Route('GET', '/channels/{channel_id}/threads/archived/private', channel_id=channel_id)
params = {}
if before:
params['before'] = before
params['limit'] = limit
return self.request(route, params=params)
def get_joined_private_archived_threads(
self, channel_id: Snowflake, before: Optional[Snowflake] = None, limit: int = 50
) -> Response[threads.ThreadPaginationPayload]:
route = Route('GET', '/channels/{channel_id}/users/@me/threads/archived/private', channel_id=channel_id)
params = {}
if before:
params['before'] = before
params['limit'] = limit
return self.request(route, params=params)
def get_active_threads(self, guild_id: Snowflake) -> Response[threads.ThreadPaginationPayload]:
route = Route('GET', '/guilds/{guild_id}/threads/active', guild_id=guild_id)
return self.request(route)
def get_thread_members(self, channel_id: Snowflake) -> Response[List[threads.ThreadMember]]:
route = Route('GET', '/channels/{channel_id}/thread-members', channel_id=channel_id)
return self.request(route)
# Webhook management
def create_webhook(
self,
channel_id: Snowflake,
*,
name: str,
avatar: Optional[bytes] = None,
reason: Optional[str] = None,
) -> Response[webhook.Webhook]:
payload: Dict[str, Any] = {
'name': name,
}
if avatar is not None:
payload['avatar'] = avatar
r = Route('POST', '/channels/{channel_id}/webhooks', channel_id=channel_id)
return self.request(r, json=payload, reason=reason)
def channel_webhooks(self, channel_id: Snowflake) -> Response[List[webhook.Webhook]]:
return self.request(Route('GET', '/channels/{channel_id}/webhooks', channel_id=channel_id))
def guild_webhooks(self, guild_id: Snowflake) -> Response[List[webhook.Webhook]]:
return self.request(Route('GET', '/guilds/{guild_id}/webhooks', guild_id=guild_id))
def get_webhook(self, webhook_id: Snowflake) -> Response[webhook.Webhook]:
return self.request(Route('GET', '/webhooks/{webhook_id}', webhook_id=webhook_id))
def follow_webhook(
self,
channel_id: Snowflake,
webhook_channel_id: Snowflake,
reason: Optional[str] = None,
) -> Response[None]:
payload = {
'webhook_channel_id': str(webhook_channel_id),
}
return self.request(
Route('POST', '/channels/{channel_id}/followers', channel_id=channel_id), json=payload, reason=reason
)
# Guild management
def get_guilds(
self,
limit: int,
before: Optional[Snowflake] = None,
after: Optional[Snowflake] = None,
) -> Response[List[guild.Guild]]:
params: Dict[str, Any] = {
'limit': limit,
}
if before:
params['before'] = before
if after:
params['after'] = after
return self.request(Route('GET', '/users/@me/guilds'), params=params)
def leave_guild(self, guild_id: Snowflake) -> Response[None]:
return self.request(Route('DELETE', '/users/@me/guilds/{guild_id}', guild_id=guild_id))
def get_guild(self, guild_id: Snowflake) -> Response[guild.Guild]:
return self.request(Route('GET', '/guilds/{guild_id}', guild_id=guild_id))
def delete_guild(self, guild_id: Snowflake) -> Response[None]:
return self.request(Route('DELETE', '/guilds/{guild_id}', guild_id=guild_id))
def create_guild(self, name: str, region: str, icon: Optional[str]) -> Response[guild.Guild]:
payload = {
'name': name,
'region': region,
}
if icon:
payload['icon'] = icon
return self.request(Route('POST', '/guilds'), json=payload)
def edit_guild(self, guild_id: Snowflake, *, reason: Optional[str] = None, **fields: Any) -> Response[guild.Guild]:
valid_keys = (
'name',
'region',
'icon',
'afk_timeout',
'owner_id',
'afk_channel_id',
'splash',
'discovery_splash',
'features',
'verification_level',
'system_channel_id',
'default_message_notifications',
'description',
'explicit_content_filter',
'banner',
'system_channel_flags',
'rules_channel_id',
'public_updates_channel_id',
'preferred_locale',
)
payload = {k: v for k, v in fields.items() if k in valid_keys}
return self.request(Route('PATCH', '/guilds/{guild_id}', guild_id=guild_id), json=payload, reason=reason)
def get_template(self, code: str) -> Response[template.Template]:
return self.request(Route('GET', '/guilds/templates/{code}', code=code))
def guild_templates(self, guild_id: Snowflake) -> Response[List[template.Template]]:
return self.request(Route('GET', '/guilds/{guild_id}/templates', guild_id=guild_id))
def create_template(self, guild_id: Snowflake, payload: template.CreateTemplate) -> Response[template.Template]:
return self.request(Route('POST', '/guilds/{guild_id}/templates', guild_id=guild_id), json=payload)
def sync_template(self, guild_id: Snowflake, code: str) -> Response[template.Template]:
return self.request(Route('PUT', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code))
def edit_template(self, guild_id: Snowflake, code: str, payload) -> Response[template.Template]:
valid_keys = (
'name',
'description',
)
payload = {k: v for k, v in payload.items() if k in valid_keys}
return self.request(
Route('PATCH', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code), json=payload
)
def delete_template(self, guild_id: Snowflake, code: str) -> Response[None]:
return self.request(Route('DELETE', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code))
def create_from_template(self, code: str, name: str, region: str, icon: Optional[str]) -> Response[guild.Guild]:
payload = {
'name': name,
'region': region,
}
if icon:
payload['icon'] = icon
return self.request(Route('POST', '/guilds/templates/{code}', code=code), json=payload)
def get_bans(self, guild_id: Snowflake) -> Response[List[guild.Ban]]:
return self.request(Route('GET', '/guilds/{guild_id}/bans', guild_id=guild_id))
def get_ban(self, user_id: Snowflake, guild_id: Snowflake) -> Response[guild.Ban]:
return self.request(Route('GET', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id))
def get_vanity_code(self, guild_id: Snowflake) -> Response[invite.VanityInvite]:
return self.request(Route('GET', '/guilds/{guild_id}/vanity-url', guild_id=guild_id))
def change_vanity_code(self, guild_id: Snowflake, code: str, *, reason: Optional[str] = None) -> Response[None]:
payload: Dict[str, Any] = {'code': code}
return self.request(Route('PATCH', '/guilds/{guild_id}/vanity-url', guild_id=guild_id), json=payload, reason=reason)
def get_all_guild_channels(self, guild_id: Snowflake) -> Response[List[guild.GuildChannel]]:
return self.request(Route('GET', '/guilds/{guild_id}/channels', guild_id=guild_id))
def get_members(
self, guild_id: Snowflake, limit: int, after: Optional[Snowflake]
) -> Response[List[member.MemberWithUser]]:
params: Dict[str, Any] = {
'limit': limit,
}
if after:
params['after'] = after
r = Route('GET', '/guilds/{guild_id}/members', guild_id=guild_id)
return self.request(r, params=params)
def get_member(self, guild_id: Snowflake, member_id: Snowflake) -> Response[member.MemberWithUser]:
return self.request(Route('GET', '/guilds/{guild_id}/members/{member_id}', guild_id=guild_id, member_id=member_id))
def prune_members(
self,
guild_id: Snowflake,
days: int,
compute_prune_count: bool,
roles: List[str],
*,
reason: Optional[str] = None,
) -> Response[guild.GuildPrune]:
payload: Dict[str, Any] = {
'days': days,
'compute_prune_count': 'true' if compute_prune_count else 'false',
}
if roles:
payload['include_roles'] = ', '.join(roles)
return self.request(Route('POST', '/guilds/{guild_id}/prune', guild_id=guild_id), json=payload, reason=reason)
def estimate_pruned_members(
self,
guild_id: Snowflake,
days: int,
roles: List[str],
) -> Response[guild.GuildPrune]:
params: Dict[str, Any] = {
'days': days,
}
if roles:
params['include_roles'] = ', '.join(roles)
return self.request(Route('GET', '/guilds/{guild_id}/prune', guild_id=guild_id), params=params)
def get_sticker(self, sticker_id: Snowflake) -> Response[sticker.Sticker]:
return self.request(Route('GET', '/stickers/{sticker_id}', sticker_id=sticker_id))
def list_premium_sticker_packs(self) -> Response[sticker.ListPremiumStickerPacks]:
return self.request(Route('GET', '/sticker-packs'))
def get_all_guild_stickers(self, guild_id: Snowflake) -> Response[List[sticker.GuildSticker]]:
return self.request(Route('GET', '/guilds/{guild_id}/stickers', guild_id=guild_id))
def get_guild_sticker(self, guild_id: Snowflake, sticker_id: Snowflake) -> Response[sticker.GuildSticker]:
return self.request(
Route('GET', '/guilds/{guild_id}/stickers/{sticker_id}', guild_id=guild_id, sticker_id=sticker_id)
)
def create_guild_sticker(
self, guild_id: Snowflake, payload: sticker.CreateGuildSticker, file: File, reason: str
) -> Response[sticker.GuildSticker]:
initial_bytes = file.fp.read(16)
try:
mime_type = utils._get_mime_type_for_image(initial_bytes)
except InvalidArgument:
if initial_bytes.startswith(b'{'):
mime_type = 'application/json'
else:
mime_type = 'application/octet-stream'
finally:
file.reset()
form: List[Dict[str, Any]] = [
{
'name': 'file',
'value': file.fp,
'filename': file.filename,
'content_type': mime_type,
}
]
for k, v in payload.items():
form.append(
{
'name': k,
'value': v,
}
)
return self.request(
Route('POST', '/guilds/{guild_id}/stickers', guild_id=guild_id), form=form, files=[file], reason=reason
)
def modify_guild_sticker(
self, guild_id: Snowflake, sticker_id: Snowflake, payload: sticker.EditGuildSticker, reason: Optional[str],
) -> Response[sticker.GuildSticker]:
return self.request(
Route('PATCH', '/guilds/{guild_id}/stickers/{sticker_id}', guild_id=guild_id, sticker_id=sticker_id),
json=payload,
reason=reason,
)
def delete_guild_sticker(self, guild_id: Snowflake, sticker_id: Snowflake, reason: Optional[str]) -> Response[None]:
return self.request(
Route('DELETE', '/guilds/{guild_id}/stickers/{sticker_id}', guild_id=guild_id, sticker_id=sticker_id),
reason=reason,
)
def get_all_custom_emojis(self, guild_id: Snowflake) -> Response[List[emoji.Emoji]]:
return self.request(Route('GET', '/guilds/{guild_id}/emojis', guild_id=guild_id))
def get_custom_emoji(self, guild_id: Snowflake, emoji_id: Snowflake) -> Response[emoji.Emoji]:
return self.request(Route('GET', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id))
def create_custom_emoji(
self,
guild_id: Snowflake,
name: str,
image: bytes,
*,
roles: Optional[SnowflakeList] = None,
reason: Optional[str] = None,
) -> Response[emoji.Emoji]:
payload = {
'name': name,
'image': image,
'roles': roles or [],
}
r = Route('POST', '/guilds/{guild_id}/emojis', guild_id=guild_id)
return self.request(r, json=payload, reason=reason)
def delete_custom_emoji(
self,
guild_id: Snowflake,
emoji_id: Snowflake,
*,
reason: Optional[str] = None,
) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id)
return self.request(r, reason=reason)
def edit_custom_emoji(
self,
guild_id: Snowflake,
emoji_id: Snowflake,
*,
payload: Dict[str, Any],
reason: Optional[str] = None,
) -> Response[emoji.Emoji]:
r = Route('PATCH', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id)
return self.request(r, json=payload, reason=reason)
def get_all_integrations(self, guild_id: Snowflake) -> Response[List[integration.Integration]]:
r = Route('GET', '/guilds/{guild_id}/integrations', guild_id=guild_id)
return self.request(r)
def create_integration(self, guild_id: Snowflake, type: integration.IntegrationType, id: int) -> Response[None]:
payload = {
'type': type,
'id': id,
}
r = Route('POST', '/guilds/{guild_id}/integrations', guild_id=guild_id)
return self.request(r, json=payload)
def edit_integration(self, guild_id: Snowflake, integration_id: Snowflake, **payload: Any) -> Response[None]:
r = Route(
'PATCH', '/guilds/{guild_id}/integrations/{integration_id}', guild_id=guild_id, integration_id=integration_id
)
return self.request(r, json=payload)
def sync_integration(self, guild_id: Snowflake, integration_id: Snowflake) -> Response[None]:
r = Route(
'POST', '/guilds/{guild_id}/integrations/{integration_id}/sync', guild_id=guild_id, integration_id=integration_id
)
return self.request(r)
def delete_integration(
self, guild_id: Snowflake, integration_id: Snowflake, *, reason: Optional[str] = None
) -> Response[None]:
r = Route(
'DELETE', '/guilds/{guild_id}/integrations/{integration_id}', guild_id=guild_id, integration_id=integration_id
)
return self.request(r, reason=reason)
def get_audit_logs(
self,
guild_id: Snowflake,
limit: int = 100,
before: Optional[Snowflake] = None,
after: Optional[Snowflake] = None,
user_id: Optional[Snowflake] = None,
action_type: Optional[AuditLogAction] = None,
) -> Response[audit_log.AuditLog]:
params: Dict[str, Any] = {'limit': limit}
if before:
params['before'] = before
if after:
params['after'] = after
if user_id:
params['user_id'] = user_id
if action_type:
params['action_type'] = action_type
r = Route('GET', '/guilds/{guild_id}/audit-logs', guild_id=guild_id)
return self.request(r, params=params)
def get_widget(self, guild_id: Snowflake) -> Response[widget.Widget]:
return self.request(Route('GET', '/guilds/{guild_id}/widget.json', guild_id=guild_id))
def edit_widget(self, guild_id: Snowflake, payload) -> Response[widget.WidgetSettings]:
return self.request(Route('PATCH', '/guilds/{guild_id}/widget', guild_id=guild_id), json=payload)
# Invite management
def create_invite(
self,
channel_id: Snowflake,
*,
reason: Optional[str] = None,
max_age: int = 0,
max_uses: int = 0,
temporary: bool = False,
unique: bool = True,
target_type: Optional[invite.InviteTargetType] = None,
target_user_id: Optional[Snowflake] = None,
target_application_id: Optional[Snowflake] = None,
) -> Response[invite.Invite]:
r = Route('POST', '/channels/{channel_id}/invites', channel_id=channel_id)
payload = {
'max_age': max_age,
'max_uses': max_uses,
'temporary': temporary,
'unique': unique,
}
if target_type:
payload['target_type'] = target_type
if target_user_id:
payload['target_user_id'] = target_user_id
if target_application_id:
payload['target_application_id'] = str(target_application_id)
return self.request(r, reason=reason, json=payload)
def get_invite(
self, invite_id: str, *, with_counts: bool = True, with_expiration: bool = True
) -> Response[invite.Invite]:
params = {
'with_counts': int(with_counts),
'with_expiration': int(with_expiration),
}
return self.request(Route('GET', '/invites/{invite_id}', invite_id=invite_id), params=params)
def invites_from(self, guild_id: Snowflake) -> Response[List[invite.Invite]]:
return self.request(Route('GET', '/guilds/{guild_id}/invites', guild_id=guild_id))
def invites_from_channel(self, channel_id: Snowflake) -> Response[List[invite.Invite]]:
return self.request(Route('GET', '/channels/{channel_id}/invites', channel_id=channel_id))
def delete_invite(self, invite_id: str, *, reason: Optional[str] = None) -> Response[None]:
return self.request(Route('DELETE', '/invites/{invite_id}', invite_id=invite_id), reason=reason)
# Role management
def get_roles(self, guild_id: Snowflake) -> Response[List[role.Role]]:
return self.request(Route('GET', '/guilds/{guild_id}/roles', guild_id=guild_id))
def edit_role(
self, guild_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None, **fields: Any
) -> Response[role.Role]:
r = Route('PATCH', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
valid_keys = ('name', 'permissions', 'color', 'hoist', 'mentionable')
payload = {k: v for k, v in fields.items() if k in valid_keys}
return self.request(r, json=payload, reason=reason)
def delete_role(self, guild_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
return self.request(r, reason=reason)
def replace_roles(
self,
user_id: Snowflake,
guild_id: Snowflake,
role_ids: List[int],
*,
reason: Optional[str] = None,
) -> Response[member.MemberWithUser]:
return self.edit_member(guild_id=guild_id, user_id=user_id, roles=role_ids, reason=reason)
def create_role(self, guild_id: Snowflake, *, reason: Optional[str] = None, **fields: Any) -> Response[role.Role]:
r = Route('POST', '/guilds/{guild_id}/roles', guild_id=guild_id)
return self.request(r, json=fields, reason=reason)
def move_role_position(
self,
guild_id: Snowflake,
positions: List[guild.RolePositionUpdate],
*,
reason: Optional[str] = None,
) -> Response[List[role.Role]]:
r = Route('PATCH', '/guilds/{guild_id}/roles', guild_id=guild_id)
return self.request(r, json=positions, reason=reason)
def add_role(
self, guild_id: Snowflake, user_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None
) -> Response[None]:
r = Route(
'PUT',
'/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id,
user_id=user_id,
role_id=role_id,
)
return self.request(r, reason=reason)
def remove_role(
self, guild_id: Snowflake, user_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None
) -> Response[None]:
r = Route(
'DELETE',
'/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id,
user_id=user_id,
role_id=role_id,
)
return self.request(r, reason=reason)
def edit_channel_permissions(
self,
channel_id: Snowflake,
target: Snowflake,
allow: str,
deny: str,
type: channel.OverwriteType,
*,
reason: Optional[str] = None,
) -> Response[None]:
payload = {'id': target, 'allow': allow, 'deny': deny, 'type': type}
r = Route('PUT', '/channels/{channel_id}/permissions/{target}', channel_id=channel_id, target=target)
return self.request(r, json=payload, reason=reason)
def delete_channel_permissions(
self, channel_id: Snowflake, target: channel.OverwriteType, *, reason: Optional[str] = None
) -> Response[None]:
r = Route('DELETE', '/channels/{channel_id}/permissions/{target}', channel_id=channel_id, target=target)
return self.request(r, reason=reason)
# Voice management
def move_member(
self,
user_id: Snowflake,
guild_id: Snowflake,
channel_id: Snowflake,
*,
reason: Optional[str] = None,
) -> Response[member.MemberWithUser]:
return self.edit_member(guild_id=guild_id, user_id=user_id, channel_id=channel_id, reason=reason)
# Stage instance management
def get_stage_instance(self, channel_id: Snowflake) -> Response[channel.StageInstance]:
return self.request(Route('GET', '/stage-instances/{channel_id}', channel_id=channel_id))
def create_stage_instance(self, *, reason: Optional[str], **payload: Any) -> Response[channel.StageInstance]:
valid_keys = (
'channel_id',
'topic',
'privacy_level',
)
payload = {k: v for k, v in payload.items() if k in valid_keys}
return self.request(Route('POST', '/stage-instances'), json=payload, reason=reason)
def edit_stage_instance(self, channel_id: Snowflake, *, reason: Optional[str] = None, **payload: Any) -> Response[None]:
valid_keys = (
'topic',
'privacy_level',
)
payload = {k: v for k, v in payload.items() if k in valid_keys}
return self.request(
Route('PATCH', '/stage-instances/{channel_id}', channel_id=channel_id), json=payload, reason=reason
)
def delete_stage_instance(self, channel_id: Snowflake, *, reason: Optional[str] = None) -> Response[None]:
return self.request(Route('DELETE', '/stage-instances/{channel_id}', channel_id=channel_id), reason=reason)
# Application commands (global)
def get_global_commands(self, application_id: Snowflake) -> Response[List[interactions.ApplicationCommand]]:
return self.request(Route('GET', '/applications/{application_id}/commands', application_id=application_id))
def get_global_command(
self, application_id: Snowflake, command_id: Snowflake
) -> Response[interactions.ApplicationCommand]:
r = Route(
'GET',
'/applications/{application_id}/commands/{command_id}',
application_id=application_id,
command_id=command_id,
)
return self.request(r)
def upsert_global_command(self, application_id: Snowflake, payload) -> Response[interactions.ApplicationCommand]:
r = Route('POST', '/applications/{application_id}/commands', application_id=application_id)
return self.request(r, json=payload)
def edit_global_command(
self,
application_id: Snowflake,
command_id: Snowflake,
payload: interactions.EditApplicationCommand,
) -> Response[interactions.ApplicationCommand]:
valid_keys = (
'name',
'description',
'options',
)
payload = {k: v for k, v in payload.items() if k in valid_keys} # type: ignore
r = Route(
'PATCH',
'/applications/{application_id}/commands/{command_id}',
application_id=application_id,
command_id=command_id,
)
return self.request(r, json=payload)
def delete_global_command(self, application_id: Snowflake, command_id: Snowflake) -> Response[None]:
r = Route(
'DELETE',
'/applications/{application_id}/commands/{command_id}',
application_id=application_id,
command_id=command_id,
)
return self.request(r)
def bulk_upsert_global_commands(
self, application_id: Snowflake, payload
) -> Response[List[interactions.ApplicationCommand]]:
r = Route('PUT', '/applications/{application_id}/commands', application_id=application_id)
return self.request(r, json=payload)
# Application commands (guild)
def get_guild_commands(
self, application_id: Snowflake, guild_id: Snowflake
) -> Response[List[interactions.ApplicationCommand]]:
r = Route(
'GET',
'/applications/{application_id}/guilds/{guild_id}/commands',
application_id=application_id,
guild_id=guild_id,
)
return self.request(r)
def get_guild_command(
self,
application_id: Snowflake,
guild_id: Snowflake,
command_id: Snowflake,
) -> Response[interactions.ApplicationCommand]:
r = Route(
'GET',
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}',
application_id=application_id,
guild_id=guild_id,
command_id=command_id,
)
return self.request(r)
def upsert_guild_command(
self,
application_id: Snowflake,
guild_id: Snowflake,
payload: interactions.EditApplicationCommand,
) -> Response[interactions.ApplicationCommand]:
r = Route(
'POST',
'/applications/{application_id}/guilds/{guild_id}/commands',
application_id=application_id,
guild_id=guild_id,
)
return self.request(r, json=payload)
def edit_guild_command(
self,
application_id: Snowflake,
guild_id: Snowflake,
command_id: Snowflake,
payload: interactions.EditApplicationCommand,
) -> Response[interactions.ApplicationCommand]:
valid_keys = (
'name',
'description',
'options',
)
payload = {k: v for k, v in payload.items() if k in valid_keys} # type: ignore
r = Route(
'PATCH',
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}',
application_id=application_id,
guild_id=guild_id,
command_id=command_id,
)
return self.request(r, json=payload)
def delete_guild_command(
self,
application_id: Snowflake,
guild_id: Snowflake,
command_id: Snowflake,
) -> Response[None]:
r = Route(
'DELETE',
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}',
application_id=application_id,
guild_id=guild_id,
command_id=command_id,
)
return self.request(r)
def bulk_upsert_guild_commands(
self,
application_id: Snowflake,
guild_id: Snowflake,
payload: List[interactions.EditApplicationCommand],
) -> Response[List[interactions.ApplicationCommand]]:
r = Route(
'PUT',
'/applications/{application_id}/guilds/{guild_id}/commands',
application_id=application_id,
guild_id=guild_id,
)
return self.request(r, json=payload)
# Interaction responses
def _edit_webhook_helper(
self,
route: Route,
file: Optional[File] = None,
content: Optional[str] = None,
embeds: Optional[List[embed.Embed]] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
):
payload: Dict[str, Any] = {}
if content:
payload['content'] = content
if embeds:
payload['embeds'] = embeds
if allowed_mentions:
payload['allowed_mentions'] = allowed_mentions
form: List[Dict[str, Any]] = [
{
'name': 'payload_json',
'value': utils._to_json(payload),
}
]
if file:
form.append(
{
'name': 'file',
'value': file.fp,
'filename': file.filename,
'content_type': 'application/octet-stream',
}
)
return self.request(route, form=form, files=[file] if file else None)
def create_interaction_response(
self,
interaction_id: Snowflake,
token: str,
*,
type: InteractionResponseType,
data: Optional[interactions.InteractionApplicationCommandCallbackData] = None,
) -> Response[None]:
r = Route(
'POST',
'/interactions/{interaction_id}/{interaction_token}/callback',
interaction_id=interaction_id,
interaction_token=token,
)
payload: Dict[str, Any] = {
'type': type,
}
if data is not None:
payload['data'] = data
return self.request(r, json=payload)
def get_original_interaction_response(
self,
application_id: Snowflake,
token: str,
) -> Response[message.Message]:
r = Route(
'GET',
'/webhooks/{application_id}/{interaction_token}/messages/@original',
application_id=application_id,
interaction_token=token,
)
return self.request(r)
def edit_original_interaction_response(
self,
application_id: Snowflake,
token: str,
file: Optional[File] = None,
content: Optional[str] = None,
embeds: Optional[List[embed.Embed]] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
) -> Response[message.Message]:
r = Route(
'PATCH',
'/webhooks/{application_id}/{interaction_token}/messages/@original',
application_id=application_id,
interaction_token=token,
)
return self._edit_webhook_helper(r, file=file, content=content, embeds=embeds, allowed_mentions=allowed_mentions)
def delete_original_interaction_response(self, application_id: Snowflake, token: str) -> Response[None]:
r = Route(
'DELETE',
'/webhooks/{application_id}/{interaction_token}/messages/@original',
application_id=application_id,
interaction_token=token,
)
return self.request(r)
def create_followup_message(
self,
application_id: Snowflake,
token: str,
files: List[File] = [],
content: Optional[str] = None,
tts: bool = False,
embeds: Optional[List[embed.Embed]] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
) -> Response[message.Message]:
r = Route(
'POST',
'/webhooks/{application_id}/{interaction_token}',
application_id=application_id,
interaction_token=token,
)
return self.send_multipart_helper(
r,
content=content,
files=files,
tts=tts,
embeds=embeds,
allowed_mentions=allowed_mentions,
)
def edit_followup_message(
self,
application_id: Snowflake,
token: str,
message_id: Snowflake,
file: Optional[File] = None,
content: Optional[str] = None,
embeds: Optional[List[embed.Embed]] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
) -> Response[message.Message]:
r = Route(
'PATCH',
'/webhooks/{application_id}/{interaction_token}/messages/{message_id}',
application_id=application_id,
interaction_token=token,
message_id=message_id,
)
return self._edit_webhook_helper(r, file=file, content=content, embeds=embeds, allowed_mentions=allowed_mentions)
def delete_followup_message(self, application_id: Snowflake, token: str, message_id: Snowflake) -> Response[None]:
r = Route(
'DELETE',
'/webhooks/{application_id}/{interaction_token}/messages/{message_id}',
application_id=application_id,
interaction_token=token,
message_id=message_id,
)
return self.request(r)
def get_guild_application_command_permissions(
self,
application_id: Snowflake,
guild_id: Snowflake,
) -> Response[List[interactions.GuildApplicationCommandPermissions]]:
r = Route(
'GET',
'/applications/{application_id}/guilds/{guild_id}/commands/permissions',
application_id=application_id,
guild_id=guild_id,
)
return self.request(r)
def get_application_command_permissions(
self,
application_id: Snowflake,
guild_id: Snowflake,
command_id: Snowflake,
) -> Response[interactions.GuildApplicationCommandPermissions]:
r = Route(
'GET',
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}/permissions',
application_id=application_id,
guild_id=guild_id,
command_id=command_id,
)
return self.request(r)
def edit_application_command_permissions(
self,
application_id: Snowflake,
guild_id: Snowflake,
command_id: Snowflake,
payload: interactions.BaseGuildApplicationCommandPermissions,
) -> Response[None]:
r = Route(
'PUT',
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}/permissions',
application_id=application_id,
guild_id=guild_id,
command_id=command_id,
)
return self.request(r, json=payload)
def bulk_edit_guild_application_command_permissions(
self,
application_id: Snowflake,
guild_id: Snowflake,
payload: List[interactions.PartialGuildApplicationCommandPermissions],
) -> Response[None]:
r = Route(
'PUT',
'/applications/{application_id}/guilds/{guild_id}/commands/permissions',
application_id=application_id,
guild_id=guild_id,
)
return self.request(r, json=payload)
# Misc
def application_info(self) -> Response[appinfo.AppInfo]:
return self.request(Route('GET', '/oauth2/applications/@me'))
async def get_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> str:
try:
data = await self.request(Route('GET', '/gateway'))
except HTTPException as exc:
raise GatewayNotFound() from exc
if zlib:
value = '{0}?encoding={1}&v=9&compress=zlib-stream'
else:
value = '{0}?encoding={1}&v=9'
return value.format(data['url'], encoding)
async def get_bot_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> Tuple[int, str]:
try:
data = await self.request(Route('GET', '/gateway/bot'))
except HTTPException as exc:
raise GatewayNotFound() from exc
if zlib:
value = '{0}?encoding={1}&v=9&compress=zlib-stream'
else:
value = '{0}?encoding={1}&v=9'
return data['shards'], value.format(data['url'], encoding)
def get_user(self, user_id: Snowflake) -> Response[user.User]:
        return self.request(Route('GET', '/users/{user_id}', user_id=user_id))
/django3-viewflow-2.0.2.tar.gz/django3-viewflow-2.0.2/viewflow/nodes/signal.py
from .. import Event, ThisObject, mixins
from ..activation import StartActivation, FuncActivation
from ..exceptions import FlowRuntimeError
class StartSignal(mixins.TaskDescriptionMixin,
mixins.NextNodeMixin,
mixins.DetailViewMixin,
mixins.UndoViewMixin,
mixins.CancelViewMixin,
Event):
"""
Start flow on a django signal receive.
Example::
class MyFlow(Flow):
start = (
flow.StartSignal(
post_save, this.start_flow,
sender=MyModelCls)
.Next(this.approve)
)
...
@flow_start_signal
def start_flow(self, activation, **signal_kwargs):
activation.prepare()
activation.done()
"""
task_type = 'START'
activation_class = StartActivation
def __init__(self, signal, receiver, sender=None, **kwargs):
"""
Instantiate a StartSignal task.
:param signal: A django signal to connect
:param receiver: Callable[activation, **kwargs]
:param sender: Optional signal sender
"""
self.signal = signal
self.receiver = receiver
self.sender = sender
super(StartSignal, self).__init__(**kwargs)
def on_signal(self, sender, **signal_kwargs):
"""Signal handler."""
return self.receiver(sender=sender, flow_task=self, **signal_kwargs)
def ready(self):
"""Resolve internal `this`-references. and subscribe to the signal."""
if isinstance(self.receiver, ThisObject):
self.receiver = getattr(self.flow_class.instance, self.receiver.name)
self.signal.connect(
self.on_signal, sender=self.sender,
dispatch_uid="viewflow.flow.signal/{}.{}.{}".format(
self.flow_class.__module__, self.flow_class.__name__, self.name))
class Signal(mixins.TaskDescriptionMixin,
mixins.NextNodeMixin,
mixins.DetailViewMixin,
mixins.UndoViewMixin,
mixins.CancelViewMixin,
Event):
"""
Execute a callback on a django signal receive.
Example::
class MyFlow(Flow):
wait_for_receipt = (
flow.Signal(
post_create, this.receipt_created,
sender=MyModelCls)
                .Next(this.approve)
            )
...
def receipt_created(self, activation, **signal_kwargs):
activation.prepare()
activation.process.receipt = signal_kwargs['instance']
activation.done()
"""
task_type = 'FUNC'
activation_class = FuncActivation
def __init__(self, signal, receiver, sender=None, task_loader=None, allow_skip=False, **kwargs):
"""
Instantiate a Signal task.
:param signal: A django signal to connect
:param receiver: Callable[activation, **kwargs]
:param sender: Optional signal sender
:param task_loader: Callable[**kwargs] -> Task
:param allow_skip: If True task_loader can return None if
signal could be skipped.
        You can skip a `task_loader` if the signal is going to be
        sent with a Task instance.
"""
self.signal = signal
self.receiver = receiver
self.sender = sender
self.task_loader = task_loader
self.allow_skip = allow_skip
super(Signal, self).__init__(**kwargs)
def on_signal(self, sender, **signal_kwargs):
"""Signal handler."""
if self.task_loader is None:
if 'task' not in signal_kwargs:
                raise FlowRuntimeError('{} has no task_loader and got signal without task instance'.format(self.name))
return self.receiver(sender=sender, **signal_kwargs)
else:
task = self.task_loader(self, sender=sender, **signal_kwargs)
if task is None:
if self.allow_skip is False:
raise FlowRuntimeError("The task_loader didn't return any task for {}\n{}".format(
self.name, signal_kwargs))
else:
return self.receiver(sender=sender, _task=task, **signal_kwargs)
def ready(self):
"""Resolve internal `this`-references. and subscribe to the signal."""
if isinstance(self.receiver, ThisObject):
self.receiver = getattr(self.flow_class.instance, self.receiver.name)
if isinstance(self.task_loader, ThisObject):
self.task_loader = getattr(self.flow_class.instance, self.task_loader.name)
self.signal.connect(
self.on_signal, sender=self.sender,
dispatch_uid="viewflow.flow.signal/{}.{}.{}".format(
                self.flow_class.__module__, self.flow_class.__name__, self.name))
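# Illustrative note (not part of the original module): when a Signal node is configured
# without a task_loader, the sender is expected to pass the waiting Task instance along
# with the signal itself; the names below are assumptions used only for illustration.
#
#   my_signal.send(sender=MyModelCls, task=waiting_task, instance=obj)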
/pigweed-0.0.14.tar.gz/pigweed-0.0.14/pw_presubmit/source_in_build.py
"""Checks that source files are listed in build files, such as BUILD.bazel."""
import logging
from typing import Callable, Sequence
from pw_presubmit import build, format_code, git_repo
from pw_presubmit.presubmit import (
Check,
FileFilter,
PresubmitContext,
PresubmitFailure,
)
_LOG: logging.Logger = logging.getLogger(__name__)
# The filter is used twice for each source_is_in_* check. First to decide
# whether the check should be run. Once it's running, we use ctx.all_paths
# instead of ctx.paths since we want to check that all files are in the build,
# not just changed files, but we need to run ctx.all_paths through the same
# filter within the check or we won't properly ignore files that the caller
# asked to be ignored.
_DEFAULT_BAZEL_EXTENSIONS = (*format_code.C_FORMAT.extensions,)
def bazel(
source_filter: FileFilter,
files_and_extensions_to_check: Sequence[str] = _DEFAULT_BAZEL_EXTENSIONS,
) -> Check:
"""Create a presubmit check that ensures source files are in Bazel files.
Args:
source_filter: filter that selects files that must be in the Bazel build
files_and_extensions_to_check: files and extensions to look for (the
source_filter might match build files that won't be in the build but
this should only match source files)
"""
@source_filter.apply_to_check()
def source_is_in_bazel_build(ctx: PresubmitContext):
"""Checks that source files are in the Bazel build."""
paths = source_filter.filter(ctx.all_paths)
missing = build.check_bazel_build_for_files(
files_and_extensions_to_check,
paths,
bazel_dirs=[ctx.root],
)
if missing:
with ctx.failure_summary_log.open('w') as outs:
print('Missing files:', file=outs)
for miss in missing:
print(miss, file=outs)
_LOG.warning('All source files must appear in BUILD.bazel files')
raise PresubmitFailure
return source_is_in_bazel_build
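# Illustrative only: a downstream presubmit script would typically build this check from
# an existing FileFilter and register it alongside its other checks, for example
#
#   source_is_in_bazel = source_in_build.bazel(source_filter=my_source_filter)
#
# where my_source_filter is a placeholder for a project-specific FileFilter instance.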
_DEFAULT_GN_EXTENSIONS = (
'setup.cfg',
'.toml',
'.rst',
'.py',
*format_code.C_FORMAT.extensions,
)
def gn( # pylint: disable=invalid-name
source_filter: FileFilter,
files_and_extensions_to_check: Sequence[str] = _DEFAULT_GN_EXTENSIONS,
) -> Check:
"""Create a presubmit check that ensures source files are in GN files.
Args:
source_filter: filter that selects files that must be in the GN build
files_and_extensions_to_check: files and extensions to look for (the
source_filter might match build files that won't be in the build but
this should only match source files)
"""
@source_filter.apply_to_check()
def source_is_in_gn_build(ctx: PresubmitContext):
"""Checks that source files are in the GN build."""
paths = source_filter.filter(ctx.all_paths)
missing = build.check_gn_build_for_files(
files_and_extensions_to_check,
paths,
gn_build_files=git_repo.list_files(
pathspecs=['BUILD.gn', '*BUILD.gn'], repo_path=ctx.root
),
)
if missing:
with ctx.failure_summary_log.open('w') as outs:
print('Missing files:', file=outs)
for miss in missing:
print(miss, file=outs)
_LOG.warning('All source files must appear in BUILD.gn files')
raise PresubmitFailure
return source_is_in_gn_build
_DEFAULT_CMAKE_EXTENSIONS = (*format_code.C_FORMAT.extensions,)
def cmake(
source_filter: FileFilter,
run_cmake: Callable[[PresubmitContext], None],
files_and_extensions_to_check: Sequence[str] = _DEFAULT_CMAKE_EXTENSIONS,
) -> Check:
"""Create a presubmit check that ensures source files are in CMake files.
Args:
source_filter: filter that selects files that must be in the CMake build
run_cmake: callable that takes a PresubmitContext and invokes CMake
files_and_extensions_to_check: files and extensions to look for (the
source_filter might match build files that won't be in the build but
this should only match source files)
"""
to_check = tuple(files_and_extensions_to_check)
@source_filter.apply_to_check()
def source_is_in_cmake_build(ctx: PresubmitContext):
"""Checks that source files are in the CMake build."""
paths = source_filter.filter(ctx.all_paths)
run_cmake(ctx)
missing = build.check_compile_commands_for_files(
ctx.output_dir / 'compile_commands.json',
(f for f in paths if str(f).endswith(to_check)),
)
if missing:
with ctx.failure_summary_log.open('w') as outs:
print('Missing files:', file=outs)
for miss in missing:
print(miss, file=outs)
_LOG.warning(
'Files missing from CMake:\n%s',
'\n'.join(str(f) for f in missing),
)
raise PresubmitFailure
    return source_is_in_cmake_build
/slack_types-0.0.2-py3-none-any.whl/slack_types/web_api/admin_conversations_search_response.py
from dataclasses import dataclass
from typing import Optional, List, Any, TypeVar, Callable, Type, cast
T = TypeVar("T")
def from_str(x: Any) -> str:
assert isinstance(x, str)
return x
def from_none(x: Any) -> Any:
assert x is None
return x
def from_union(fs, x):
for f in fs:
try:
return f(x)
except:
pass
assert False
def from_int(x: Any) -> int:
assert isinstance(x, int) and not isinstance(x, bool)
return x
def from_bool(x: Any) -> bool:
assert isinstance(x, bool)
return x
def from_list(f: Callable[[Any], T], x: Any) -> List[T]:
assert isinstance(x, list)
return [f(y) for y in x]
def to_class(c: Type[T], x: Any) -> dict:
assert isinstance(x, c)
return cast(Any, x).to_dict()
@dataclass
class Conversation:
id: Optional[str] = None
name: Optional[str] = None
purpose: Optional[str] = None
member_count: Optional[int] = None
created: Optional[int] = None
creator_id: Optional[str] = None
is_private: Optional[bool] = None
is_archived: Optional[bool] = None
is_general: Optional[bool] = None
last_activity_ts: Optional[int] = None
is_ext_shared: Optional[bool] = None
is_global_shared: Optional[bool] = None
is_org_default: Optional[bool] = None
is_org_mandatory: Optional[bool] = None
is_org_shared: Optional[bool] = None
is_frozen: Optional[bool] = None
internal_team_ids_count: Optional[int] = None
internal_team_ids_sample_team: Optional[str] = None
pending_connected_team_ids: Optional[List[str]] = None
is_pending_ext_shared: Optional[bool] = None
connected_team_ids: Optional[List[str]] = None
conversation_host_id: Optional[str] = None
channel_email_addresses: Optional[List[str]] = None
connected_limited_team_ids: Optional[List[str]] = None
@staticmethod
def from_dict(obj: Any) -> 'Conversation':
assert isinstance(obj, dict)
id = from_union([from_str, from_none], obj.get("id"))
name = from_union([from_str, from_none], obj.get("name"))
purpose = from_union([from_str, from_none], obj.get("purpose"))
member_count = from_union([from_int, from_none], obj.get("member_count"))
created = from_union([from_int, from_none], obj.get("created"))
creator_id = from_union([from_str, from_none], obj.get("creator_id"))
is_private = from_union([from_bool, from_none], obj.get("is_private"))
is_archived = from_union([from_bool, from_none], obj.get("is_archived"))
is_general = from_union([from_bool, from_none], obj.get("is_general"))
last_activity_ts = from_union([from_int, from_none], obj.get("last_activity_ts"))
is_ext_shared = from_union([from_bool, from_none], obj.get("is_ext_shared"))
is_global_shared = from_union([from_bool, from_none], obj.get("is_global_shared"))
is_org_default = from_union([from_bool, from_none], obj.get("is_org_default"))
is_org_mandatory = from_union([from_bool, from_none], obj.get("is_org_mandatory"))
is_org_shared = from_union([from_bool, from_none], obj.get("is_org_shared"))
is_frozen = from_union([from_bool, from_none], obj.get("is_frozen"))
internal_team_ids_count = from_union([from_int, from_none], obj.get("internal_team_ids_count"))
internal_team_ids_sample_team = from_union([from_str, from_none], obj.get("internal_team_ids_sample_team"))
pending_connected_team_ids = from_union([lambda x: from_list(from_str, x), from_none], obj.get("pending_connected_team_ids"))
is_pending_ext_shared = from_union([from_bool, from_none], obj.get("is_pending_ext_shared"))
connected_team_ids = from_union([lambda x: from_list(from_str, x), from_none], obj.get("connected_team_ids"))
conversation_host_id = from_union([from_str, from_none], obj.get("conversation_host_id"))
channel_email_addresses = from_union([lambda x: from_list(from_str, x), from_none], obj.get("channel_email_addresses"))
connected_limited_team_ids = from_union([lambda x: from_list(from_str, x), from_none], obj.get("connected_limited_team_ids"))
return Conversation(id, name, purpose, member_count, created, creator_id, is_private, is_archived, is_general, last_activity_ts, is_ext_shared, is_global_shared, is_org_default, is_org_mandatory, is_org_shared, is_frozen, internal_team_ids_count, internal_team_ids_sample_team, pending_connected_team_ids, is_pending_ext_shared, connected_team_ids, conversation_host_id, channel_email_addresses, connected_limited_team_ids)
def to_dict(self) -> dict:
result: dict = {}
result["id"] = from_union([from_str, from_none], self.id)
result["name"] = from_union([from_str, from_none], self.name)
result["purpose"] = from_union([from_str, from_none], self.purpose)
result["member_count"] = from_union([from_int, from_none], self.member_count)
result["created"] = from_union([from_int, from_none], self.created)
result["creator_id"] = from_union([from_str, from_none], self.creator_id)
result["is_private"] = from_union([from_bool, from_none], self.is_private)
result["is_archived"] = from_union([from_bool, from_none], self.is_archived)
result["is_general"] = from_union([from_bool, from_none], self.is_general)
result["last_activity_ts"] = from_union([from_int, from_none], self.last_activity_ts)
result["is_ext_shared"] = from_union([from_bool, from_none], self.is_ext_shared)
result["is_global_shared"] = from_union([from_bool, from_none], self.is_global_shared)
result["is_org_default"] = from_union([from_bool, from_none], self.is_org_default)
result["is_org_mandatory"] = from_union([from_bool, from_none], self.is_org_mandatory)
result["is_org_shared"] = from_union([from_bool, from_none], self.is_org_shared)
result["is_frozen"] = from_union([from_bool, from_none], self.is_frozen)
result["internal_team_ids_count"] = from_union([from_int, from_none], self.internal_team_ids_count)
result["internal_team_ids_sample_team"] = from_union([from_str, from_none], self.internal_team_ids_sample_team)
result["pending_connected_team_ids"] = from_union([lambda x: from_list(from_str, x), from_none], self.pending_connected_team_ids)
result["is_pending_ext_shared"] = from_union([from_bool, from_none], self.is_pending_ext_shared)
result["connected_team_ids"] = from_union([lambda x: from_list(from_str, x), from_none], self.connected_team_ids)
result["conversation_host_id"] = from_union([from_str, from_none], self.conversation_host_id)
result["channel_email_addresses"] = from_union([lambda x: from_list(from_str, x), from_none], self.channel_email_addresses)
result["connected_limited_team_ids"] = from_union([lambda x: from_list(from_str, x), from_none], self.connected_limited_team_ids)
return result
@dataclass
class AdminConversationsSearchResponse:
ok: Optional[bool] = None
conversations: Optional[List[Conversation]] = None
next_cursor: Optional[str] = None
error: Optional[str] = None
needed: Optional[str] = None
provided: Optional[str] = None
@staticmethod
def from_dict(obj: Any) -> 'AdminConversationsSearchResponse':
assert isinstance(obj, dict)
ok = from_union([from_bool, from_none], obj.get("ok"))
conversations = from_union([lambda x: from_list(Conversation.from_dict, x), from_none], obj.get("conversations"))
next_cursor = from_union([from_str, from_none], obj.get("next_cursor"))
error = from_union([from_str, from_none], obj.get("error"))
needed = from_union([from_str, from_none], obj.get("needed"))
provided = from_union([from_str, from_none], obj.get("provided"))
return AdminConversationsSearchResponse(ok, conversations, next_cursor, error, needed, provided)
def to_dict(self) -> dict:
result: dict = {}
result["ok"] = from_union([from_bool, from_none], self.ok)
result["conversations"] = from_union([lambda x: from_list(lambda x: to_class(Conversation, x), x), from_none], self.conversations)
result["next_cursor"] = from_union([from_str, from_none], self.next_cursor)
result["error"] = from_union([from_str, from_none], self.error)
result["needed"] = from_union([from_str, from_none], self.needed)
result["provided"] = from_union([from_str, from_none], self.provided)
return result
def admin_conversations_search_response_from_dict(s: Any) -> AdminConversationsSearchResponse:
return AdminConversationsSearchResponse.from_dict(s)
def admin_conversations_search_response_to_dict(x: AdminConversationsSearchResponse) -> Any:
    return to_class(AdminConversationsSearchResponse, x)
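# Illustrative round-trip sketch (not part of the generated module); the payload below is
# a made-up minimal example of the Admin API response shape.
if __name__ == "__main__":
    _sample = {
        "ok": True,
        "conversations": [{"id": "C123", "name": "general", "member_count": 5}],
        "next_cursor": "",
    }
    _parsed = admin_conversations_search_response_from_dict(_sample)
    assert _parsed.conversations is not None
    assert _parsed.conversations[0].name == "general"
    assert admin_conversations_search_response_to_dict(_parsed)["ok"] is True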
/django-chartbuilder-0.3.tar.gz/django-chartbuilder-0.3/django_chartbuilder/static/django_chartbuilder/Chartbuilder/bower_components/html5-boilerplate/LICENSE.md | Copyright (c) HTML5 Boilerplate
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
/certora_cli_alpha_roy_CERT_1891_allocId_e-20230516.10.30.641439-py3-none-any.whl/certora_cli/EVMVerifier/certoraContextAttribute.py | import argparse
import ast
import logging
import re
import sys
from functools import lru_cache
from dataclasses import dataclass, field
from enum import unique, auto
from pathlib import Path
from typing import Optional, Dict, Any, Callable, List
from EVMVerifier import certoraValidateFuncs as Vf
from Shared import certoraUtils as Util
scripts_dir_path = Path(__file__).parent.resolve() # containing directory
sys.path.insert(0, str(scripts_dir_path))
# logger for issues regarding context
context_logger = logging.getLogger("context")
def validate_prover_args(value: str) -> str:
strings = value.split()
for arg in ContextAttribute:
if arg.value.jar_flag is None:
continue
for string in strings:
if string == arg.value.jar_flag:
raise argparse.ArgumentTypeError(f"the flag {string} should be set using {arg.get_flag()}"
"and not by the --prover_args flag")
return value
def validate_solc_args(value: str) -> str:
'''
making sure no supported flags are set inside the --solc_arg flag
'''
strings = value.split()
for string in strings:
if string in ['--path', '--allow-paths', '--allow-path', '--solc_allow_path']:
raise argparse.ArgumentTypeError(f"the flag {string} should be set using the flag --solc_allow_path")
if string in ['--optimize', '--solc_optimize', '--optimize-runs']:
raise argparse.ArgumentTypeError(f"the flag {string} should be set using the flag --solc_optimize")
if string in ['--via_ir', '--solc_via_ir', '--via-ir']:
raise argparse.ArgumentTypeError(f"the flag {string} should be set using the flag --solc_via_ir")
if string in ['--evm_version', '--evm-version', '--solc_evm_version']:
raise argparse.ArgumentTypeError(f"the flag {string} should be set using the flag --solc_via_ir")
return value
def parse_struct_link(link: str) -> str:
search_res = re.search(r'^\w+:([^:=]+)=\w+$', link)
# We do not require firm form of slot number so we can give more informative warnings
if search_res is None:
raise argparse.ArgumentTypeError(f"Struct link argument {link} must be of the form contractA:<field>=contractB")
if search_res[1].isidentifier():
return link
try:
parsed_int = int(search_res[1], 0) # an integer or a hexadecimal
if parsed_int < 0:
raise argparse.ArgumentTypeError(f"struct link slot number negative at {link}")
except ValueError:
raise argparse.ArgumentTypeError(f"Struct link argument {link} must be of the form contractA:number=contractB"
f" or contractA:fieldName=contractB")
return link
def parse_solc_args(list_as_string: str) -> List[str]:
"""
parse the argument as a list
"""
if Util.is_new_api():
type_deprecated(list_as_string, ContextAttribute.SOLC_ARGS)
v = ast.literal_eval(list_as_string)
if type(v) is not list:
raise argparse.ArgumentTypeError(f'--solc_args: "{list_as_string}" is not a list')
return v
APPEND = 'append'
STORE_TRUE = 'store_true'
STORE_FALSE = 'store_false'
VERSION = 'version'
SINGLE_OR_NONE_OCCURRENCES = '?'
MULTIPLE_OCCURRENCES = '*'
ONE_OR_MORE_OCCURRENCES = '+'
class AttrArgType(Util.NoValEnum):
STRING = auto()
BOOLEAN = auto()
LIST_OF_STRINGS = auto()
ANY = auto()
class ArgStatus(Util.NoValEnum):
REGULAR = auto()
NEW = auto()
DEPRECATED = auto()
class ArgGroups(Util.NoValEnum):
# The order of the groups is the order we want to show the groups in argParse's help
MODE = "Mode of operation. Please choose one, unless using a .conf or .tac file"
USEFUL = "Most frequently used options"
RUN = "Options affecting the type of verification run"
SOLIDITY = "Options that control the Solidity compiler"
LOOP = "Options regarding source code loops"
HASHING = "Options regarding handling of unbounded hashing"
RUN_TIME = "Options that help reduce running time"
LINKAGE = "Options to set addresses and link contracts"
CREATION = "Options to model contract creation"
INFO = "Debugging options"
JAVA = "Arguments passed to the .jar file"
PARTIAL = "These arguments run only specific parts of the tool, or skip parts"
CLOUD = "Fine cloud control arguments"
MISC_HIDDEN = "Miscellaneous hidden arguments"
ENV = ""
@dataclass
class CertoraArgument:
flag: Optional[str] = None # override the 'default': option name
group: Optional[ArgGroups] = None # name of the arg parse (see ArgGroups above)
attr_validation_func: Optional[Callable] = None # TODO more precise
arg_status: ArgStatus = ArgStatus.REGULAR
deprecation_msg: Optional[str] = None
jar_flag: Optional[str] = None # the flag that is sent to the jar (if attr is sent to the jar)
jar_no_value: Optional[bool] = False # if true, flag is sent with no value
help_msg: str = argparse.SUPPRESS
    # args for argparse's add_argument passed as is
argparse_args: Dict[str, Any] = field(default_factory=dict)
arg_type: AttrArgType = AttrArgType.STRING
def get_dest(self) -> Optional[str]:
return self.argparse_args.get('dest')
class UniqueStore(argparse.Action):
"""
This class makes the argparser throw an error for a given flag if it was inserted more than once
"""
def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Any, # type: ignore
option_string: str) -> None:
if getattr(namespace, self.dest, self.default) is not self.default:
parser.error(f"{option_string} appears several times.")
setattr(namespace, self.dest, values)
@unique
class ContextAttribute(Util.NoValEnum):
"""
This enum class must be unique. If 2 args have the same value we add the 'flag' attribute to make sure the hash
value is not going to be the same
The order of the attributes is the order we want to show the attributes in argParse's help
"""
FILES = CertoraArgument(
attr_validation_func=Vf.validate_input_file,
arg_type=AttrArgType.LIST_OF_STRINGS,
help_msg="contract files for analysis or a conf file",
flag='files',
argparse_args={
'nargs': MULTIPLE_OCCURRENCES
}
)
VERIFY = CertoraArgument(
group=ArgGroups.MODE,
attr_validation_func=Vf.validate_verify_attr,
arg_type=AttrArgType.STRING,
help_msg="Matches a specification file to a contract",
argparse_args={
'action': UniqueStore
}
)
ASSERT_CONTRACTS_DEPRECATED = CertoraArgument(
group=ArgGroups.MODE,
arg_status=ArgStatus.DEPRECATED,
attr_validation_func=Vf.validate_assert_contract,
arg_type=AttrArgType.LIST_OF_STRINGS,
deprecation_msg="--assert is deprecated; use --assert_contracts instead",
flag='--assert',
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'dest': 'assert_contracts_deprecated' if Util.is_new_api() else 'assert_contracts',
'action': APPEND
}
)
# something is definitely under-tested here, because I changed this to take
# a string instead of list of strings and everything just passed!
ASSERT_CONTRACTS = CertoraArgument(
group=ArgGroups.MODE,
arg_status=ArgStatus.NEW,
attr_validation_func=Vf.validate_assert_contract,
arg_type=AttrArgType.LIST_OF_STRINGS,
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'action': APPEND,
}
)
BYTECODE_JSONS_DEPRECATED = CertoraArgument(
group=ArgGroups.MODE,
arg_status=ArgStatus.DEPRECATED,
attr_validation_func=Vf.validate_json_file,
flag='--bytecode',
deprecation_msg="--bytecode is deprecated; use --bytecode_jsons instead",
arg_type=AttrArgType.LIST_OF_STRINGS,
jar_flag='-bytecode',
help_msg="List of EVM bytecode json descriptors",
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'dest': 'bytecode_jsons_deprecated' if Util.is_new_api() else 'bytecode_jsons',
'action': APPEND
}
)
BYTECODE_JSONS = CertoraArgument(
group=ArgGroups.MODE,
arg_status=ArgStatus.NEW,
attr_validation_func=Vf.validate_json_file,
arg_type=AttrArgType.LIST_OF_STRINGS,
jar_flag='-bytecode',
help_msg="List of EVM bytecode json descriptors",
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'action': APPEND
}
)
BYTECODE_SPEC = CertoraArgument(
group=ArgGroups.MODE,
attr_validation_func=Vf.validate_readable_file,
jar_flag='-spec',
help_msg="Spec to use for the provided bytecodes",
argparse_args={
'action': UniqueStore
}
)
MSG = CertoraArgument(
group=ArgGroups.USEFUL,
help_msg="Adds a message description to your run",
argparse_args={
'action': UniqueStore
}
)
# RULE option is for both --rule and --rules
RULE = CertoraArgument(
group=ArgGroups.USEFUL,
arg_type=AttrArgType.LIST_OF_STRINGS,
jar_flag='-rule',
help_msg="Filters the list of rules/invariants to verify",
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'action': APPEND
}
)
PROTOCOL_NAME = CertoraArgument(
group=ArgGroups.USEFUL,
help_msg="Adds the protocol's name for easy filtering in the dashboard",
argparse_args={
'action': UniqueStore
}
)
PROTOCOL_AUTHOR = CertoraArgument(
group=ArgGroups.USEFUL,
help_msg="Adds the protocol's author for easy filtering in the dashboard",
argparse_args={
'action': UniqueStore
}
)
MULTI_ASSERT_CHECK = CertoraArgument(
group=ArgGroups.RUN,
arg_type=AttrArgType.BOOLEAN,
jar_no_value=True,
jar_flag='-multiAssertCheck',
help_msg="Checks each assertion statement that occurs in a rule, separately",
argparse_args={
'action': STORE_TRUE
}
)
INCLUDE_EMPTY_FALLBACK = CertoraArgument(
group=ArgGroups.RUN,
arg_type=AttrArgType.BOOLEAN,
jar_flag='-includeEmptyFallback',
argparse_args={
'action': STORE_TRUE
}
)
RULE_SANITY = CertoraArgument(
group=ArgGroups.RUN,
attr_validation_func=Vf.validate_sanity_value,
help_msg="Selects the type of sanity check that will be performed during execution",
jar_flag='-ruleSanityChecks',
argparse_args={
'nargs': SINGLE_OR_NONE_OCCURRENCES,
'action': UniqueStore,
'default': None, # 'default': when no --rule_sanity given, may take from --settings
'const': Vf.RuleSanityValue.BASIC.name.lower() # 'default': when empty --rule_sanity is given
}
)
MULTI_EXAMPLE = CertoraArgument(
group=ArgGroups.RUN,
attr_validation_func=Vf.validate_multi_example_value,
help_msg="Sets the required multi example mode",
jar_flag='-multipleCEX',
argparse_args={
'nargs': SINGLE_OR_NONE_OCCURRENCES,
'action': UniqueStore,
'default': None, # 'default': when no --multi_example given, may take from --settings
'const': Vf.MultiExampleValues.BASIC.name.lower()
}
)
SHORT_OUTPUT = CertoraArgument(
group=ArgGroups.RUN,
arg_type=AttrArgType.BOOLEAN,
jar_flag='-ciMode',
help_msg="Reduces verbosity",
argparse_args={
'action': STORE_TRUE
}
)
NO_CALLTRACE_STORAGE_INFORMATION = CertoraArgument(
group=ArgGroups.RUN,
arg_type=AttrArgType.BOOLEAN,
jar_flag='-noCalltraceStorageInformation',
argparse_args={
'action': STORE_TRUE
}
)
TYPECHECK_ONLY = CertoraArgument(
group=ArgGroups.RUN,
arg_type=AttrArgType.BOOLEAN,
argparse_args={
'action': STORE_TRUE
}
)
SEND_ONLY = CertoraArgument( # --send_only also implies --short_output.
group=ArgGroups.RUN,
arg_type=AttrArgType.BOOLEAN,
help_msg="Makes the request to the prover but does not wait for verifications results",
argparse_args={
'action': STORE_TRUE
}
)
SOLC = CertoraArgument(
group=ArgGroups.SOLIDITY,
attr_validation_func=Vf.validate_exec_file,
help_msg="Path to the Solidity compiler executable file",
argparse_args={
'action': UniqueStore
}
)
SOLC_ARGS = CertoraArgument(
group=ArgGroups.SOLIDITY,
attr_validation_func=validate_solc_args,
arg_status=ArgStatus.NEW,
help_msg="Sends flags directly to the Solidity compiler",
argparse_args={
'action': UniqueStore,
}
)
SOLC_ARGS_DEPRECATED = CertoraArgument(
group=ArgGroups.SOLIDITY,
arg_type=AttrArgType.LIST_OF_STRINGS,
arg_status=ArgStatus.DEPRECATED,
flag='--solc_args_deprecated' if Util.is_new_api() else '--solc_args',
deprecation_msg="--solc_args is deprecated; use --optimize, --via_ir, or --evm_version instead",
help_msg="List of string arguments to pass for the Solidity compiler, for example: "
"\"['--optimize', '--evm-version', 'istanbul', '--via-ir']\"",
argparse_args={
'action': UniqueStore,
'type': parse_solc_args
}
)
SOLC_VIA_IR = CertoraArgument(
group=ArgGroups.SOLIDITY,
arg_status=ArgStatus.NEW,
arg_type=AttrArgType.BOOLEAN,
help_msg="Instructs the solidity compiler to use intermediate representation instead of EVM opcode",
argparse_args={
'action': STORE_TRUE
}
)
SOLC_EVM_VERSION = CertoraArgument(
group=ArgGroups.SOLIDITY,
arg_status=ArgStatus.NEW,
help_msg="Intructs the Solidity compiler to use a specific EVM version",
argparse_args={
'action': UniqueStore
}
)
SOLC_MAP = CertoraArgument(
group=ArgGroups.SOLIDITY,
attr_validation_func=Vf.validate_solc_map,
arg_type=AttrArgType.ANY,
help_msg="Matches each Solidity file with a Solidity compiler executable",
argparse_args={
'action': UniqueStore,
'type': lambda value: Vf.parse_dict('solc_map', value)
}
)
PATH = CertoraArgument(
group=ArgGroups.SOLIDITY,
arg_status=ArgStatus.DEPRECATED,
attr_validation_func=Vf.validate_dir,
deprecation_msg="--path is deprecated; use --solc_allow_path instead",
help_msg="Sets the base path for loading Solidity files",
argparse_args={
'action': UniqueStore
}
)
SOLC_ALLOW_PATH = CertoraArgument(
group=ArgGroups.SOLIDITY,
arg_status=ArgStatus.NEW,
attr_validation_func=Vf.validate_dir,
help_msg="Sets the base path for loading Solidity files",
argparse_args={
'action': UniqueStore
}
)
SOLC_OPTIMIZE = CertoraArgument(
group=ArgGroups.SOLIDITY,
arg_status=ArgStatus.NEW,
attr_validation_func=Vf.validate_non_negative_integer,
help_msg="Tells the Solidity compiler to optimize the gas costs of the contract for a given number of runs",
argparse_args={
'nargs': SINGLE_OR_NONE_OCCURRENCES,
'action': UniqueStore,
'const': -1
}
)
OPTIMIZE = CertoraArgument(
group=ArgGroups.SOLIDITY,
arg_status=ArgStatus.DEPRECATED,
deprecation_msg="--optimize is deprecated; use --solc_optimize instead",
attr_validation_func=Vf.validate_non_negative_integer,
help_msg="Tells the Solidity compiler to optimize the gas costs of the contract for a given number of runs",
argparse_args={
'nargs': SINGLE_OR_NONE_OCCURRENCES,
'action': UniqueStore,
'const': -1
}
)
OPTIMIZE_MAP = CertoraArgument(
group=ArgGroups.SOLIDITY,
attr_validation_func=Vf.validate_solc_optimize_map,
arg_status=ArgStatus.DEPRECATED,
arg_type=AttrArgType.ANY,
deprecation_msg="--optimize_map is deprecated; use --solc_optimize_map instead",
help_msg="Matches each Solidity source file with a number of runs to optimize for",
argparse_args={
'action': UniqueStore,
'type': lambda value: Vf.parse_dict('solc_optimize_map', value)
}
)
SOLC_OPTIMIZE_MAP = CertoraArgument(
group=ArgGroups.SOLIDITY,
arg_status=ArgStatus.NEW,
attr_validation_func=Vf.validate_solc_optimize_map,
arg_type=AttrArgType.ANY,
help_msg="Matches each Solidity source file with a number of runs to optimize for",
argparse_args={
'action': UniqueStore,
'type': lambda value: Vf.parse_dict('solc_optimize_map', value)
}
)
PACKAGES_PATH = CertoraArgument(
group=ArgGroups.SOLIDITY,
attr_validation_func=Vf.validate_dir,
help_msg="Path to a directory including the Solidity packages",
argparse_args={
'action': UniqueStore
}
)
PACKAGES = CertoraArgument(
group=ArgGroups.SOLIDITY,
attr_validation_func=Vf.validate_packages,
arg_type=AttrArgType.LIST_OF_STRINGS,
help_msg="Maps packages to their location in the file system",
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'action': APPEND
}
)
OPTIMISTIC_LOOP = CertoraArgument(
group=ArgGroups.LOOP,
arg_type=AttrArgType.BOOLEAN,
jar_flag='-assumeUnwindCond',
jar_no_value=True,
help_msg="After unrolling loops, assume the loop halt conditions hold",
argparse_args={
'action': STORE_TRUE
}
)
LOOP_ITER = CertoraArgument(
group=ArgGroups.LOOP,
attr_validation_func=Vf.validate_non_negative_integer,
jar_flag='-b',
help_msg="Maximum number of loop iterations we verify for",
argparse_args={
'action': UniqueStore
}
)
OPTIMISTIC_HASHING = CertoraArgument(
group=ArgGroups.HASHING,
arg_type=AttrArgType.BOOLEAN,
help_msg="Bounds the length of data (with potentially unbounded length) to the value given in "
"--hashing_length_bound",
jar_flag='-optimisticUnboundedHashing',
argparse_args={
'action': STORE_TRUE
}
)
HASHING_LENGTH_BOUND = CertoraArgument(
group=ArgGroups.HASHING,
attr_validation_func=Vf.validate_non_negative_integer,
jar_flag='-hashingLengthBound',
help_msg="Maximum length of otherwise unbounded data chunks that are being hashed",
argparse_args={
'action': UniqueStore
}
)
METHOD = CertoraArgument(
group=ArgGroups.RUN_TIME,
attr_validation_func=Vf.validate_method,
jar_flag='-method',
help_msg="Filters methods to be verified by their signature",
argparse_args={
'action': UniqueStore
}
)
CACHE = CertoraArgument(
group=ArgGroups.RUN_TIME,
argparse_args={
'action': UniqueStore
}
)
SMT_TIMEOUT = CertoraArgument(
group=ArgGroups.RUN_TIME,
attr_validation_func=Vf.validate_positive_integer,
jar_flag='-t',
argparse_args={
'action': UniqueStore
}
)
LINK = CertoraArgument(
group=ArgGroups.LINKAGE,
attr_validation_func=Vf.validate_link_attr,
arg_type=AttrArgType.LIST_OF_STRINGS,
help_msg="Links a slot in a contract with another contract",
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'action': APPEND
}
)
ADDRESS = CertoraArgument(
group=ArgGroups.LINKAGE,
attr_validation_func=Vf.validate_address,
arg_type=AttrArgType.LIST_OF_STRINGS,
help_msg="Sets the address of a contract to a given address",
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'action': APPEND
}
)
STRUCT_LINK_DEPRECATED = CertoraArgument(
group=ArgGroups.LINKAGE,
attr_validation_func=Vf.validate_struct_link,
arg_type=AttrArgType.LIST_OF_STRINGS,
arg_status=ArgStatus.DEPRECATED,
deprecation_msg="--structLink is deprecated; use --struct_link instead",
flag="--structLink",
help_msg="Links a slot in a struct with another contract",
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'action': APPEND,
'dest': 'structLink' if Util.is_new_api() else 'struct_link',
'type': lambda value: type_deprecated(value, ContextAttribute.STRUCT_LINK_DEPRECATED)
}
)
STRUCT_LINK = CertoraArgument(
group=ArgGroups.LINKAGE,
arg_status=ArgStatus.NEW,
attr_validation_func=Vf.validate_struct_link,
arg_type=AttrArgType.LIST_OF_STRINGS,
help_msg="Links a slot in a struct with another contract",
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'action': APPEND,
}
)
PROTOTYPE = CertoraArgument(
group=ArgGroups.CREATION,
attr_validation_func=Vf.validate_prototype_attr,
arg_type=AttrArgType.LIST_OF_STRINGS,
help_msg="Prototype defines that for a constructor bytecode prefixed by the given string, we generate an "
"instance of the given contract",
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'action': APPEND
}
)
DYNAMIC_BOUND = CertoraArgument(
group=ArgGroups.CREATION,
attr_validation_func=Vf.validate_non_negative_integer,
jar_flag='-dynamicCreationBound',
help_msg="Maximum times a contract will be cloned",
argparse_args={
'action': UniqueStore
}
)
DYNAMIC_DISPATCH = CertoraArgument(
group=ArgGroups.CREATION,
arg_type=AttrArgType.BOOLEAN,
jar_flag='-dispatchOnCreated',
help_msg="Automatically apply the DISPATCHER summary on newly created instances",
argparse_args={
'action': STORE_TRUE
}
)
DEBUG = CertoraArgument(
group=ArgGroups.INFO,
arg_type=AttrArgType.BOOLEAN,
argparse_args={
'action': STORE_TRUE
}
)
SHOW_DEBUG_TOPICS = CertoraArgument(
group=ArgGroups.INFO,
arg_type=AttrArgType.BOOLEAN,
flag='--show_debug_topics', # added to prevent dup with DEBUG
argparse_args={
'action': STORE_TRUE
}
)
DEBUG_TOPICS = CertoraArgument(
group=ArgGroups.INFO,
arg_type=AttrArgType.LIST_OF_STRINGS,
argparse_args={
'nargs': ONE_OR_MORE_OCCURRENCES,
'action': APPEND
}
)
VERSION = CertoraArgument(
group=ArgGroups.INFO,
arg_type=AttrArgType.BOOLEAN,
help_msg="Shows the tool version",
argparse_args={
'action': VERSION,
'version': 'This message should never be reached'
}
)
JAR = CertoraArgument(
group=ArgGroups.JAVA,
attr_validation_func=Vf.validate_jar,
argparse_args={
'action': UniqueStore
}
)
JAVA_ARGS_DEPRECATED = CertoraArgument(
group=ArgGroups.JAVA,
attr_validation_func=Vf.validate_java_args,
arg_type=AttrArgType.LIST_OF_STRINGS,
arg_status=ArgStatus.DEPRECATED,
deprecation_msg="--javaArgs is deprecated; use --java_args instead",
flag="--javaArgs",
argparse_args={
'action': APPEND,
'dest': 'javaArgs' if Util.is_new_api() else 'java_args',
'type': lambda value: type_deprecated(value, ContextAttribute.JAVA_ARGS_DEPRECATED)
}
)
JAVA_ARGS = CertoraArgument(
group=ArgGroups.JAVA,
arg_status=ArgStatus.NEW,
arg_type=AttrArgType.LIST_OF_STRINGS,
argparse_args={
'action': APPEND,
}
)
CHECK_ARGS = CertoraArgument(
group=ArgGroups.PARTIAL,
arg_type=AttrArgType.BOOLEAN,
flag='--check_args', # added to prevent dup with DISABLE_LOCAL_TYPECHECKING
argparse_args={
'action': STORE_TRUE
}
)
BUILD_ONLY = CertoraArgument(
group=ArgGroups.PARTIAL,
arg_type=AttrArgType.BOOLEAN,
flag='--build_only', # added to prevent dup with CHECK_ARGS
argparse_args={
'action': STORE_TRUE
}
)
BUILD_DIR = CertoraArgument(
group=ArgGroups.PARTIAL,
attr_validation_func=Vf.validate_build_dir,
argparse_args={
'action': UniqueStore
}
)
DISABLE_LOCAL_TYPECHECKING = CertoraArgument(
group=ArgGroups.PARTIAL,
arg_type=AttrArgType.BOOLEAN,
flag=None if Util.is_new_api() else '--disableLocalTypeChecking',
argparse_args={
'action': STORE_TRUE
}
)
NO_COMPARE = CertoraArgument(
group=ArgGroups.PARTIAL,
arg_type=AttrArgType.BOOLEAN,
flag='--no_compare', # added to prevent dup with CHECK_ARGS
argparse_args={
'action': STORE_TRUE
}
)
EXPECTED_FILE = CertoraArgument(
group=ArgGroups.PARTIAL,
attr_validation_func=Vf.validate_optional_readable_file,
argparse_args={
'action': UniqueStore
}
)
QUEUE_WAIT_MINUTES = CertoraArgument(
group=ArgGroups.CLOUD,
attr_validation_func=Vf.validate_non_negative_integer,
flag='--queue_wait_minutes', # added to prevent dup with MAX_POLL_MINUTES
argparse_args={
'action': UniqueStore
}
)
MAX_POLL_MINUTES = CertoraArgument(
group=ArgGroups.CLOUD,
attr_validation_func=Vf.validate_non_negative_integer,
flag='--max_poll_minutes', # added to prevent dup with QUEUE_WAIT_MINUTES
argparse_args={
'action': UniqueStore
}
)
LOG_QUERY_FREQUENCY_SECONDS = CertoraArgument(
group=ArgGroups.CLOUD,
attr_validation_func=Vf.validate_non_negative_integer,
flag='--log_query_frequency_seconds', # added to prevent dup with QUEUE_WAIT_MINUTES
argparse_args={
'action': UniqueStore
}
)
MAX_ATTEMPTS_TO_FETCH_OUTPUT = CertoraArgument(
group=ArgGroups.CLOUD,
attr_validation_func=Vf.validate_non_negative_integer,
flag='--max_attempts_to_fetch_output', # added to prevent dup with QUEUE_WAIT_MINUTES
argparse_args={
'action': UniqueStore
}
)
DELAY_FETCH_OUTPUT_SECONDS = CertoraArgument(
group=ArgGroups.CLOUD,
attr_validation_func=Vf.validate_non_negative_integer,
flag='--delay_fetch_output_seconds', # added to prevent dup with QUEUE_WAIT_MINUTES
argparse_args={
'action': UniqueStore
}
)
PROCESS = CertoraArgument(
group=ArgGroups.CLOUD,
argparse_args={
'action': UniqueStore,
'default': 'emv'
}
)
SETTINGS = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
arg_type=AttrArgType.LIST_OF_STRINGS,
attr_validation_func=Vf.validate_settings_attr,
arg_status=ArgStatus.DEPRECATED,
argparse_args={
'action': APPEND
}
)
"""
The content of prover_args is added as is to the jar command without any flag. If jar_flag was set to None, this
attribute would have been skipped altogether. setting jar_flag to empty string ensures that the value will be added
to the jar as is
"""
PROVER_ARGS = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
attr_validation_func=validate_prover_args,
arg_status=ArgStatus.NEW,
jar_flag='',
help_msg="Sends flags directly to the prover",
argparse_args={
'action': UniqueStore,
}
)
COMMIT_SHA1 = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
attr_validation_func=Vf.validate_git_hash,
argparse_args={
'action': UniqueStore
}
)
DISABLE_AUTO_CACHE_KEY_GEN = CertoraArgument(
flag='--disable_auto_cache_key_gen', # added to prevent dup with SKIP_PAYABLE_ENVFREE_CHECK
group=ArgGroups.MISC_HIDDEN,
arg_type=AttrArgType.BOOLEAN,
argparse_args={
'action': STORE_TRUE
}
)
MAX_GRAPH_DEPTH = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
attr_validation_func=Vf.validate_non_negative_integer,
jar_flag='-graphDrawLimit',
argparse_args={
'action': UniqueStore
}
)
TOOL_OUTPUT = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
arg_status=ArgStatus.NEW,
attr_validation_func=Vf.validate_tool_output_path,
jar_flag='json',
argparse_args={
'action': UniqueStore,
}
)
TOOL_OUTPUT_DEPRECATED = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
arg_status=ArgStatus.DEPRECATED,
deprecation_msg="--toolOutput is deprecated; use --tool_output instead",
attr_validation_func=Vf.validate_tool_output_path,
jar_flag='-json',
flag='--toolOutput',
argparse_args={
'action': UniqueStore,
'dest': 'tool_output_deprecated' if Util.is_new_api() else 'tool_output'
}
)
INTERNAL_FUNCS = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
attr_validation_func=Vf.validate_json_file,
argparse_args={
'action': UniqueStore
}
)
COINBASE_MODE_DEPRECATED = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
arg_status=ArgStatus.DEPRECATED,
arg_type=AttrArgType.BOOLEAN,
deprecation_msg="--coinbaseMode is deprecated; use --coinbase_mode instead",
flag='--coinbaseMode',
jar_flag='-coinbaseFeaturesMode',
argparse_args={
'action': STORE_TRUE,
'dest': 'coinbaseMode'
}
)
COINBASE_MODE = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
arg_status=ArgStatus.NEW,
arg_type=AttrArgType.BOOLEAN,
jar_flag='-coinbaseFeaturesMode',
argparse_args={
'action': STORE_TRUE
}
)
GET_CONF = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
arg_status=ArgStatus.DEPRECATED,
deprecation_msg="--get_conf is deprecated; use --conf_file instead",
attr_validation_func=Vf.validate_conf_file,
argparse_args={
'action': UniqueStore,
'type': lambda value: type_deprecated(value, ContextAttribute.GET_CONF)
}
)
CONF_OUTPUT_FILE = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
arg_status=ArgStatus.NEW,
argparse_args={
'action': UniqueStore
}
)
SKIP_PAYABLE_ENVFREE_CHECK = CertoraArgument(
flag='--skip_payable_envfree_check', # added to prevent dup with DISABLE_AUTO_CACHE_KEY_GEN
group=ArgGroups.MISC_HIDDEN,
jar_flag='-skipPayableEnvfreeCheck',
arg_type=AttrArgType.BOOLEAN,
argparse_args={
'action': STORE_TRUE
}
)
RUN_SOURCE = CertoraArgument(
group=ArgGroups.MISC_HIDDEN,
attr_validation_func=Vf.validate_run_source,
argparse_args={
'action': UniqueStore
}
)
ASSERT_AUTOFINDERS_SUCCESS = CertoraArgument(
flag="--assert_autofinder_success",
group=ArgGroups.MISC_HIDDEN,
arg_type=AttrArgType.BOOLEAN,
argparse_args={
'action': STORE_TRUE
}
)
STAGING = CertoraArgument(
group=ArgGroups.ENV,
arg_status=ArgStatus.DEPRECATED,
deprecation_msg="--staging is deprecated; use --server staging instead",
argparse_args={
'nargs': SINGLE_OR_NONE_OCCURRENCES,
'default': None,
'const': "",
'action': UniqueStore
}
)
CLOUD = CertoraArgument(
group=ArgGroups.ENV,
arg_status=ArgStatus.DEPRECATED,
deprecation_msg="--cloud is deprecated; use --prover_version instead",
argparse_args={
'nargs': SINGLE_OR_NONE_OCCURRENCES,
'default': None,
'const': "",
'action': UniqueStore
}
)
PROVER_VERSION = CertoraArgument(
group=ArgGroups.ENV,
arg_status=ArgStatus.NEW,
help_msg="Instructs the prover to use a build that is not the default",
argparse_args={
'action': UniqueStore
}
)
SERVER = CertoraArgument(
group=ArgGroups.ENV,
attr_validation_func=Vf.validate_server_value,
arg_status=ArgStatus.NEW,
argparse_args={
'action': UniqueStore
}
)
def to_csv(self) -> str:
"""
dump attributes of an input option to a row in a csv file where the separator is ampersand & and not a comma (,)
For example,
--link & list_of_strings & linkage & & & Links a slot in a contract with another contract & regular
Note that empty strings and None values are recorded as well
@return: value as a string with & as separator
"""
row = [
self.get_flag(), # name
self.value.arg_type.name.lower() if self.value.arg_type is not None else ' ', # type
self.value.group.name.lower() if self.value.group is not None else ' ', # group
self.value.deprecation_msg.lower() if self.value.deprecation_msg is not None else ' ', # deprecation_msg
self.value.jar_flag if self.value.jar_flag is not None else ' ', # jar_flag
self.value.help_msg if self.value.help_msg != '==SUPPRESS==' else ' ', # help_msg
self.value.arg_status.name.lower() if self.value.arg_status is not None else ' ', # arg_status
]
return ' & '.join(row)
@staticmethod
def csv_dump(file_path: Path) -> None:
with file_path.open('w') as f:
f.write("sep=&\n")
f.write("name & type & argparse group & status & deprecation_msg & jar_flag & help_msg\n ")
for attr in ContextAttribute:
f.write(f"{attr.to_csv()}\n")
def validate_value(self, value: str) -> None:
if self.value.attr_validation_func is not None:
self.value.attr_validation_func(value)
def get_flag(self) -> str:
return self.value.flag if self.value.flag is not None else '--' + self.name.lower()
def get_conf_key(self) -> str:
dest = self.value.get_dest()
return dest if dest is not None else self.name.lower()
def type_deprecated(value: str, attr: ContextAttribute) -> str:
if Util.is_new_api():
raise argparse.ArgumentTypeError(attr.value.deprecation_msg)
return value
CONF_ATTR = ContextAttribute.CONF_OUTPUT_FILE if Util.is_new_api() else ContextAttribute.GET_CONF
@lru_cache(maxsize=1, typed=False)
def all_context_keys() -> List[str]:
    return [attr.get_conf_key() for attr in ContextAttribute if attr is not CONF_ATTR]
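# Illustrative sketch (not part of the original module) of how the enum helpers above
# resolve CLI flag names and conf-file keys; the attributes used are arbitrary examples.
def _flag_resolution_example() -> None:
    # LOOP_ITER defines neither an explicit `flag` nor a `dest`, so both helpers fall
    # back to the lower-cased enum member name.
    assert ContextAttribute.LOOP_ITER.get_flag() == '--loop_iter'
    assert ContextAttribute.LOOP_ITER.get_conf_key() == 'loop_iter'
    # FILES sets flag='files' (a positional argument), which get_flag() returns verbatim.
    assert ContextAttribute.FILES.get_flag() == 'files'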
/airtable-async-ti-0.0.1b11.tar.gz/airtable-async-ti-0.0.1b11/README.md
# Asynchronous Airtable Python Wrapper
[![Python 3.7](https://img.shields.io/badge/python-3.7-blue.svg)](https://www.python.org/downloads/release/python-370)
[![Python 3.8](https://img.shields.io/badge/python-3.8-blue.svg)](https://www.python.org/downloads/release/python-380)
[![PyPI version](https://badge.fury.io/py/airtable-async.svg)](https://badge.fury.io/py/airtable-async)
[![PyPI - Downloads](https://img.shields.io/pypi/dm/airtable-async.svg?label=pypi%20downloads)](https://pypi.org/project/airtable-async/)
[![Build Status](https://travis-ci.org/lfparis/airbase.svg?branch=master)](https://travis-ci.org/lfparis/airbase)
[![Coverage Status](https://coveralls.io/repos/github/lfparis/airbase/badge.svg?branch=master)](https://coveralls.io/github/lfparis/airbase?branch=master)
## Installing
```bash
pip install airtable-async
```
Requirements: Python 3.7+
## Documentation
*coming soon*
## Example
```python
import asyncio
from airbase import Airtable
api_key = "your Airtable API key found at https://airtable.com/account"
base_key = "name or id of a base"
table_key = "name or id of a table in that base"
async def main() -> None:
async with Airtable(api_key=api_key) as at:
at: Airtable
# Get all bases for a user
await at.get_bases()
# Get one base by name
base = await at.get_base(base_key, key="name")
# Get one base by id
base = await at.get_base(base_key, key="id")
# Get one base by either id or name
base = await at.get_base(base_key)
# Base Attributes
print(base.id)
print(base.name)
print(base.permission_level)
# Set base logging level (debug, info, warning, error, etc)
# Default is "info"
base.log = "debug"
# Get all tables for a base
await base.get_tables()
# Get one table by name
table = await base.get_table(table_key, key="name")
# Get one table by id
table = await base.get_table(table_key, key="id")
# Get one table by either id or name
table = await base.get_table(table_key)
        # Table Attributes
print(table.base)
print(table.name)
print(table.id)
print(table.primary_field_id)
print(table.primary_field_name)
print(table.fields)
print(table.views)
# Get a record in that table
table_record = await table.get_record("record_id")
# Get all records in that table
table_records = await table.get_records()
# Get all records in a view in that table
view_records = await table.get_records(view="view id or name")
# Get only certain fields for all records in that table
reduced_fields_records = await table.get_records(
filter_by_fields=["field1, field2"]
)
# Get all records in that table that pass a formula
filtered_records = await table.get_records(
filter_by_formula="Airtable Formula"
)
# Post a record in that table
record = {"fields": {"field1": "value1", "field2": "value2"}}
await table.post_record(record)
# Post several records in that table
records = [
{"fields": {"field1": "value1", "field2": "value2"}},
{"fields": {"field1": "value1", "field2": "value2"}},
{"fields": {"field1": "value1", "field2": "value2"}},
]
await table.post_records(records)
# Update a record in that table
record = {
"id": "record id",
"fields": {"field1": "value1", "field2": "value2"},
}
await table.update_record(record)
# Update several records in that table
records = [
{
"id": "record id",
"fields": {"field1": "value1", "field2": "value2"},
},
{
"id": "record id",
"fields": {"field1": "value1", "field2": "value2"},
},
{
"id": "record id",
"fields": {"field1": "value1", "field2": "value2"},
},
]
await table.update_records(records)
# Delete a record in that table
record = {
"id": "record id",
}
await table.delete_record(record)
# Delete several records in that table
records = [
{"id": "record id"},
{"id": "record id"},
{"id": "record id"},
]
await table.delete_records(records)
if __name__ == "__main__":
asyncio.run(main())
```
## License
[MIT](https://opensource.org/licenses/MIT)
/deepl-scraper-pp-0.1.2.tar.gz/deepl-scraper-pp-0.1.2/deepl_scraper_pp/deepl_tr.py
from typing import Union
import asyncio
from urllib.parse import quote
from pyquery import PyQuery as pq
import pyppeteer
import logzero
from logzero import logger
from linetimer import CodeTimer
# from get_ppbrowser.get_ppbrowser import get_ppbrowser
URL = r"https://www.deepl.com/translator"
_ = """
with CodeTimer(name="loading BROWER", unit="s"):
# from deepl_tr_pp.deepl_tr_pp import deepl_tr_pp, LOOP, BROWSER, get_ppbrowser
from get_ppbrowser.get_ppbrowser import LOOP, BROWSER
with CodeTimer(name="start a page", unit="s"):
# URL = 'https://www.deepl.com/translator#auto/zh/'
PAGE = LOOP.run_until_complete(BROWSER.newPage())
try:
LOOP.run_until_complete(PAGE.goto(URL, timeout=45 * 1000))
except Exception as exc:
logger.error("exc: %s, exiting", exc)
raise SystemExit("Unable to make initial connection to deelp") from exc
# """
# fmt: off
async def deepl_tr(
text: str,
from_lang: str = "auto",
to_lang: str = "zh",
page=None,
verbose: Union[bool, int] = False,
timeout: float = 5,
):
# fmt: on
"""Deepl via pyppeteer.
text = "Test it and more"
from_lang="auto"
to_lang="zh"
page=PAGE
verbose=True
"""
#
# set verbose=40 to turn most things off
if isinstance(verbose, bool):
if verbose:
logzero.setup_default_logger(level=10)
else:
logzero.setup_default_logger(level=20)
else: # integer: log_level
logzero.setup_default_logger(level=verbose)
logger.debug(" Entry ")
if page is None:
try:
# browser = await get_ppbrowser()
browser = await pyppeteer.launch()
except Exception as exc:
logger.error(exc)
raise
try:
page = await browser.newPage()
except Exception as exc:
logger.error(exc)
raise
url = r"https://www.deepl.com/translator"
try:
await page.goto(url, timeout=45 * 1000)
except Exception as exc:
logger.error(exc)
raise
url0 = f"{URL}#{from_lang}/{to_lang}/"
url_ = f"{URL}#{from_lang}/{to_lang}/{quote(text)}"
# selector = ".lmt__language_select--target > button > span"
if verbose < 11 or verbose is True:
_ = False # silent
else:
_ = True
with CodeTimer(name="fetching", unit="s", silent=_):
_ = """
await page.goto(url0)
try:
await page.waitForSelector(selector, timeout=8000)
except Exception as exc:
raise
# """
try:
content = await page.content()
except Exception as exc:
logger.error(exc)
raise
doc = pq(content)
text_old = doc('#source-dummydiv').text()
logger.debug("Old source: %s", text_old)
try:
deepl_tr.first_run
except AttributeError:
deepl_tr.first_run = 1
text_old = "_some unlikely random text_"
# selector = "div.lmt__translations_as_text"
if text.strip() == text_old.strip():
logger.debug(" ** early result: ** ")
logger.debug("%s, %s", text, doc('.lmt__translations_as_text__text_btn').text())
doc = pq(await page.content())
content = doc('.lmt__translations_as_text__text_btn').text()
else:
# record content
try:
# page.goto(url_)
await page.goto(url0)
except Exception as exc:
logger.error(exc)
raise
try:
await page.waitForSelector(".lmt__translations_as_text", timeout=20000)
except Exception as exc:
logger.error(exc)
raise
doc = pq(await page.content())
content_old = doc('.lmt__translations_as_text__text_btn').text()
# selector = ".lmt__translations_as_text"
# selector = ".lmt__textarea.lmt__target_textarea.lmt__textarea_base_style"
# selector = ".lmt__textarea.lmt__target_textarea"
# selector = '.lmt__translations_as_text__text_btn'
try:
await page.goto(url_)
except Exception as exc:
logger.error(exc)
raise
try:
await page.waitForSelector(".lmt__translations_as_text", timeout=20000)
except Exception as exc:
logger.error(exc)
raise
doc = pq(await page.content())
content = doc('.lmt__translations_as_text__text_btn').text()
logger.debug(
"content_old: [%s], \n\t content: [%s]",
content_old, content
)
# loop until content changed
idx = 0
# bound = 50 # 5s
while idx < timeout / 0.1:
idx += 1
await asyncio.sleep(.1)
doc = pq(await page.content())
content = doc('.lmt__translations_as_text__text_btn').text()
logger.debug(
"content_old: (%s), \n\tcontent: (%s)", content_old, content
)
if content_old != content and bool(content):
break
logger.debug(" loop: %s", idx)
logger.debug(" Fini ")
return content
async def main():
"""Main."""
import sys
text = "test this and that and more"
res = await deepl_tr(text)
logger.info("%s, %s,", text, res)
if len(sys.argv) > 1:
text = " ".join(sys.argv[1:])
else:
text = "test this and that"
res = await deepl_tr(text)
logger.info("%s, %s,", text, res)
if __name__ == "__main__":
try:
loop = asyncio.get_event_loop()
except Exception as exc:
logger.error(exc)
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
loop.run_until_complete(main())
except Exception as exc:
logger.error(exc)
finally:
loop.close()
_ = """
import sys
text = "test this and that and more"
res = LOOP.run_until_complete(deepl_tr(text))
logger.info("%s, %s,", text, res)
if len(sys.argv) > 1:
text = " ".join(sys.argv[1:])
else:
text = "test this and that"
res = LOOP.run_until_complete(deepl_tr(text))
logger.info("%s, %s,", text, res)
# """ | PypiClean |
/apache_superset_db-1.5.1.2-py3-none-any.whl/superset/importexport/api.py
import json
from datetime import datetime
from io import BytesIO
from zipfile import is_zipfile, ZipFile
from flask import request, Response, send_file
from flask_appbuilder.api import BaseApi, expose, protect
from superset.commands.export.assets import ExportAssetsCommand
from superset.commands.importers.exceptions import (
IncorrectFormatError,
NoValidFilesFoundError,
)
from superset.commands.importers.v1.assets import ImportAssetsCommand
from superset.commands.importers.v1.utils import get_contents_from_bundle
from superset.extensions import event_logger
from superset.views.base_api import requires_form_data
class ImportExportRestApi(BaseApi):
"""
API for exporting all assets or importing them.
"""
resource_name = "assets"
openapi_spec_tag = "Import/export"
@expose("/export/", methods=["GET"])
@protect()
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.export",
log_to_statsd=False,
)
def export(self) -> Response:
"""
Export all assets.
---
get:
description: >-
Returns a ZIP file with all the Superset assets (databases, datasets, charts,
dashboards, saved queries) as YAML files.
responses:
200:
description: ZIP file
content:
application/zip:
schema:
type: string
format: binary
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
root = f"assets_export_{timestamp}"
filename = f"{root}.zip"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
for file_name, file_content in ExportAssetsCommand().run():
with bundle.open(f"{root}/{file_name}", "w") as fp:
fp.write(file_content.encode())
buf.seek(0)
response = send_file(
buf,
mimetype="application/zip",
as_attachment=True,
attachment_filename=filename,
)
return response
@expose("/import/", methods=["POST"])
@protect()
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.import_",
log_to_statsd=False,
)
@requires_form_data
def import_(self) -> Response:
"""Import multiple assets
---
post:
requestBody:
required: true
content:
multipart/form-data:
schema:
type: object
properties:
bundle:
description: upload file (ZIP or JSON)
type: string
format: binary
passwords:
description: >-
JSON map of passwords for each featured database in the
ZIP file. If the ZIP includes a database config in the path
`databases/MyDatabase.yaml`, the password should be provided
in the following format:
`{"databases/MyDatabase.yaml": "my_password"}`.
type: string
responses:
200:
description: Dashboard import result
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
upload = request.files.get("bundle")
if not upload:
return self.response_400()
if not is_zipfile(upload):
raise IncorrectFormatError("Not a ZIP file")
with ZipFile(upload) as bundle:
contents = get_contents_from_bundle(bundle)
if not contents:
raise NoValidFilesFoundError()
passwords = (
json.loads(request.form["passwords"])
if "passwords" in request.form
else None
)
command = ImportAssetsCommand(contents, passwords=passwords)
command.run()
return self.response(200, message="OK")
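For context, here is a hedged client-side sketch of exercising these two endpoints with `requests`. It assumes a standard Superset deployment where this API is mounted under `/api/v1/assets/` and that a bearer token has already been obtained; host, token, and file names are placeholders, and CSRF handling is not shown.

```python
import requests

BASE = "http://localhost:8088"  # placeholder Superset host
session = requests.Session()
session.headers["Authorization"] = "Bearer <access-token>"  # token acquisition not shown

# Export every asset to a local ZIP bundle
resp = session.get(f"{BASE}/api/v1/assets/export/")
resp.raise_for_status()
with open("assets_export.zip", "wb") as fh:
    fh.write(resp.content)

# Re-import the bundle; the passwords payload uses the format documented in the import_ docstring above
with open("assets_export.zip", "rb") as fh:
    resp = session.post(
        f"{BASE}/api/v1/assets/import/",
        files={"bundle": ("assets_export.zip", fh, "application/zip")},
        data={"passwords": '{"databases/MyDatabase.yaml": "my_password"}'},
    )
print(resp.status_code, resp.json())
```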
/ahrli_huobi_client-1.0.8-py3-none-any.whl/huobi/impl/websocketconnection.py
import threading
import websocket
import gzip
import ssl
import logging
from urllib import parse
import urllib.parse
from huobi.base.printtime import PrintDate
from huobi.constant.system import ApiVersion
from huobi.impl.utils.apisignaturev2 import create_signature_v2
from huobi.impl.utils.timeservice import get_current_timestamp
from huobi.impl.utils.urlparamsbuilder import UrlParamsBuilder
from huobi.impl.utils.apisignature import create_signature
from huobi.exception.huobiapiexception import HuobiApiException
from huobi.impl.utils import *
# Key: ws, Value: connection
websocket_connection_handler = dict()
def on_message(ws, message):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_message(message)
return
def on_error(ws, error):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_failure(error)
def on_close(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_close()
def on_open(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_open(ws)
connection_id = 0
class ConnectionState:
IDLE = 0
CONNECTED = 1
CLOSED_ON_ERROR = 2
def websocket_func(*args):
connection_instance = args[0]
connection_instance.ws = websocket.WebSocketApp(connection_instance.url,
on_message=on_message,
on_error=on_error,
on_close=on_close)
global websocket_connection_handler
websocket_connection_handler[connection_instance.ws] = connection_instance
connection_instance.logger.info("[Sub][" + str(connection_instance.id) + "] Connecting...")
connection_instance.delay_in_second = -1
connection_instance.ws.on_open = on_open
connection_instance.ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
connection_instance.logger.info("[Sub][" + str(connection_instance.id) + "] Connection event loop down")
if connection_instance.state == ConnectionState.CONNECTED:
connection_instance.state = ConnectionState.IDLE
class WebsocketConnection:
def __init__(self, api_key, secret_key, uri, watch_dog, request):
# threading.Thread.__init__(self)
self.__thread = None
self.__market_url = "wss://api.huobi.io/ws"
self.__trading_url = "wss://api.huobi.io/ws/" + request.api_version
self.__api_key = api_key
self.__secret_key = secret_key
self.request = request
self.__watch_dog = watch_dog
self.delay_in_second = -1
self.ws = None
self.last_receive_time = 0
self.logger = logging.getLogger("huobi-client")
self.state = ConnectionState.IDLE
global connection_id
connection_id += 1
self.id = connection_id
host = urllib.parse.urlparse(uri).hostname
if host.find("api") == 0:
self.__market_url = "wss://" + host + "/ws"
self.__trading_url = "wss://" + host + "/ws/" + request.api_version
else:
self.__market_url = "wss://" + host + "/api/ws"
self.__trading_url = "wss://" + host + "/ws/" + request.api_version
if request.is_trading:
self.url = self.__trading_url
else:
self.url = self.__market_url
def in_delay_connection(self):
return self.delay_in_second != -1
def re_connect_in_delay(self, delay_in_second):
if self.ws is not None:
self.ws.close()
self.ws = None
self.delay_in_second = delay_in_second
self.logger.warning("[Sub][" + str(self.id) + "] Reconnecting after "
+ str(self.delay_in_second) + " seconds later")
def re_connect(self):
if self.delay_in_second != 0:
self.delay_in_second -= 1
self.logger.warning("In delay connection: " + str(self.delay_in_second))
else:
self.connect()
def connect(self):
if self.state == ConnectionState.CONNECTED:
self.logger.info("[Sub][" + str(self.id) + "] Already connected")
else:
self.__thread = threading.Thread(target=websocket_func, args=[self])
self.__thread.start()
def send(self, data):
#print("sending data :", data)
self.ws.send(data)
def close(self):
self.ws.close()
del websocket_connection_handler[self.ws]
self.__watch_dog.on_connection_closed(self)
self.logger.error("[Sub][" + str(self.id) + "] Closing normally")
def on_open(self, ws):
#print("### open ###")
self.logger.info("[Sub][" + str(self.id) + "] Connected to server")
self.ws = ws
self.last_receive_time = get_current_timestamp()
self.state = ConnectionState.CONNECTED
self.__watch_dog.on_connection_created(self)
if self.request.is_trading:
try:
if self.request.api_version == ApiVersion.VERSION_V1:
builder = UrlParamsBuilder()
create_signature(self.__api_key, self.__secret_key,
"GET", self.url, builder)
builder.put_url("op", "auth")
self.send(builder.build_url_to_json())
elif self.request.api_version == ApiVersion.VERSION_V2:
builder = UrlParamsBuilder()
create_signature_v2(self.__api_key, self.__secret_key,
"GET", self.url, builder)
self.send(builder.build_url_to_json())
else:
self.on_error("api version for create the signature fill failed")
except Exception as e:
self.on_error("Unexpected error when create the signature: " + str(e))
else:
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
return
def on_error(self, error_message):
if self.request.error_handler is not None:
exception = HuobiApiException(HuobiApiException.SUBSCRIPTION_ERROR, error_message)
self.request.error_handler(exception)
self.logger.error("[Sub][" + str(self.id) + "] " + str(error_message))
def on_failure(self, error):
self.on_error("Unexpected error: " + str(error))
self.close_on_error()
def on_message(self, message):
self.last_receive_time = get_current_timestamp()
if isinstance(message, (str)):
#print("RX string : ", message)
json_wrapper = parse_json_from_string(message)
elif isinstance(message, (bytes)):
#print("RX bytes: " + gzip.decompress(message).decode("utf-8"))
json_wrapper = parse_json_from_string(gzip.decompress(message).decode("utf-8"))
else:
print("RX unknow type : ", type(message))
return
if json_wrapper.contain_key("status") and json_wrapper.get_string("status") != "ok":
error_code = json_wrapper.get_string_or_default("err-code", "Unknown error")
error_msg = json_wrapper.get_string_or_default("err-msg", "Unknown error")
self.on_error(error_code + ": " + error_msg)
elif json_wrapper.contain_key("err-code") and json_wrapper.get_int("err-code") != 0:
error_code = json_wrapper.get_string_or_default("err-code", "Unknown error")
error_msg = json_wrapper.get_string_or_default("err-msg", "Unknown error")
self.on_error(error_code + ": " + error_msg)
elif json_wrapper.contain_key("op"):
op = json_wrapper.get_string("op")
if op == "notify":
self.__on_receive(json_wrapper)
elif op == "ping":
ping_ts = json_wrapper.get_string("ts")
self.__process_ping_on_trading_line(ping_ts)
elif op == "auth":
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
elif op == "req":
self.__on_receive(json_wrapper)
elif json_wrapper.contain_key("action"): # for V2
action_name = json_wrapper.get_string("action")
if action_name == "ping":
action_data = json_wrapper.get_object("data")
ping_ts = action_data.get_string("ts")
self.__process_ping_on_v2_trade(ping_ts)
elif action_name == "sub":
action_code = json_wrapper.get_int("code")
if action_code == 200:
logging.info("subscribe ACK received")
else:
logging.error("receive error data : " + message)
elif action_name == "req": #
action_code = json_wrapper.get_int("code")
if action_code == 200:
logging.info("signature ACK received")
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
else:
logging.error("receive error data : " + message)
elif action_name == "push":
action_data = json_wrapper.get_object("data")
if action_data:
self.__on_receive(json_wrapper)
else:
logging.error("receive error push data : " + message)
elif json_wrapper.contain_key("ch"):
self.__on_receive(json_wrapper)
elif json_wrapper.contain_key("rep"):
self.__on_receive(json_wrapper)
elif json_wrapper.contain_key("ping"):
ping_ts = json_wrapper.get_string("ping")
self.__process_ping_on_market_line(ping_ts)
else:
print("unknown data process, RX: " + gzip.decompress(message).decode("utf-8"))
def __on_receive(self, json_wrapper):
res = None
try:
if self.request.json_parser is not None:
res = self.request.json_parser(json_wrapper)
except Exception as e:
self.on_error("Failed to parse server's response: " + str(e))
try:
if self.request.update_callback is not None:
self.request.update_callback(res)
except Exception as e:
self.on_error("Process error: " + str(e)
+ " You should capture the exception in your error handler")
if self.request.auto_close:
self.close()
def __process_ping_on_trading_line(self, ping_ts):
#self.send("{\"op\":\"pong\",\"ts\":" + str(get_current_timestamp()) + "}")
#PrintDate.timestamp_to_date(ping_ts)
self.send("{\"op\":\"pong\",\"ts\":" + str(ping_ts) + "}")
return
def __process_ping_on_market_line(self, ping_ts):
#self.send("{\"pong\":" + str(get_current_timestamp()) + "}")
#PrintDate.timestamp_to_date(ping_ts)
self.send("{\"pong\":" + str(ping_ts) + "}")
return
def __process_ping_on_v2_trade(self, ping_ts):
# PrintDate.timestamp_to_date(ping_ts)
self.send("{\"action\": \"pong\",\"data\": {\"ts\": " + str(ping_ts) +"}}")
return
def close_on_error(self):
if self.ws is not None:
self.ws.close()
self.state = ConnectionState.CLOSED_ON_ERROR
self.logger.error("[Sub][" + str(self.id) + "] Connection is closing due to error") | PypiClean |
/nextbox_ui_plugin-0.13.0.tar.gz/nextbox_ui_plugin-0.13.0/nextbox_ui_plugin/forms.py
from django import forms
from ipam.models import VLAN
from .models import SavedTopology
from dcim.models import Device, Site, Region
from django.conf import settings
from packaging import version
NETBOX_CURRENT_VERSION = version.parse(settings.VERSION)
if NETBOX_CURRENT_VERSION >= version.parse("2.11.0"):
from dcim.models import Location
else:
from dcim.models import RackGroup as Location
if NETBOX_CURRENT_VERSION >= version.parse("3.0") :
from django.utils.translation import gettext as _
if NETBOX_CURRENT_VERSION < version.parse("3.5"):
from utilities.forms import (
BootstrapMixin,
DynamicModelMultipleChoiceField,
DynamicModelChoiceField
)
else:
from utilities.forms import BootstrapMixin
from utilities.forms.fields import (
DynamicModelMultipleChoiceField,
DynamicModelChoiceField
)
class TopologyFilterForm(BootstrapMixin, forms.Form):
model = Device
device_id = DynamicModelMultipleChoiceField(
queryset=Device.objects.all(),
to_field_name='id',
required=False,
null_option='None',
)
location_id = DynamicModelMultipleChoiceField(
queryset=Location.objects.all(),
required=False,
to_field_name='id',
null_option='None',
)
site_id = DynamicModelMultipleChoiceField(
queryset=Site.objects.all(),
required=False,
to_field_name='id',
null_option='None',
)
vlan_id = DynamicModelChoiceField(
queryset=VLAN.objects.all(),
required=False,
to_field_name='id',
null_option='None',
)
region_id = DynamicModelMultipleChoiceField(
queryset=Region.objects.all(),
required=False,
to_field_name='id',
null_option='None',
)
if NETBOX_CURRENT_VERSION >= version.parse("3.0") :
device_id.label = _('Devices')
location_id.label = _('Location')
site_id.label = _('Sites')
vlan_id.label = _('Vlan')
region_id.label = _('Regions')
class LoadSavedTopologyFilterForm(BootstrapMixin, forms.Form):
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None)
super(LoadSavedTopologyFilterForm, self).__init__(*args, **kwargs)
self.fields['saved_topology_id'].queryset = SavedTopology.objects.filter(created_by=user)
model = SavedTopology
saved_topology_id = forms.ModelChoiceField(
queryset=None,
to_field_name='id',
required=True
)
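As a hedged illustration of how these filter forms are typically consumed, the form can be bound to a request's GET parameters and validated with standard Django form handling. The view function and response shape below are illustrative, not part of the plugin; only the fields declared above and the standard Django form API are assumed.

```python
from django.http import JsonResponse

from nextbox_ui_plugin.forms import TopologyFilterForm

def topology_filter_view(request):
    # Bind the query string to the filter form defined above and validate it.
    form = TopologyFilterForm(request.GET or None)
    if not form.is_valid():
        return JsonResponse({"errors": form.errors.get_json_data()}, status=400)
    devices = form.cleaned_data["device_id"]  # queryset of selected Devices
    sites = form.cleaned_data["site_id"]      # queryset of selected Sites
    return JsonResponse({
        "devices": [device.pk for device in devices],
        "sites": [site.pk for site in sites],
    })
```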
/superset_extender-1.0.0-py3-none-any.whl/supextend/static/vendor/bootstrap/js/bootstrap.bundle.min.js | !function(t,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("jquery")):"function"==typeof define&&define.amd?define(["exports","jquery"],e):e((t="undefined"!=typeof globalThis?globalThis:t||self).bootstrap={},t.jQuery)}(this,(function(t,e){"use strict";function n(t){return t&&"object"==typeof t&&"default"in t?t:{default:t}}var i=n(e);function o(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function r(t,e,n){return e&&o(t.prototype,e),n&&o(t,n),t}function a(){return(a=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var i in n)Object.prototype.hasOwnProperty.call(n,i)&&(t[i]=n[i])}return t}).apply(this,arguments)}function s(t){var e=this,n=!1;return i.default(this).one(l.TRANSITION_END,(function(){n=!0})),setTimeout((function(){n||l.triggerTransitionEnd(e)}),t),this}var l={TRANSITION_END:"bsTransitionEnd",getUID:function(t){do{t+=~~(1e6*Math.random())}while(document.getElementById(t));return t},getSelectorFromElement:function(t){var e=t.getAttribute("data-target");if(!e||"#"===e){var n=t.getAttribute("href");e=n&&"#"!==n?n.trim():""}try{return document.querySelector(e)?e:null}catch(t){return null}},getTransitionDurationFromElement:function(t){if(!t)return 0;var e=i.default(t).css("transition-duration"),n=i.default(t).css("transition-delay"),o=parseFloat(e),r=parseFloat(n);return o||r?(e=e.split(",")[0],n=n.split(",")[0],1e3*(parseFloat(e)+parseFloat(n))):0},reflow:function(t){return t.offsetHeight},triggerTransitionEnd:function(t){i.default(t).trigger("transitionend")},supportsTransitionEnd:function(){return Boolean("transitionend")},isElement:function(t){return(t[0]||t).nodeType},typeCheckConfig:function(t,e,n){for(var i in n)if(Object.prototype.hasOwnProperty.call(n,i)){var o=n[i],r=e[i],a=r&&l.isElement(r)?"element":null===(s=r)||"undefined"==typeof s?""+s:{}.toString.call(s).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(o).test(a))throw new Error(t.toUpperCase()+': Option "'+i+'" provided type "'+a+'" but expected type "'+o+'".')}var s},findShadowRoot:function(t){if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){var e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?l.findShadowRoot(t.parentNode):null},jQueryDetection:function(){if("undefined"==typeof i.default)throw new TypeError("Bootstrap's JavaScript requires jQuery. 
jQuery must be included before Bootstrap's JavaScript.");var t=i.default.fn.jquery.split(" ")[0].split(".");if(t[0]<2&&t[1]<9||1===t[0]&&9===t[1]&&t[2]<1||t[0]>=4)throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than v4.0.0")}};l.jQueryDetection(),i.default.fn.emulateTransitionEnd=s,i.default.event.special[l.TRANSITION_END]={bindType:"transitionend",delegateType:"transitionend",handle:function(t){if(i.default(t.target).is(this))return t.handleObj.handler.apply(this,arguments)}};var u="alert",f=i.default.fn[u],d=function(){function t(t){this._element=t}var e=t.prototype;return e.close=function(t){var e=this._element;t&&(e=this._getRootElement(t)),this._triggerCloseEvent(e).isDefaultPrevented()||this._removeElement(e)},e.dispose=function(){i.default.removeData(this._element,"bs.alert"),this._element=null},e._getRootElement=function(t){var e=l.getSelectorFromElement(t),n=!1;return e&&(n=document.querySelector(e)),n||(n=i.default(t).closest(".alert")[0]),n},e._triggerCloseEvent=function(t){var e=i.default.Event("close.bs.alert");return i.default(t).trigger(e),e},e._removeElement=function(t){var e=this;if(i.default(t).removeClass("show"),i.default(t).hasClass("fade")){var n=l.getTransitionDurationFromElement(t);i.default(t).one(l.TRANSITION_END,(function(n){return e._destroyElement(t,n)})).emulateTransitionEnd(n)}else this._destroyElement(t)},e._destroyElement=function(t){i.default(t).detach().trigger("closed.bs.alert").remove()},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data("bs.alert");o||(o=new t(this),n.data("bs.alert",o)),"close"===e&&o[e](this)}))},t._handleDismiss=function(t){return function(e){e&&e.preventDefault(),t.close(this)}},r(t,null,[{key:"VERSION",get:function(){return"4.6.0"}}]),t}();i.default(document).on("click.bs.alert.data-api",'[data-dismiss="alert"]',d._handleDismiss(new d)),i.default.fn[u]=d._jQueryInterface,i.default.fn[u].Constructor=d,i.default.fn[u].noConflict=function(){return i.default.fn[u]=f,d._jQueryInterface};var c=i.default.fn.button,h=function(){function t(t){this._element=t,this.shouldAvoidTriggerChange=!1}var e=t.prototype;return e.toggle=function(){var t=!0,e=!0,n=i.default(this._element).closest('[data-toggle="buttons"]')[0];if(n){var o=this._element.querySelector('input:not([type="hidden"])');if(o){if("radio"===o.type)if(o.checked&&this._element.classList.contains("active"))t=!1;else{var r=n.querySelector(".active");r&&i.default(r).removeClass("active")}t&&("checkbox"!==o.type&&"radio"!==o.type||(o.checked=!this._element.classList.contains("active")),this.shouldAvoidTriggerChange||i.default(o).trigger("change")),o.focus(),e=!1}}this._element.hasAttribute("disabled")||this._element.classList.contains("disabled")||(e&&this._element.setAttribute("aria-pressed",!this._element.classList.contains("active")),t&&i.default(this._element).toggleClass("active"))},e.dispose=function(){i.default.removeData(this._element,"bs.button"),this._element=null},t._jQueryInterface=function(e,n){return this.each((function(){var o=i.default(this),r=o.data("bs.button");r||(r=new t(this),o.data("bs.button",r)),r.shouldAvoidTriggerChange=n,"toggle"===e&&r[e]()}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.0"}}]),t}();i.default(document).on("click.bs.button.data-api",'[data-toggle^="button"]',(function(t){var e=t.target,n=e;if(i.default(e).hasClass("btn")||(e=i.default(e).closest(".btn")[0]),!e||e.hasAttribute("disabled")||e.classList.contains("disabled"))t.preventDefault();else{var 
o=e.querySelector('input:not([type="hidden"])');if(o&&(o.hasAttribute("disabled")||o.classList.contains("disabled")))return void t.preventDefault();"INPUT"!==n.tagName&&"LABEL"===e.tagName||h._jQueryInterface.call(i.default(e),"toggle","INPUT"===n.tagName)}})).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',(function(t){var e=i.default(t.target).closest(".btn")[0];i.default(e).toggleClass("focus",/^focus(in)?$/.test(t.type))})),i.default(window).on("load.bs.button.data-api",(function(){for(var t=[].slice.call(document.querySelectorAll('[data-toggle="buttons"] .btn')),e=0,n=t.length;e<n;e++){var i=t[e],o=i.querySelector('input:not([type="hidden"])');o.checked||o.hasAttribute("checked")?i.classList.add("active"):i.classList.remove("active")}for(var r=0,a=(t=[].slice.call(document.querySelectorAll('[data-toggle="button"]'))).length;r<a;r++){var s=t[r];"true"===s.getAttribute("aria-pressed")?s.classList.add("active"):s.classList.remove("active")}})),i.default.fn.button=h._jQueryInterface,i.default.fn.button.Constructor=h,i.default.fn.button.noConflict=function(){return i.default.fn.button=c,h._jQueryInterface};var p="carousel",m=".bs.carousel",g=i.default.fn[p],v={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0,touch:!0},_={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean",touch:"boolean"},b={TOUCH:"touch",PEN:"pen"},y=function(){function t(t,e){this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this.touchStartX=0,this.touchDeltaX=0,this._config=this._getConfig(e),this._element=t,this._indicatorsElement=this._element.querySelector(".carousel-indicators"),this._touchSupported="ontouchstart"in document.documentElement||navigator.maxTouchPoints>0,this._pointerEvent=Boolean(window.PointerEvent||window.MSPointerEvent),this._addEventListeners()}var e=t.prototype;return e.next=function(){this._isSliding||this._slide("next")},e.nextWhenVisible=function(){var t=i.default(this._element);!document.hidden&&t.is(":visible")&&"hidden"!==t.css("visibility")&&this.next()},e.prev=function(){this._isSliding||this._slide("prev")},e.pause=function(t){t||(this._isPaused=!0),this._element.querySelector(".carousel-item-next, .carousel-item-prev")&&(l.triggerTransitionEnd(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null},e.cycle=function(t){t||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config.interval&&!this._isPaused&&(this._updateInterval(),this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))},e.to=function(t){var e=this;this._activeElement=this._element.querySelector(".active.carousel-item");var n=this._getItemIndex(this._activeElement);if(!(t>this._items.length-1||t<0))if(this._isSliding)i.default(this._element).one("slid.bs.carousel",(function(){return e.to(t)}));else{if(n===t)return this.pause(),void this.cycle();var o=t>n?"next":"prev";this._slide(o,this._items[t])}},e.dispose=function(){i.default(this._element).off(m),i.default.removeData(this._element,"bs.carousel"),this._items=null,this._config=null,this._element=null,this._interval=null,this._isPaused=null,this._isSliding=null,this._activeElement=null,this._indicatorsElement=null},e._getConfig=function(t){return t=a({},v,t),l.typeCheckConfig(p,t,_),t},e._handleSwipe=function(){var t=Math.abs(this.touchDeltaX);if(!(t<=40)){var 
e=t/this.touchDeltaX;this.touchDeltaX=0,e>0&&this.prev(),e<0&&this.next()}},e._addEventListeners=function(){var t=this;this._config.keyboard&&i.default(this._element).on("keydown.bs.carousel",(function(e){return t._keydown(e)})),"hover"===this._config.pause&&i.default(this._element).on("mouseenter.bs.carousel",(function(e){return t.pause(e)})).on("mouseleave.bs.carousel",(function(e){return t.cycle(e)})),this._config.touch&&this._addTouchEventListeners()},e._addTouchEventListeners=function(){var t=this;if(this._touchSupported){var e=function(e){t._pointerEvent&&b[e.originalEvent.pointerType.toUpperCase()]?t.touchStartX=e.originalEvent.clientX:t._pointerEvent||(t.touchStartX=e.originalEvent.touches[0].clientX)},n=function(e){t._pointerEvent&&b[e.originalEvent.pointerType.toUpperCase()]&&(t.touchDeltaX=e.originalEvent.clientX-t.touchStartX),t._handleSwipe(),"hover"===t._config.pause&&(t.pause(),t.touchTimeout&&clearTimeout(t.touchTimeout),t.touchTimeout=setTimeout((function(e){return t.cycle(e)}),500+t._config.interval))};i.default(this._element.querySelectorAll(".carousel-item img")).on("dragstart.bs.carousel",(function(t){return t.preventDefault()})),this._pointerEvent?(i.default(this._element).on("pointerdown.bs.carousel",(function(t){return e(t)})),i.default(this._element).on("pointerup.bs.carousel",(function(t){return n(t)})),this._element.classList.add("pointer-event")):(i.default(this._element).on("touchstart.bs.carousel",(function(t){return e(t)})),i.default(this._element).on("touchmove.bs.carousel",(function(e){return function(e){e.originalEvent.touches&&e.originalEvent.touches.length>1?t.touchDeltaX=0:t.touchDeltaX=e.originalEvent.touches[0].clientX-t.touchStartX}(e)})),i.default(this._element).on("touchend.bs.carousel",(function(t){return n(t)})))}},e._keydown=function(t){if(!/input|textarea/i.test(t.target.tagName))switch(t.which){case 37:t.preventDefault(),this.prev();break;case 39:t.preventDefault(),this.next()}},e._getItemIndex=function(t){return this._items=t&&t.parentNode?[].slice.call(t.parentNode.querySelectorAll(".carousel-item")):[],this._items.indexOf(t)},e._getItemByDirection=function(t,e){var n="next"===t,i="prev"===t,o=this._getItemIndex(e),r=this._items.length-1;if((i&&0===o||n&&o===r)&&!this._config.wrap)return e;var a=(o+("prev"===t?-1:1))%this._items.length;return-1===a?this._items[this._items.length-1]:this._items[a]},e._triggerSlideEvent=function(t,e){var n=this._getItemIndex(t),o=this._getItemIndex(this._element.querySelector(".active.carousel-item")),r=i.default.Event("slide.bs.carousel",{relatedTarget:t,direction:e,from:o,to:n});return i.default(this._element).trigger(r),r},e._setActiveIndicatorElement=function(t){if(this._indicatorsElement){var e=[].slice.call(this._indicatorsElement.querySelectorAll(".active"));i.default(e).removeClass("active");var n=this._indicatorsElement.children[this._getItemIndex(t)];n&&i.default(n).addClass("active")}},e._updateInterval=function(){var t=this._activeElement||this._element.querySelector(".active.carousel-item");if(t){var e=parseInt(t.getAttribute("data-interval"),10);e?(this._config.defaultInterval=this._config.defaultInterval||this._config.interval,this._config.interval=e):this._config.interval=this._config.defaultInterval||this._config.interval}},e._slide=function(t,e){var 
n,o,r,a=this,s=this._element.querySelector(".active.carousel-item"),u=this._getItemIndex(s),f=e||s&&this._getItemByDirection(t,s),d=this._getItemIndex(f),c=Boolean(this._interval);if("next"===t?(n="carousel-item-left",o="carousel-item-next",r="left"):(n="carousel-item-right",o="carousel-item-prev",r="right"),f&&i.default(f).hasClass("active"))this._isSliding=!1;else if(!this._triggerSlideEvent(f,r).isDefaultPrevented()&&s&&f){this._isSliding=!0,c&&this.pause(),this._setActiveIndicatorElement(f),this._activeElement=f;var h=i.default.Event("slid.bs.carousel",{relatedTarget:f,direction:r,from:u,to:d});if(i.default(this._element).hasClass("slide")){i.default(f).addClass(o),l.reflow(f),i.default(s).addClass(n),i.default(f).addClass(n);var p=l.getTransitionDurationFromElement(s);i.default(s).one(l.TRANSITION_END,(function(){i.default(f).removeClass(n+" "+o).addClass("active"),i.default(s).removeClass("active "+o+" "+n),a._isSliding=!1,setTimeout((function(){return i.default(a._element).trigger(h)}),0)})).emulateTransitionEnd(p)}else i.default(s).removeClass("active"),i.default(f).addClass("active"),this._isSliding=!1,i.default(this._element).trigger(h);c&&this.cycle()}},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this).data("bs.carousel"),o=a({},v,i.default(this).data());"object"==typeof e&&(o=a({},o,e));var r="string"==typeof e?e:o.slide;if(n||(n=new t(this,o),i.default(this).data("bs.carousel",n)),"number"==typeof e)n.to(e);else if("string"==typeof r){if("undefined"==typeof n[r])throw new TypeError('No method named "'+r+'"');n[r]()}else o.interval&&o.ride&&(n.pause(),n.cycle())}))},t._dataApiClickHandler=function(e){var n=l.getSelectorFromElement(this);if(n){var o=i.default(n)[0];if(o&&i.default(o).hasClass("carousel")){var r=a({},i.default(o).data(),i.default(this).data()),s=this.getAttribute("data-slide-to");s&&(r.interval=!1),t._jQueryInterface.call(i.default(o),r),s&&i.default(o).data("bs.carousel").to(s),e.preventDefault()}}},r(t,null,[{key:"VERSION",get:function(){return"4.6.0"}},{key:"Default",get:function(){return v}}]),t}();i.default(document).on("click.bs.carousel.data-api","[data-slide], [data-slide-to]",y._dataApiClickHandler),i.default(window).on("load.bs.carousel.data-api",(function(){for(var t=[].slice.call(document.querySelectorAll('[data-ride="carousel"]')),e=0,n=t.length;e<n;e++){var o=i.default(t[e]);y._jQueryInterface.call(o,o.data())}})),i.default.fn[p]=y._jQueryInterface,i.default.fn[p].Constructor=y,i.default.fn[p].noConflict=function(){return i.default.fn[p]=g,y._jQueryInterface};var w="collapse",E=i.default.fn[w],T={toggle:!0,parent:""},C={toggle:"boolean",parent:"(string|element)"},S=function(){function t(t,e){this._isTransitioning=!1,this._element=t,this._config=this._getConfig(e),this._triggerArray=[].slice.call(document.querySelectorAll('[data-toggle="collapse"][href="#'+t.id+'"],[data-toggle="collapse"][data-target="#'+t.id+'"]'));for(var n=[].slice.call(document.querySelectorAll('[data-toggle="collapse"]')),i=0,o=n.length;i<o;i++){var r=n[i],a=l.getSelectorFromElement(r),s=[].slice.call(document.querySelectorAll(a)).filter((function(e){return e===t}));null!==a&&s.length>0&&(this._selector=a,this._triggerArray.push(r))}this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}var e=t.prototype;return e.toggle=function(){i.default(this._element).hasClass("show")?this.hide():this.show()},e.show=function(){var 
e,n,o=this;if(!this._isTransitioning&&!i.default(this._element).hasClass("show")&&(this._parent&&0===(e=[].slice.call(this._parent.querySelectorAll(".show, .collapsing")).filter((function(t){return"string"==typeof o._config.parent?t.getAttribute("data-parent")===o._config.parent:t.classList.contains("collapse")}))).length&&(e=null),!(e&&(n=i.default(e).not(this._selector).data("bs.collapse"))&&n._isTransitioning))){var r=i.default.Event("show.bs.collapse");if(i.default(this._element).trigger(r),!r.isDefaultPrevented()){e&&(t._jQueryInterface.call(i.default(e).not(this._selector),"hide"),n||i.default(e).data("bs.collapse",null));var a=this._getDimension();i.default(this._element).removeClass("collapse").addClass("collapsing"),this._element.style[a]=0,this._triggerArray.length&&i.default(this._triggerArray).removeClass("collapsed").attr("aria-expanded",!0),this.setTransitioning(!0);var s="scroll"+(a[0].toUpperCase()+a.slice(1)),u=l.getTransitionDurationFromElement(this._element);i.default(this._element).one(l.TRANSITION_END,(function(){i.default(o._element).removeClass("collapsing").addClass("collapse show"),o._element.style[a]="",o.setTransitioning(!1),i.default(o._element).trigger("shown.bs.collapse")})).emulateTransitionEnd(u),this._element.style[a]=this._element[s]+"px"}}},e.hide=function(){var t=this;if(!this._isTransitioning&&i.default(this._element).hasClass("show")){var e=i.default.Event("hide.bs.collapse");if(i.default(this._element).trigger(e),!e.isDefaultPrevented()){var n=this._getDimension();this._element.style[n]=this._element.getBoundingClientRect()[n]+"px",l.reflow(this._element),i.default(this._element).addClass("collapsing").removeClass("collapse show");var o=this._triggerArray.length;if(o>0)for(var r=0;r<o;r++){var a=this._triggerArray[r],s=l.getSelectorFromElement(a);if(null!==s)i.default([].slice.call(document.querySelectorAll(s))).hasClass("show")||i.default(a).addClass("collapsed").attr("aria-expanded",!1)}this.setTransitioning(!0);this._element.style[n]="";var u=l.getTransitionDurationFromElement(this._element);i.default(this._element).one(l.TRANSITION_END,(function(){t.setTransitioning(!1),i.default(t._element).removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")})).emulateTransitionEnd(u)}}},e.setTransitioning=function(t){this._isTransitioning=t},e.dispose=function(){i.default.removeData(this._element,"bs.collapse"),this._config=null,this._parent=null,this._element=null,this._triggerArray=null,this._isTransitioning=null},e._getConfig=function(t){return(t=a({},T,t)).toggle=Boolean(t.toggle),l.typeCheckConfig(w,t,C),t},e._getDimension=function(){return i.default(this._element).hasClass("width")?"width":"height"},e._getParent=function(){var e,n=this;l.isElement(this._config.parent)?(e=this._config.parent,"undefined"!=typeof this._config.parent.jquery&&(e=this._config.parent[0])):e=document.querySelector(this._config.parent);var o='[data-toggle="collapse"][data-parent="'+this._config.parent+'"]',r=[].slice.call(e.querySelectorAll(o));return i.default(r).each((function(e,i){n._addAriaAndCollapsedClass(t._getTargetFromElement(i),[i])})),e},e._addAriaAndCollapsedClass=function(t,e){var n=i.default(t).hasClass("show");e.length&&i.default(e).toggleClass("collapsed",!n).attr("aria-expanded",n)},t._getTargetFromElement=function(t){var e=l.getSelectorFromElement(t);return e?document.querySelector(e):null},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data("bs.collapse"),r=a({},T,n.data(),"object"==typeof 
e&&e?e:{});if(!o&&r.toggle&&"string"==typeof e&&/show|hide/.test(e)&&(r.toggle=!1),o||(o=new t(this,r),n.data("bs.collapse",o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e]()}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.0"}},{key:"Default",get:function(){return T}}]),t}();i.default(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',(function(t){"A"===t.currentTarget.tagName&&t.preventDefault();var e=i.default(this),n=l.getSelectorFromElement(this),o=[].slice.call(document.querySelectorAll(n));i.default(o).each((function(){var t=i.default(this),n=t.data("bs.collapse")?"toggle":e.data();S._jQueryInterface.call(t,n)}))})),i.default.fn[w]=S._jQueryInterface,i.default.fn[w].Constructor=S,i.default.fn[w].noConflict=function(){return i.default.fn[w]=E,S._jQueryInterface};var D="undefined"!=typeof window&&"undefined"!=typeof document&&"undefined"!=typeof navigator,N=function(){for(var t=["Edge","Trident","Firefox"],e=0;e<t.length;e+=1)if(D&&navigator.userAgent.indexOf(t[e])>=0)return 1;return 0}();var k=D&&window.Promise?function(t){var e=!1;return function(){e||(e=!0,window.Promise.resolve().then((function(){e=!1,t()})))}}:function(t){var e=!1;return function(){e||(e=!0,setTimeout((function(){e=!1,t()}),N))}};function A(t){return t&&"[object Function]"==={}.toString.call(t)}function I(t,e){if(1!==t.nodeType)return[];var n=t.ownerDocument.defaultView.getComputedStyle(t,null);return e?n[e]:n}function O(t){return"HTML"===t.nodeName?t:t.parentNode||t.host}function x(t){if(!t)return document.body;switch(t.nodeName){case"HTML":case"BODY":return t.ownerDocument.body;case"#document":return t.body}var e=I(t),n=e.overflow,i=e.overflowX,o=e.overflowY;return/(auto|scroll|overlay)/.test(n+o+i)?t:x(O(t))}function j(t){return t&&t.referenceNode?t.referenceNode:t}var L=D&&!(!window.MSInputMethodContext||!document.documentMode),P=D&&/MSIE 10/.test(navigator.userAgent);function F(t){return 11===t?L:10===t?P:L||P}function R(t){if(!t)return document.documentElement;for(var e=F(10)?document.body:null,n=t.offsetParent||null;n===e&&t.nextElementSibling;)n=(t=t.nextElementSibling).offsetParent;var i=n&&n.nodeName;return i&&"BODY"!==i&&"HTML"!==i?-1!==["TH","TD","TABLE"].indexOf(n.nodeName)&&"static"===I(n,"position")?R(n):n:t?t.ownerDocument.documentElement:document.documentElement}function H(t){return null!==t.parentNode?H(t.parentNode):t}function M(t,e){if(!(t&&t.nodeType&&e&&e.nodeType))return document.documentElement;var n=t.compareDocumentPosition(e)&Node.DOCUMENT_POSITION_FOLLOWING,i=n?t:e,o=n?e:t,r=document.createRange();r.setStart(i,0),r.setEnd(o,0);var a,s,l=r.commonAncestorContainer;if(t!==l&&e!==l||i.contains(o))return"BODY"===(s=(a=l).nodeName)||"HTML"!==s&&R(a.firstElementChild)!==a?R(l):l;var u=H(t);return u.host?M(u.host,e):M(t,H(e).host)}function q(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"top",n="top"===e?"scrollTop":"scrollLeft",i=t.nodeName;if("BODY"===i||"HTML"===i){var o=t.ownerDocument.documentElement,r=t.ownerDocument.scrollingElement||o;return r[n]}return t[n]}function B(t,e){var n=arguments.length>2&&void 0!==arguments[2]&&arguments[2],i=q(e,"top"),o=q(e,"left"),r=n?-1:1;return t.top+=i*r,t.bottom+=i*r,t.left+=o*r,t.right+=o*r,t}function Q(t,e){var n="x"===e?"Left":"Top",i="Left"===n?"Right":"Bottom";return parseFloat(t["border"+n+"Width"])+parseFloat(t["border"+i+"Width"])}function W(t,e,n,i){return 
Math.max(e["offset"+t],e["scroll"+t],n["client"+t],n["offset"+t],n["scroll"+t],F(10)?parseInt(n["offset"+t])+parseInt(i["margin"+("Height"===t?"Top":"Left")])+parseInt(i["margin"+("Height"===t?"Bottom":"Right")]):0)}function U(t){var e=t.body,n=t.documentElement,i=F(10)&&getComputedStyle(n);return{height:W("Height",e,n,i),width:W("Width",e,n,i)}}var V=function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")},Y=function(){function t(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}return function(e,n,i){return n&&t(e.prototype,n),i&&t(e,i),e}}(),z=function(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t},X=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var i in n)Object.prototype.hasOwnProperty.call(n,i)&&(t[i]=n[i])}return t};function K(t){return X({},t,{right:t.left+t.width,bottom:t.top+t.height})}function G(t){var e={};try{if(F(10)){e=t.getBoundingClientRect();var n=q(t,"top"),i=q(t,"left");e.top+=n,e.left+=i,e.bottom+=n,e.right+=i}else e=t.getBoundingClientRect()}catch(t){}var o={left:e.left,top:e.top,width:e.right-e.left,height:e.bottom-e.top},r="HTML"===t.nodeName?U(t.ownerDocument):{},a=r.width||t.clientWidth||o.width,s=r.height||t.clientHeight||o.height,l=t.offsetWidth-a,u=t.offsetHeight-s;if(l||u){var f=I(t);l-=Q(f,"x"),u-=Q(f,"y"),o.width-=l,o.height-=u}return K(o)}function $(t,e){var n=arguments.length>2&&void 0!==arguments[2]&&arguments[2],i=F(10),o="HTML"===e.nodeName,r=G(t),a=G(e),s=x(t),l=I(e),u=parseFloat(l.borderTopWidth),f=parseFloat(l.borderLeftWidth);n&&o&&(a.top=Math.max(a.top,0),a.left=Math.max(a.left,0));var d=K({top:r.top-a.top-u,left:r.left-a.left-f,width:r.width,height:r.height});if(d.marginTop=0,d.marginLeft=0,!i&&o){var c=parseFloat(l.marginTop),h=parseFloat(l.marginLeft);d.top-=u-c,d.bottom-=u-c,d.left-=f-h,d.right-=f-h,d.marginTop=c,d.marginLeft=h}return(i&&!n?e.contains(s):e===s&&"BODY"!==s.nodeName)&&(d=B(d,e)),d}function J(t){var e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=t.ownerDocument.documentElement,i=$(t,n),o=Math.max(n.clientWidth,window.innerWidth||0),r=Math.max(n.clientHeight,window.innerHeight||0),a=e?0:q(n),s=e?0:q(n,"left"),l={top:a-i.top+i.marginTop,left:s-i.left+i.marginLeft,width:o,height:r};return K(l)}function Z(t){var e=t.nodeName;if("BODY"===e||"HTML"===e)return!1;if("fixed"===I(t,"position"))return!0;var n=O(t);return!!n&&Z(n)}function tt(t){if(!t||!t.parentElement||F())return document.documentElement;for(var e=t.parentElement;e&&"none"===I(e,"transform");)e=e.parentElement;return e||document.documentElement}function et(t,e,n,i){var o=arguments.length>4&&void 0!==arguments[4]&&arguments[4],r={top:0,left:0},a=o?tt(t):M(t,j(e));if("viewport"===i)r=J(a,o);else{var s=void 0;"scrollParent"===i?"BODY"===(s=x(O(e))).nodeName&&(s=t.ownerDocument.documentElement):s="window"===i?t.ownerDocument.documentElement:i;var l=$(s,a,o);if("HTML"!==s.nodeName||Z(a))r=l;else{var u=U(t.ownerDocument),f=u.height,d=u.width;r.top+=l.top-l.marginTop,r.bottom=f+l.top,r.left+=l.left-l.marginLeft,r.right=d+l.left}}var c="number"==typeof(n=n||0);return r.left+=c?n:n.left||0,r.top+=c?n:n.top||0,r.right-=c?n:n.right||0,r.bottom-=c?n:n.bottom||0,r}function nt(t){return t.width*t.height}function it(t,e,n,i,o){var r=arguments.length>5&&void 0!==arguments[5]?arguments[5]:0;if(-1===t.indexOf("auto"))return t;var 
a=et(n,i,r,o),s={top:{width:a.width,height:e.top-a.top},right:{width:a.right-e.right,height:a.height},bottom:{width:a.width,height:a.bottom-e.bottom},left:{width:e.left-a.left,height:a.height}},l=Object.keys(s).map((function(t){return X({key:t},s[t],{area:nt(s[t])})})).sort((function(t,e){return e.area-t.area})),u=l.filter((function(t){var e=t.width,i=t.height;return e>=n.clientWidth&&i>=n.clientHeight})),f=u.length>0?u[0].key:l[0].key,d=t.split("-")[1];return f+(d?"-"+d:"")}function ot(t,e,n){var i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:null,o=i?tt(e):M(e,j(n));return $(n,o,i)}function rt(t){var e=t.ownerDocument.defaultView.getComputedStyle(t),n=parseFloat(e.marginTop||0)+parseFloat(e.marginBottom||0),i=parseFloat(e.marginLeft||0)+parseFloat(e.marginRight||0);return{width:t.offsetWidth+i,height:t.offsetHeight+n}}function at(t){var e={left:"right",right:"left",bottom:"top",top:"bottom"};return t.replace(/left|right|bottom|top/g,(function(t){return e[t]}))}function st(t,e,n){n=n.split("-")[0];var i=rt(t),o={width:i.width,height:i.height},r=-1!==["right","left"].indexOf(n),a=r?"top":"left",s=r?"left":"top",l=r?"height":"width",u=r?"width":"height";return o[a]=e[a]+e[l]/2-i[l]/2,o[s]=n===s?e[s]-i[u]:e[at(s)],o}function lt(t,e){return Array.prototype.find?t.find(e):t.filter(e)[0]}function ut(t,e,n){return(void 0===n?t:t.slice(0,function(t,e,n){if(Array.prototype.findIndex)return t.findIndex((function(t){return t[e]===n}));var i=lt(t,(function(t){return t[e]===n}));return t.indexOf(i)}(t,"name",n))).forEach((function(t){t.function&&console.warn("`modifier.function` is deprecated, use `modifier.fn`!");var n=t.function||t.fn;t.enabled&&A(n)&&(e.offsets.popper=K(e.offsets.popper),e.offsets.reference=K(e.offsets.reference),e=n(e,t))})),e}function ft(){if(!this.state.isDestroyed){var t={instance:this,styles:{},arrowStyles:{},attributes:{},flipped:!1,offsets:{}};t.offsets.reference=ot(this.state,this.popper,this.reference,this.options.positionFixed),t.placement=it(this.options.placement,t.offsets.reference,this.popper,this.reference,this.options.modifiers.flip.boundariesElement,this.options.modifiers.flip.padding),t.originalPlacement=t.placement,t.positionFixed=this.options.positionFixed,t.offsets.popper=st(this.popper,t.offsets.reference,t.placement),t.offsets.popper.position=this.options.positionFixed?"fixed":"absolute",t=ut(this.modifiers,t),this.state.isCreated?this.options.onUpdate(t):(this.state.isCreated=!0,this.options.onCreate(t))}}function dt(t,e){return t.some((function(t){var n=t.name;return t.enabled&&n===e}))}function ct(t){for(var e=[!1,"ms","Webkit","Moz","O"],n=t.charAt(0).toUpperCase()+t.slice(1),i=0;i<e.length;i++){var o=e[i],r=o?""+o+n:t;if("undefined"!=typeof document.body.style[r])return r}return null}function ht(){return this.state.isDestroyed=!0,dt(this.modifiers,"applyStyle")&&(this.popper.removeAttribute("x-placement"),this.popper.style.position="",this.popper.style.top="",this.popper.style.left="",this.popper.style.right="",this.popper.style.bottom="",this.popper.style.willChange="",this.popper.style[ct("transform")]=""),this.disableEventListeners(),this.options.removeOnDestroy&&this.popper.parentNode.removeChild(this.popper),this}function pt(t){var e=t.ownerDocument;return e?e.defaultView:window}function mt(t,e,n,i){n.updateBound=i,pt(t).addEventListener("resize",n.updateBound,{passive:!0});var o=x(t);return function t(e,n,i,o){var 
r="BODY"===e.nodeName,a=r?e.ownerDocument.defaultView:e;a.addEventListener(n,i,{passive:!0}),r||t(x(a.parentNode),n,i,o),o.push(a)}(o,"scroll",n.updateBound,n.scrollParents),n.scrollElement=o,n.eventsEnabled=!0,n}function gt(){this.state.eventsEnabled||(this.state=mt(this.reference,this.options,this.state,this.scheduleUpdate))}function vt(){var t,e;this.state.eventsEnabled&&(cancelAnimationFrame(this.scheduleUpdate),this.state=(t=this.reference,e=this.state,pt(t).removeEventListener("resize",e.updateBound),e.scrollParents.forEach((function(t){t.removeEventListener("scroll",e.updateBound)})),e.updateBound=null,e.scrollParents=[],e.scrollElement=null,e.eventsEnabled=!1,e))}function _t(t){return""!==t&&!isNaN(parseFloat(t))&&isFinite(t)}function bt(t,e){Object.keys(e).forEach((function(n){var i="";-1!==["width","height","top","right","bottom","left"].indexOf(n)&&_t(e[n])&&(i="px"),t.style[n]=e[n]+i}))}var yt=D&&/Firefox/i.test(navigator.userAgent);function wt(t,e,n){var i=lt(t,(function(t){return t.name===e})),o=!!i&&t.some((function(t){return t.name===n&&t.enabled&&t.order<i.order}));if(!o){var r="`"+e+"`",a="`"+n+"`";console.warn(a+" modifier is required by "+r+" modifier in order to work, be sure to include it before "+r+"!")}return o}var Et=["auto-start","auto","auto-end","top-start","top","top-end","right-start","right","right-end","bottom-end","bottom","bottom-start","left-end","left","left-start"],Tt=Et.slice(3);function Ct(t){var e=arguments.length>1&&void 0!==arguments[1]&&arguments[1],n=Tt.indexOf(t),i=Tt.slice(n+1).concat(Tt.slice(0,n));return e?i.reverse():i}var St="flip",Dt="clockwise",Nt="counterclockwise";function kt(t,e,n,i){var o=[0,0],r=-1!==["right","left"].indexOf(i),a=t.split(/(\+|\-)/).map((function(t){return t.trim()})),s=a.indexOf(lt(a,(function(t){return-1!==t.search(/,|\s/)})));a[s]&&-1===a[s].indexOf(",")&&console.warn("Offsets separated by white space(s) are deprecated, use a comma (,) instead.");var l=/\s*,\s*|\s+/,u=-1!==s?[a.slice(0,s).concat([a[s].split(l)[0]]),[a[s].split(l)[1]].concat(a.slice(s+1))]:[a];return(u=u.map((function(t,i){var o=(1===i?!r:r)?"height":"width",a=!1;return t.reduce((function(t,e){return""===t[t.length-1]&&-1!==["+","-"].indexOf(e)?(t[t.length-1]=e,a=!0,t):a?(t[t.length-1]+=e,a=!1,t):t.concat(e)}),[]).map((function(t){return function(t,e,n,i){var o=t.match(/((?:\-|\+)?\d*\.?\d*)(.*)/),r=+o[1],a=o[2];if(!r)return t;if(0===a.indexOf("%")){var s=void 0;switch(a){case"%p":s=n;break;case"%":case"%r":default:s=i}return K(s)[e]/100*r}if("vh"===a||"vw"===a)return("vh"===a?Math.max(document.documentElement.clientHeight,window.innerHeight||0):Math.max(document.documentElement.clientWidth,window.innerWidth||0))/100*r;return r}(t,o,e,n)}))}))).forEach((function(t,e){t.forEach((function(n,i){_t(n)&&(o[e]+=n*("-"===t[i-1]?-1:1))}))})),o}var At={placement:"bottom",positionFixed:!1,eventsEnabled:!0,removeOnDestroy:!1,onCreate:function(){},onUpdate:function(){},modifiers:{shift:{order:100,enabled:!0,fn:function(t){var e=t.placement,n=e.split("-")[0],i=e.split("-")[1];if(i){var o=t.offsets,r=o.reference,a=o.popper,s=-1!==["bottom","top"].indexOf(n),l=s?"left":"top",u=s?"width":"height",f={start:z({},l,r[l]),end:z({},l,r[l]+r[u]-a[u])};t.offsets.popper=X({},a,f[i])}return t}},offset:{order:200,enabled:!0,fn:function(t,e){var n=e.offset,i=t.placement,o=t.offsets,r=o.popper,a=o.reference,s=i.split("-")[0],l=void 0;return 
l=_t(+n)?[+n,0]:kt(n,r,a,s),"left"===s?(r.top+=l[0],r.left-=l[1]):"right"===s?(r.top+=l[0],r.left+=l[1]):"top"===s?(r.left+=l[0],r.top-=l[1]):"bottom"===s&&(r.left+=l[0],r.top+=l[1]),t.popper=r,t},offset:0},preventOverflow:{order:300,enabled:!0,fn:function(t,e){var n=e.boundariesElement||R(t.instance.popper);t.instance.reference===n&&(n=R(n));var i=ct("transform"),o=t.instance.popper.style,r=o.top,a=o.left,s=o[i];o.top="",o.left="",o[i]="";var l=et(t.instance.popper,t.instance.reference,e.padding,n,t.positionFixed);o.top=r,o.left=a,o[i]=s,e.boundaries=l;var u=e.priority,f=t.offsets.popper,d={primary:function(t){var n=f[t];return f[t]<l[t]&&!e.escapeWithReference&&(n=Math.max(f[t],l[t])),z({},t,n)},secondary:function(t){var n="right"===t?"left":"top",i=f[n];return f[t]>l[t]&&!e.escapeWithReference&&(i=Math.min(f[n],l[t]-("right"===t?f.width:f.height))),z({},n,i)}};return u.forEach((function(t){var e=-1!==["left","top"].indexOf(t)?"primary":"secondary";f=X({},f,d[e](t))})),t.offsets.popper=f,t},priority:["left","right","top","bottom"],padding:5,boundariesElement:"scrollParent"},keepTogether:{order:400,enabled:!0,fn:function(t){var e=t.offsets,n=e.popper,i=e.reference,o=t.placement.split("-")[0],r=Math.floor,a=-1!==["top","bottom"].indexOf(o),s=a?"right":"bottom",l=a?"left":"top",u=a?"width":"height";return n[s]<r(i[l])&&(t.offsets.popper[l]=r(i[l])-n[u]),n[l]>r(i[s])&&(t.offsets.popper[l]=r(i[s])),t}},arrow:{order:500,enabled:!0,fn:function(t,e){var n;if(!wt(t.instance.modifiers,"arrow","keepTogether"))return t;var i=e.element;if("string"==typeof i){if(!(i=t.instance.popper.querySelector(i)))return t}else if(!t.instance.popper.contains(i))return console.warn("WARNING: `arrow.element` must be child of its popper element!"),t;var o=t.placement.split("-")[0],r=t.offsets,a=r.popper,s=r.reference,l=-1!==["left","right"].indexOf(o),u=l?"height":"width",f=l?"Top":"Left",d=f.toLowerCase(),c=l?"left":"top",h=l?"bottom":"right",p=rt(i)[u];s[h]-p<a[d]&&(t.offsets.popper[d]-=a[d]-(s[h]-p)),s[d]+p>a[h]&&(t.offsets.popper[d]+=s[d]+p-a[h]),t.offsets.popper=K(t.offsets.popper);var m=s[d]+s[u]/2-p/2,g=I(t.instance.popper),v=parseFloat(g["margin"+f]),_=parseFloat(g["border"+f+"Width"]),b=m-t.offsets.popper[d]-v-_;return b=Math.max(Math.min(a[u]-p,b),0),t.arrowElement=i,t.offsets.arrow=(z(n={},d,Math.round(b)),z(n,c,""),n),t},element:"[x-arrow]"},flip:{order:600,enabled:!0,fn:function(t,e){if(dt(t.instance.modifiers,"inner"))return t;if(t.flipped&&t.placement===t.originalPlacement)return t;var n=et(t.instance.popper,t.instance.reference,e.padding,e.boundariesElement,t.positionFixed),i=t.placement.split("-")[0],o=at(i),r=t.placement.split("-")[1]||"",a=[];switch(e.behavior){case St:a=[i,o];break;case Dt:a=Ct(i);break;case Nt:a=Ct(i,!0);break;default:a=e.behavior}return a.forEach((function(s,l){if(i!==s||a.length===l+1)return t;i=t.placement.split("-")[0],o=at(i);var 
u=t.offsets.popper,f=t.offsets.reference,d=Math.floor,c="left"===i&&d(u.right)>d(f.left)||"right"===i&&d(u.left)<d(f.right)||"top"===i&&d(u.bottom)>d(f.top)||"bottom"===i&&d(u.top)<d(f.bottom),h=d(u.left)<d(n.left),p=d(u.right)>d(n.right),m=d(u.top)<d(n.top),g=d(u.bottom)>d(n.bottom),v="left"===i&&h||"right"===i&&p||"top"===i&&m||"bottom"===i&&g,_=-1!==["top","bottom"].indexOf(i),b=!!e.flipVariations&&(_&&"start"===r&&h||_&&"end"===r&&p||!_&&"start"===r&&m||!_&&"end"===r&&g),y=!!e.flipVariationsByContent&&(_&&"start"===r&&p||_&&"end"===r&&h||!_&&"start"===r&&g||!_&&"end"===r&&m),w=b||y;(c||v||w)&&(t.flipped=!0,(c||v)&&(i=a[l+1]),w&&(r=function(t){return"end"===t?"start":"start"===t?"end":t}(r)),t.placement=i+(r?"-"+r:""),t.offsets.popper=X({},t.offsets.popper,st(t.instance.popper,t.offsets.reference,t.placement)),t=ut(t.instance.modifiers,t,"flip"))})),t},behavior:"flip",padding:5,boundariesElement:"viewport",flipVariations:!1,flipVariationsByContent:!1},inner:{order:700,enabled:!1,fn:function(t){var e=t.placement,n=e.split("-")[0],i=t.offsets,o=i.popper,r=i.reference,a=-1!==["left","right"].indexOf(n),s=-1===["top","left"].indexOf(n);return o[a?"left":"top"]=r[n]-(s?o[a?"width":"height"]:0),t.placement=at(e),t.offsets.popper=K(o),t}},hide:{order:800,enabled:!0,fn:function(t){if(!wt(t.instance.modifiers,"hide","preventOverflow"))return t;var e=t.offsets.reference,n=lt(t.instance.modifiers,(function(t){return"preventOverflow"===t.name})).boundaries;if(e.bottom<n.top||e.left>n.right||e.top>n.bottom||e.right<n.left){if(!0===t.hide)return t;t.hide=!0,t.attributes["x-out-of-boundaries"]=""}else{if(!1===t.hide)return t;t.hide=!1,t.attributes["x-out-of-boundaries"]=!1}return t}},computeStyle:{order:850,enabled:!0,fn:function(t,e){var n=e.x,i=e.y,o=t.offsets.popper,r=lt(t.instance.modifiers,(function(t){return"applyStyle"===t.name})).gpuAcceleration;void 0!==r&&console.warn("WARNING: `gpuAcceleration` option moved to `computeStyle` modifier and will not be supported in future versions of Popper.js!");var a=void 0!==r?r:e.gpuAcceleration,s=R(t.instance.popper),l=G(s),u={position:o.position},f=function(t,e){var n=t.offsets,i=n.popper,o=n.reference,r=Math.round,a=Math.floor,s=function(t){return t},l=r(o.width),u=r(i.width),f=-1!==["left","right"].indexOf(t.placement),d=-1!==t.placement.indexOf("-"),c=e?f||d||l%2==u%2?r:a:s,h=e?r:s;return{left:c(l%2==1&&u%2==1&&!d&&e?i.left-1:i.left),top:h(i.top),bottom:h(i.bottom),right:c(i.right)}}(t,window.devicePixelRatio<2||!yt),d="bottom"===n?"top":"bottom",c="right"===i?"left":"right",h=ct("transform"),p=void 0,m=void 0;if(m="bottom"===d?"HTML"===s.nodeName?-s.clientHeight+f.bottom:-l.height+f.bottom:f.top,p="right"===c?"HTML"===s.nodeName?-s.clientWidth+f.right:-l.width+f.right:f.left,a&&h)u[h]="translate3d("+p+"px, "+m+"px, 0)",u[d]=0,u[c]=0,u.willChange="transform";else{var g="bottom"===d?-1:1,v="right"===c?-1:1;u[d]=m*g,u[c]=p*v,u.willChange=d+", "+c}var _={"x-placement":t.placement};return t.attributes=X({},_,t.attributes),t.styles=X({},u,t.styles),t.arrowStyles=X({},t.offsets.arrow,t.arrowStyles),t},gpuAcceleration:!0,x:"bottom",y:"right"},applyStyle:{order:900,enabled:!0,fn:function(t){var e,n;return bt(t.instance.popper,t.styles),e=t.instance.popper,n=t.attributes,Object.keys(n).forEach((function(t){!1!==n[t]?e.setAttribute(t,n[t]):e.removeAttribute(t)})),t.arrowElement&&Object.keys(t.arrowStyles).length&&bt(t.arrowElement,t.arrowStyles),t},onLoad:function(t,e,n,i,o){var 
r=ot(o,e,t,n.positionFixed),a=it(n.placement,r,e,t,n.modifiers.flip.boundariesElement,n.modifiers.flip.padding);return e.setAttribute("x-placement",a),bt(e,{position:n.positionFixed?"fixed":"absolute"}),n},gpuAcceleration:void 0}}},It=function(){function t(e,n){var i=this,o=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};V(this,t),this.scheduleUpdate=function(){return requestAnimationFrame(i.update)},this.update=k(this.update.bind(this)),this.options=X({},t.Defaults,o),this.state={isDestroyed:!1,isCreated:!1,scrollParents:[]},this.reference=e&&e.jquery?e[0]:e,this.popper=n&&n.jquery?n[0]:n,this.options.modifiers={},Object.keys(X({},t.Defaults.modifiers,o.modifiers)).forEach((function(e){i.options.modifiers[e]=X({},t.Defaults.modifiers[e]||{},o.modifiers?o.modifiers[e]:{})})),this.modifiers=Object.keys(this.options.modifiers).map((function(t){return X({name:t},i.options.modifiers[t])})).sort((function(t,e){return t.order-e.order})),this.modifiers.forEach((function(t){t.enabled&&A(t.onLoad)&&t.onLoad(i.reference,i.popper,i.options,t,i.state)})),this.update();var r=this.options.eventsEnabled;r&&this.enableEventListeners(),this.state.eventsEnabled=r}return Y(t,[{key:"update",value:function(){return ft.call(this)}},{key:"destroy",value:function(){return ht.call(this)}},{key:"enableEventListeners",value:function(){return gt.call(this)}},{key:"disableEventListeners",value:function(){return vt.call(this)}}]),t}();It.Utils=("undefined"!=typeof window?window:global).PopperUtils,It.placements=Et,It.Defaults=At;var Ot="dropdown",xt=i.default.fn[Ot],jt=new RegExp("38|40|27"),Lt={offset:0,flip:!0,boundary:"scrollParent",reference:"toggle",display:"dynamic",popperConfig:null},Pt={offset:"(number|string|function)",flip:"boolean",boundary:"(string|element)",reference:"(string|element)",display:"string",popperConfig:"(null|object)"},Ft=function(){function t(t,e){this._element=t,this._popper=null,this._config=this._getConfig(e),this._menu=this._getMenuElement(),this._inNavbar=this._detectNavbar(),this._addEventListeners()}var e=t.prototype;return e.toggle=function(){if(!this._element.disabled&&!i.default(this._element).hasClass("disabled")){var e=i.default(this._menu).hasClass("show");t._clearMenus(),e||this.show(!0)}},e.show=function(e){if(void 0===e&&(e=!1),!(this._element.disabled||i.default(this._element).hasClass("disabled")||i.default(this._menu).hasClass("show"))){var n={relatedTarget:this._element},o=i.default.Event("show.bs.dropdown",n),r=t._getParentFromElement(this._element);if(i.default(r).trigger(o),!o.isDefaultPrevented()){if(!this._inNavbar&&e){if("undefined"==typeof It)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");var a=this._element;"parent"===this._config.reference?a=r:l.isElement(this._config.reference)&&(a=this._config.reference,"undefined"!=typeof this._config.reference.jquery&&(a=this._config.reference[0])),"scrollParent"!==this._config.boundary&&i.default(r).addClass("position-static"),this._popper=new It(a,this._menu,this._getPopperConfig())}"ontouchstart"in document.documentElement&&0===i.default(r).closest(".navbar-nav").length&&i.default(document.body).children().on("mouseover",null,i.default.noop),this._element.focus(),this._element.setAttribute("aria-expanded",!0),i.default(this._menu).toggleClass("show"),i.default(r).toggleClass("show").trigger(i.default.Event("shown.bs.dropdown",n))}}},e.hide=function(){if(!this._element.disabled&&!i.default(this._element).hasClass("disabled")&&i.default(this._menu).hasClass("show")){var 
e={relatedTarget:this._element},n=i.default.Event("hide.bs.dropdown",e),o=t._getParentFromElement(this._element);i.default(o).trigger(n),n.isDefaultPrevented()||(this._popper&&this._popper.destroy(),i.default(this._menu).toggleClass("show"),i.default(o).toggleClass("show").trigger(i.default.Event("hidden.bs.dropdown",e)))}},e.dispose=function(){i.default.removeData(this._element,"bs.dropdown"),i.default(this._element).off(".bs.dropdown"),this._element=null,this._menu=null,null!==this._popper&&(this._popper.destroy(),this._popper=null)},e.update=function(){this._inNavbar=this._detectNavbar(),null!==this._popper&&this._popper.scheduleUpdate()},e._addEventListeners=function(){var t=this;i.default(this._element).on("click.bs.dropdown",(function(e){e.preventDefault(),e.stopPropagation(),t.toggle()}))},e._getConfig=function(t){return t=a({},this.constructor.Default,i.default(this._element).data(),t),l.typeCheckConfig(Ot,t,this.constructor.DefaultType),t},e._getMenuElement=function(){if(!this._menu){var e=t._getParentFromElement(this._element);e&&(this._menu=e.querySelector(".dropdown-menu"))}return this._menu},e._getPlacement=function(){var t=i.default(this._element.parentNode),e="bottom-start";return t.hasClass("dropup")?e=i.default(this._menu).hasClass("dropdown-menu-right")?"top-end":"top-start":t.hasClass("dropright")?e="right-start":t.hasClass("dropleft")?e="left-start":i.default(this._menu).hasClass("dropdown-menu-right")&&(e="bottom-end"),e},e._detectNavbar=function(){return i.default(this._element).closest(".navbar").length>0},e._getOffset=function(){var t=this,e={};return"function"==typeof this._config.offset?e.fn=function(e){return e.offsets=a({},e.offsets,t._config.offset(e.offsets,t._element)||{}),e}:e.offset=this._config.offset,e},e._getPopperConfig=function(){var t={placement:this._getPlacement(),modifiers:{offset:this._getOffset(),flip:{enabled:this._config.flip},preventOverflow:{boundariesElement:this._config.boundary}}};return"static"===this._config.display&&(t.modifiers.applyStyle={enabled:!1}),a({},t,this._config.popperConfig)},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this).data("bs.dropdown");if(n||(n=new t(this,"object"==typeof e?e:null),i.default(this).data("bs.dropdown",n)),"string"==typeof e){if("undefined"==typeof n[e])throw new TypeError('No method named "'+e+'"');n[e]()}}))},t._clearMenus=function(e){if(!e||3!==e.which&&("keyup"!==e.type||9===e.which))for(var n=[].slice.call(document.querySelectorAll('[data-toggle="dropdown"]')),o=0,r=n.length;o<r;o++){var a=t._getParentFromElement(n[o]),s=i.default(n[o]).data("bs.dropdown"),l={relatedTarget:n[o]};if(e&&"click"===e.type&&(l.clickEvent=e),s){var u=s._menu;if(i.default(a).hasClass("show")&&!(e&&("click"===e.type&&/input|textarea/i.test(e.target.tagName)||"keyup"===e.type&&9===e.which)&&i.default.contains(a,e.target))){var f=i.default.Event("hide.bs.dropdown",l);i.default(a).trigger(f),f.isDefaultPrevented()||("ontouchstart"in document.documentElement&&i.default(document.body).children().off("mouseover",null,i.default.noop),n[o].setAttribute("aria-expanded","false"),s._popper&&s._popper.destroy(),i.default(u).removeClass("show"),i.default(a).removeClass("show").trigger(i.default.Event("hidden.bs.dropdown",l)))}}}},t._getParentFromElement=function(t){var e,n=l.getSelectorFromElement(t);return 
n&&(e=document.querySelector(n)),e||t.parentNode},t._dataApiKeydownHandler=function(e){if(!(/input|textarea/i.test(e.target.tagName)?32===e.which||27!==e.which&&(40!==e.which&&38!==e.which||i.default(e.target).closest(".dropdown-menu").length):!jt.test(e.which))&&!this.disabled&&!i.default(this).hasClass("disabled")){var n=t._getParentFromElement(this),o=i.default(n).hasClass("show");if(o||27!==e.which){if(e.preventDefault(),e.stopPropagation(),!o||27===e.which||32===e.which)return 27===e.which&&i.default(n.querySelector('[data-toggle="dropdown"]')).trigger("focus"),void i.default(this).trigger("click");var r=[].slice.call(n.querySelectorAll(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)")).filter((function(t){return i.default(t).is(":visible")}));if(0!==r.length){var a=r.indexOf(e.target);38===e.which&&a>0&&a--,40===e.which&&a<r.length-1&&a++,a<0&&(a=0),r[a].focus()}}}},r(t,null,[{key:"VERSION",get:function(){return"4.6.0"}},{key:"Default",get:function(){return Lt}},{key:"DefaultType",get:function(){return Pt}}]),t}();i.default(document).on("keydown.bs.dropdown.data-api",'[data-toggle="dropdown"]',Ft._dataApiKeydownHandler).on("keydown.bs.dropdown.data-api",".dropdown-menu",Ft._dataApiKeydownHandler).on("click.bs.dropdown.data-api keyup.bs.dropdown.data-api",Ft._clearMenus).on("click.bs.dropdown.data-api",'[data-toggle="dropdown"]',(function(t){t.preventDefault(),t.stopPropagation(),Ft._jQueryInterface.call(i.default(this),"toggle")})).on("click.bs.dropdown.data-api",".dropdown form",(function(t){t.stopPropagation()})),i.default.fn[Ot]=Ft._jQueryInterface,i.default.fn[Ot].Constructor=Ft,i.default.fn[Ot].noConflict=function(){return i.default.fn[Ot]=xt,Ft._jQueryInterface};var Rt=i.default.fn.modal,Ht={backdrop:!0,keyboard:!0,focus:!0,show:!0},Mt={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean",show:"boolean"},qt=function(){function t(t,e){this._config=this._getConfig(e),this._element=t,this._dialog=t.querySelector(".modal-dialog"),this._backdrop=null,this._isShown=!1,this._isBodyOverflowing=!1,this._ignoreBackdropClick=!1,this._isTransitioning=!1,this._scrollbarWidth=0}var e=t.prototype;return e.toggle=function(t){return this._isShown?this.hide():this.show(t)},e.show=function(t){var e=this;if(!this._isShown&&!this._isTransitioning){i.default(this._element).hasClass("fade")&&(this._isTransitioning=!0);var n=i.default.Event("show.bs.modal",{relatedTarget:t});i.default(this._element).trigger(n),this._isShown||n.isDefaultPrevented()||(this._isShown=!0,this._checkScrollbar(),this._setScrollbar(),this._adjustDialog(),this._setEscapeEvent(),this._setResizeEvent(),i.default(this._element).on("click.dismiss.bs.modal",'[data-dismiss="modal"]',(function(t){return e.hide(t)})),i.default(this._dialog).on("mousedown.dismiss.bs.modal",(function(){i.default(e._element).one("mouseup.dismiss.bs.modal",(function(t){i.default(t.target).is(e._element)&&(e._ignoreBackdropClick=!0)}))})),this._showBackdrop((function(){return e._showElement(t)})))}},e.hide=function(t){var e=this;if(t&&t.preventDefault(),this._isShown&&!this._isTransitioning){var n=i.default.Event("hide.bs.modal");if(i.default(this._element).trigger(n),this._isShown&&!n.isDefaultPrevented()){this._isShown=!1;var 
o=i.default(this._element).hasClass("fade");if(o&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),i.default(document).off("focusin.bs.modal"),i.default(this._element).removeClass("show"),i.default(this._element).off("click.dismiss.bs.modal"),i.default(this._dialog).off("mousedown.dismiss.bs.modal"),o){var r=l.getTransitionDurationFromElement(this._element);i.default(this._element).one(l.TRANSITION_END,(function(t){return e._hideModal(t)})).emulateTransitionEnd(r)}else this._hideModal()}}},e.dispose=function(){[window,this._element,this._dialog].forEach((function(t){return i.default(t).off(".bs.modal")})),i.default(document).off("focusin.bs.modal"),i.default.removeData(this._element,"bs.modal"),this._config=null,this._element=null,this._dialog=null,this._backdrop=null,this._isShown=null,this._isBodyOverflowing=null,this._ignoreBackdropClick=null,this._isTransitioning=null,this._scrollbarWidth=null},e.handleUpdate=function(){this._adjustDialog()},e._getConfig=function(t){return t=a({},Ht,t),l.typeCheckConfig("modal",t,Mt),t},e._triggerBackdropTransition=function(){var t=this,e=i.default.Event("hidePrevented.bs.modal");if(i.default(this._element).trigger(e),!e.isDefaultPrevented()){var n=this._element.scrollHeight>document.documentElement.clientHeight;n||(this._element.style.overflowY="hidden"),this._element.classList.add("modal-static");var o=l.getTransitionDurationFromElement(this._dialog);i.default(this._element).off(l.TRANSITION_END),i.default(this._element).one(l.TRANSITION_END,(function(){t._element.classList.remove("modal-static"),n||i.default(t._element).one(l.TRANSITION_END,(function(){t._element.style.overflowY=""})).emulateTransitionEnd(t._element,o)})).emulateTransitionEnd(o),this._element.focus()}},e._showElement=function(t){var e=this,n=i.default(this._element).hasClass("fade"),o=this._dialog?this._dialog.querySelector(".modal-body"):null;this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.appendChild(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),i.default(this._dialog).hasClass("modal-dialog-scrollable")&&o?o.scrollTop=0:this._element.scrollTop=0,n&&l.reflow(this._element),i.default(this._element).addClass("show"),this._config.focus&&this._enforceFocus();var r=i.default.Event("shown.bs.modal",{relatedTarget:t}),a=function(){e._config.focus&&e._element.focus(),e._isTransitioning=!1,i.default(e._element).trigger(r)};if(n){var s=l.getTransitionDurationFromElement(this._dialog);i.default(this._dialog).one(l.TRANSITION_END,a).emulateTransitionEnd(s)}else a()},e._enforceFocus=function(){var t=this;i.default(document).off("focusin.bs.modal").on("focusin.bs.modal",(function(e){document!==e.target&&t._element!==e.target&&0===i.default(t._element).has(e.target).length&&t._element.focus()}))},e._setEscapeEvent=function(){var t=this;this._isShown?i.default(this._element).on("keydown.dismiss.bs.modal",(function(e){t._config.keyboard&&27===e.which?(e.preventDefault(),t.hide()):t._config.keyboard||27!==e.which||t._triggerBackdropTransition()})):this._isShown||i.default(this._element).off("keydown.dismiss.bs.modal")},e._setResizeEvent=function(){var t=this;this._isShown?i.default(window).on("resize.bs.modal",(function(e){return t.handleUpdate(e)})):i.default(window).off("resize.bs.modal")},e._hideModal=function(){var 
t=this;this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._showBackdrop((function(){i.default(document.body).removeClass("modal-open"),t._resetAdjustments(),t._resetScrollbar(),i.default(t._element).trigger("hidden.bs.modal")}))},e._removeBackdrop=function(){this._backdrop&&(i.default(this._backdrop).remove(),this._backdrop=null)},e._showBackdrop=function(t){var e=this,n=i.default(this._element).hasClass("fade")?"fade":"";if(this._isShown&&this._config.backdrop){if(this._backdrop=document.createElement("div"),this._backdrop.className="modal-backdrop",n&&this._backdrop.classList.add(n),i.default(this._backdrop).appendTo(document.body),i.default(this._element).on("click.dismiss.bs.modal",(function(t){e._ignoreBackdropClick?e._ignoreBackdropClick=!1:t.target===t.currentTarget&&("static"===e._config.backdrop?e._triggerBackdropTransition():e.hide())})),n&&l.reflow(this._backdrop),i.default(this._backdrop).addClass("show"),!t)return;if(!n)return void t();var o=l.getTransitionDurationFromElement(this._backdrop);i.default(this._backdrop).one(l.TRANSITION_END,t).emulateTransitionEnd(o)}else if(!this._isShown&&this._backdrop){i.default(this._backdrop).removeClass("show");var r=function(){e._removeBackdrop(),t&&t()};if(i.default(this._element).hasClass("fade")){var a=l.getTransitionDurationFromElement(this._backdrop);i.default(this._backdrop).one(l.TRANSITION_END,r).emulateTransitionEnd(a)}else r()}else t&&t()},e._adjustDialog=function(){var t=this._element.scrollHeight>document.documentElement.clientHeight;!this._isBodyOverflowing&&t&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!t&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},e._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},e._checkScrollbar=function(){var t=document.body.getBoundingClientRect();this._isBodyOverflowing=Math.round(t.left+t.right)<window.innerWidth,this._scrollbarWidth=this._getScrollbarWidth()},e._setScrollbar=function(){var t=this;if(this._isBodyOverflowing){var e=[].slice.call(document.querySelectorAll(".fixed-top, .fixed-bottom, .is-fixed, .sticky-top")),n=[].slice.call(document.querySelectorAll(".sticky-top"));i.default(e).each((function(e,n){var o=n.style.paddingRight,r=i.default(n).css("padding-right");i.default(n).data("padding-right",o).css("padding-right",parseFloat(r)+t._scrollbarWidth+"px")})),i.default(n).each((function(e,n){var o=n.style.marginRight,r=i.default(n).css("margin-right");i.default(n).data("margin-right",o).css("margin-right",parseFloat(r)-t._scrollbarWidth+"px")}));var o=document.body.style.paddingRight,r=i.default(document.body).css("padding-right");i.default(document.body).data("padding-right",o).css("padding-right",parseFloat(r)+this._scrollbarWidth+"px")}i.default(document.body).addClass("modal-open")},e._resetScrollbar=function(){var t=[].slice.call(document.querySelectorAll(".fixed-top, .fixed-bottom, .is-fixed, .sticky-top"));i.default(t).each((function(t,e){var n=i.default(e).data("padding-right");i.default(e).removeData("padding-right"),e.style.paddingRight=n||""}));var e=[].slice.call(document.querySelectorAll(".sticky-top"));i.default(e).each((function(t,e){var n=i.default(e).data("margin-right");"undefined"!=typeof n&&i.default(e).css("margin-right",n).removeData("margin-right")}));var 
n=i.default(document.body).data("padding-right");i.default(document.body).removeData("padding-right"),document.body.style.paddingRight=n||""},e._getScrollbarWidth=function(){var t=document.createElement("div");t.className="modal-scrollbar-measure",document.body.appendChild(t);var e=t.getBoundingClientRect().width-t.clientWidth;return document.body.removeChild(t),e},t._jQueryInterface=function(e,n){return this.each((function(){var o=i.default(this).data("bs.modal"),r=a({},Ht,i.default(this).data(),"object"==typeof e&&e?e:{});if(o||(o=new t(this,r),i.default(this).data("bs.modal",o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e](n)}else r.show&&o.show(n)}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.0"}},{key:"Default",get:function(){return Ht}}]),t}();i.default(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',(function(t){var e,n=this,o=l.getSelectorFromElement(this);o&&(e=document.querySelector(o));var r=i.default(e).data("bs.modal")?"toggle":a({},i.default(e).data(),i.default(this).data());"A"!==this.tagName&&"AREA"!==this.tagName||t.preventDefault();var s=i.default(e).one("show.bs.modal",(function(t){t.isDefaultPrevented()||s.one("hidden.bs.modal",(function(){i.default(n).is(":visible")&&n.focus()}))}));qt._jQueryInterface.call(i.default(e),r,this)})),i.default.fn.modal=qt._jQueryInterface,i.default.fn.modal.Constructor=qt,i.default.fn.modal.noConflict=function(){return i.default.fn.modal=Rt,qt._jQueryInterface};var Bt=["background","cite","href","itemtype","longdesc","poster","src","xlink:href"],Qt={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Wt=/^(?:(?:https?|mailto|ftp|tel|file):|[^#&/:?]*(?:[#/?]|$))/gi,Ut=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[\d+/a-z]+=*$/i;function Vt(t,e,n){if(0===t.length)return t;if(n&&"function"==typeof n)return n(t);for(var i=(new window.DOMParser).parseFromString(t,"text/html"),o=Object.keys(e),r=[].slice.call(i.body.querySelectorAll("*")),a=function(t,n){var i=r[t],a=i.nodeName.toLowerCase();if(-1===o.indexOf(i.nodeName.toLowerCase()))return i.parentNode.removeChild(i),"continue";var s=[].slice.call(i.attributes),l=[].concat(e["*"]||[],e[a]||[]);s.forEach((function(t){(function(t,e){var n=t.nodeName.toLowerCase();if(-1!==e.indexOf(n))return-1===Bt.indexOf(n)||Boolean(t.nodeValue.match(Wt)||t.nodeValue.match(Ut));for(var i=e.filter((function(t){return t instanceof RegExp})),o=0,r=i.length;o<r;o++)if(n.match(i[o]))return!0;return!1})(t,l)||i.removeAttribute(t.nodeName)}))},s=0,l=r.length;s<l;s++)a(s);return i.body.innerHTML}var Yt="tooltip",zt=i.default.fn[Yt],Xt=new 
RegExp("(^|\\s)bs-tooltip\\S+","g"),Kt=["sanitize","whiteList","sanitizeFn"],Gt={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string|function)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)",customClass:"(string|function)",sanitize:"boolean",sanitizeFn:"(null|function)",whiteList:"object",popperConfig:"(null|object)"},$t={AUTO:"auto",TOP:"top",RIGHT:"right",BOTTOM:"bottom",LEFT:"left"},Jt={animation:!0,template:'<div class="tooltip" role="tooltip"><div class="arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent",customClass:"",sanitize:!0,sanitizeFn:null,whiteList:Qt,popperConfig:null},Zt={HIDE:"hide.bs.tooltip",HIDDEN:"hidden.bs.tooltip",SHOW:"show.bs.tooltip",SHOWN:"shown.bs.tooltip",INSERTED:"inserted.bs.tooltip",CLICK:"click.bs.tooltip",FOCUSIN:"focusin.bs.tooltip",FOCUSOUT:"focusout.bs.tooltip",MOUSEENTER:"mouseenter.bs.tooltip",MOUSELEAVE:"mouseleave.bs.tooltip"},te=function(){function t(t,e){if("undefined"==typeof It)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}var e=t.prototype;return e.enable=function(){this._isEnabled=!0},e.disable=function(){this._isEnabled=!1},e.toggleEnabled=function(){this._isEnabled=!this._isEnabled},e.toggle=function(t){if(this._isEnabled)if(t){var e=this.constructor.DATA_KEY,n=i.default(t.currentTarget).data(e);n||(n=new this.constructor(t.currentTarget,this._getDelegateConfig()),i.default(t.currentTarget).data(e,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(i.default(this.getTipElement()).hasClass("show"))return void this._leave(null,this);this._enter(null,this)}},e.dispose=function(){clearTimeout(this._timeout),i.default.removeData(this.element,this.constructor.DATA_KEY),i.default(this.element).off(this.constructor.EVENT_KEY),i.default(this.element).closest(".modal").off("hide.bs.modal",this._hideModalHandler),this.tip&&i.default(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,this._activeTrigger=null,this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},e.show=function(){var t=this;if("none"===i.default(this.element).css("display"))throw new Error("Please use show on visible elements");var e=i.default.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){i.default(this.element).trigger(e);var n=l.findShadowRoot(this.element),o=i.default.contains(null!==n?n:this.element.ownerDocument.documentElement,this.element);if(e.isDefaultPrevented()||!o)return;var r=this.getTipElement(),a=l.getUID(this.constructor.NAME);r.setAttribute("id",a),this.element.setAttribute("aria-describedby",a),this.setContent(),this.config.animation&&i.default(r).addClass("fade");var s="function"==typeof this.config.placement?this.config.placement.call(this,r,this.element):this.config.placement,u=this._getAttachment(s);this.addAttachmentClass(u);var 
f=this._getContainer();i.default(r).data(this.constructor.DATA_KEY,this),i.default.contains(this.element.ownerDocument.documentElement,this.tip)||i.default(r).appendTo(f),i.default(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new It(this.element,r,this._getPopperConfig(u)),i.default(r).addClass("show"),i.default(r).addClass(this.config.customClass),"ontouchstart"in document.documentElement&&i.default(document.body).children().on("mouseover",null,i.default.noop);var d=function(){t.config.animation&&t._fixTransition();var e=t._hoverState;t._hoverState=null,i.default(t.element).trigger(t.constructor.Event.SHOWN),"out"===e&&t._leave(null,t)};if(i.default(this.tip).hasClass("fade")){var c=l.getTransitionDurationFromElement(this.tip);i.default(this.tip).one(l.TRANSITION_END,d).emulateTransitionEnd(c)}else d()}},e.hide=function(t){var e=this,n=this.getTipElement(),o=i.default.Event(this.constructor.Event.HIDE),r=function(){"show"!==e._hoverState&&n.parentNode&&n.parentNode.removeChild(n),e._cleanTipClass(),e.element.removeAttribute("aria-describedby"),i.default(e.element).trigger(e.constructor.Event.HIDDEN),null!==e._popper&&e._popper.destroy(),t&&t()};if(i.default(this.element).trigger(o),!o.isDefaultPrevented()){if(i.default(n).removeClass("show"),"ontouchstart"in document.documentElement&&i.default(document.body).children().off("mouseover",null,i.default.noop),this._activeTrigger.click=!1,this._activeTrigger.focus=!1,this._activeTrigger.hover=!1,i.default(this.tip).hasClass("fade")){var a=l.getTransitionDurationFromElement(n);i.default(n).one(l.TRANSITION_END,r).emulateTransitionEnd(a)}else r();this._hoverState=""}},e.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},e.isWithContent=function(){return Boolean(this.getTitle())},e.addAttachmentClass=function(t){i.default(this.getTipElement()).addClass("bs-tooltip-"+t)},e.getTipElement=function(){return this.tip=this.tip||i.default(this.config.template)[0],this.tip},e.setContent=function(){var t=this.getTipElement();this.setElementContent(i.default(t.querySelectorAll(".tooltip-inner")),this.getTitle()),i.default(t).removeClass("fade show")},e.setElementContent=function(t,e){"object"!=typeof e||!e.nodeType&&!e.jquery?this.config.html?(this.config.sanitize&&(e=Vt(e,this.config.whiteList,this.config.sanitizeFn)),t.html(e)):t.text(e):this.config.html?i.default(e).parent().is(t)||t.empty().append(e):t.text(i.default(e).text())},e.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},e._getPopperConfig=function(t){var e=this;return a({},{placement:t,modifiers:{offset:this._getOffset(),flip:{behavior:this.config.fallbackPlacement},arrow:{element:".arrow"},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&e._handlePopperPlacementChange(t)},onUpdate:function(t){return e._handlePopperPlacementChange(t)}},this.config.popperConfig)},e._getOffset=function(){var t=this,e={};return"function"==typeof this.config.offset?e.fn=function(e){return e.offsets=a({},e.offsets,t.config.offset(e.offsets,t.element)||{}),e}:e.offset=this.config.offset,e},e._getContainer=function(){return!1===this.config.container?document.body:l.isElement(this.config.container)?i.default(this.config.container):i.default(document).find(this.config.container)},e._getAttachment=function(t){return $t[t.toUpperCase()]},e._setListeners=function(){var 
t=this;this.config.trigger.split(" ").forEach((function(e){if("click"===e)i.default(t.element).on(t.constructor.Event.CLICK,t.config.selector,(function(e){return t.toggle(e)}));else if("manual"!==e){var n="hover"===e?t.constructor.Event.MOUSEENTER:t.constructor.Event.FOCUSIN,o="hover"===e?t.constructor.Event.MOUSELEAVE:t.constructor.Event.FOCUSOUT;i.default(t.element).on(n,t.config.selector,(function(e){return t._enter(e)})).on(o,t.config.selector,(function(e){return t._leave(e)}))}})),this._hideModalHandler=function(){t.element&&t.hide()},i.default(this.element).closest(".modal").on("hide.bs.modal",this._hideModalHandler),this.config.selector?this.config=a({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},e._fixTitle=function(){var t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},e._enter=function(t,e){var n=this.constructor.DATA_KEY;(e=e||i.default(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),i.default(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusin"===t.type?"focus":"hover"]=!0),i.default(e.getTipElement()).hasClass("show")||"show"===e._hoverState?e._hoverState="show":(clearTimeout(e._timeout),e._hoverState="show",e.config.delay&&e.config.delay.show?e._timeout=setTimeout((function(){"show"===e._hoverState&&e.show()}),e.config.delay.show):e.show())},e._leave=function(t,e){var n=this.constructor.DATA_KEY;(e=e||i.default(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),i.default(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusout"===t.type?"focus":"hover"]=!1),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState="out",e.config.delay&&e.config.delay.hide?e._timeout=setTimeout((function(){"out"===e._hoverState&&e.hide()}),e.config.delay.hide):e.hide())},e._isWithActiveTrigger=function(){for(var t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},e._getConfig=function(t){var e=i.default(this.element).data();return Object.keys(e).forEach((function(t){-1!==Kt.indexOf(t)&&delete e[t]})),"number"==typeof(t=a({},this.constructor.Default,e,"object"==typeof t&&t?t:{})).delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),l.typeCheckConfig(Yt,t,this.constructor.DefaultType),t.sanitize&&(t.template=Vt(t.template,t.whiteList,t.sanitizeFn)),t},e._getDelegateConfig=function(){var t={};if(this.config)for(var e in this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},e._cleanTipClass=function(){var t=i.default(this.getTipElement()),e=t.attr("class").match(Xt);null!==e&&e.length&&t.removeClass(e.join(""))},e._handlePopperPlacementChange=function(t){this.tip=t.instance.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},e._fixTransition=function(){var t=this.getTipElement(),e=this.config.animation;null===t.getAttribute("x-placement")&&(i.default(t).removeClass("fade"),this.config.animation=!1,this.hide(),this.show(),this.config.animation=e)},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data("bs.tooltip"),r="object"==typeof e&&e;if((o||!/dispose|hide/.test(e))&&(o||(o=new t(this,r),n.data("bs.tooltip",o)),"string"==typeof e)){if("undefined"==typeof 
o[e])throw new TypeError('No method named "'+e+'"');o[e]()}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.0"}},{key:"Default",get:function(){return Jt}},{key:"NAME",get:function(){return Yt}},{key:"DATA_KEY",get:function(){return"bs.tooltip"}},{key:"Event",get:function(){return Zt}},{key:"EVENT_KEY",get:function(){return".bs.tooltip"}},{key:"DefaultType",get:function(){return Gt}}]),t}();i.default.fn[Yt]=te._jQueryInterface,i.default.fn[Yt].Constructor=te,i.default.fn[Yt].noConflict=function(){return i.default.fn[Yt]=zt,te._jQueryInterface};var ee="popover",ne=i.default.fn[ee],ie=new RegExp("(^|\\s)bs-popover\\S+","g"),oe=a({},te.Default,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>'}),re=a({},te.DefaultType,{content:"(string|element|function)"}),ae={HIDE:"hide.bs.popover",HIDDEN:"hidden.bs.popover",SHOW:"show.bs.popover",SHOWN:"shown.bs.popover",INSERTED:"inserted.bs.popover",CLICK:"click.bs.popover",FOCUSIN:"focusin.bs.popover",FOCUSOUT:"focusout.bs.popover",MOUSEENTER:"mouseenter.bs.popover",MOUSELEAVE:"mouseleave.bs.popover"},se=function(t){var e,n;function o(){return t.apply(this,arguments)||this}n=t,(e=o).prototype=Object.create(n.prototype),e.prototype.constructor=e,e.__proto__=n;var a=o.prototype;return a.isWithContent=function(){return this.getTitle()||this._getContent()},a.addAttachmentClass=function(t){i.default(this.getTipElement()).addClass("bs-popover-"+t)},a.getTipElement=function(){return this.tip=this.tip||i.default(this.config.template)[0],this.tip},a.setContent=function(){var t=i.default(this.getTipElement());this.setElementContent(t.find(".popover-header"),this.getTitle());var e=this._getContent();"function"==typeof e&&(e=e.call(this.element)),this.setElementContent(t.find(".popover-body"),e),t.removeClass("fade show")},a._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},a._cleanTipClass=function(){var t=i.default(this.getTipElement()),e=t.attr("class").match(ie);null!==e&&e.length>0&&t.removeClass(e.join(""))},o._jQueryInterface=function(t){return this.each((function(){var e=i.default(this).data("bs.popover"),n="object"==typeof t?t:null;if((e||!/dispose|hide/.test(t))&&(e||(e=new o(this,n),i.default(this).data("bs.popover",e)),"string"==typeof t)){if("undefined"==typeof e[t])throw new TypeError('No method named "'+t+'"');e[t]()}}))},r(o,null,[{key:"VERSION",get:function(){return"4.6.0"}},{key:"Default",get:function(){return oe}},{key:"NAME",get:function(){return ee}},{key:"DATA_KEY",get:function(){return"bs.popover"}},{key:"Event",get:function(){return ae}},{key:"EVENT_KEY",get:function(){return".bs.popover"}},{key:"DefaultType",get:function(){return re}}]),o}(te);i.default.fn[ee]=se._jQueryInterface,i.default.fn[ee].Constructor=se,i.default.fn[ee].noConflict=function(){return i.default.fn[ee]=ne,se._jQueryInterface};var le="scrollspy",ue=i.default.fn[le],fe={offset:10,method:"auto",target:""},de={offset:"number",method:"string",target:"(string|element)"},ce=function(){function t(t,e){var n=this;this._element=t,this._scrollElement="BODY"===t.tagName?window:t,this._config=this._getConfig(e),this._selector=this._config.target+" .nav-link,"+this._config.target+" .list-group-item,"+this._config.target+" .dropdown-item",this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,i.default(this._scrollElement).on("scroll.bs.scrollspy",(function(t){return 
n._process(t)})),this.refresh(),this._process()}var e=t.prototype;return e.refresh=function(){var t=this,e=this._scrollElement===this._scrollElement.window?"offset":"position",n="auto"===this._config.method?e:this._config.method,o="position"===n?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),[].slice.call(document.querySelectorAll(this._selector)).map((function(t){var e,r=l.getSelectorFromElement(t);if(r&&(e=document.querySelector(r)),e){var a=e.getBoundingClientRect();if(a.width||a.height)return[i.default(e)[n]().top+o,r]}return null})).filter((function(t){return t})).sort((function(t,e){return t[0]-e[0]})).forEach((function(e){t._offsets.push(e[0]),t._targets.push(e[1])}))},e.dispose=function(){i.default.removeData(this._element,"bs.scrollspy"),i.default(this._scrollElement).off(".bs.scrollspy"),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},e._getConfig=function(t){if("string"!=typeof(t=a({},fe,"object"==typeof t&&t?t:{})).target&&l.isElement(t.target)){var e=i.default(t.target).attr("id");e||(e=l.getUID(le),i.default(t.target).attr("id",e)),t.target="#"+e}return l.typeCheckConfig(le,t,de),t},e._getScrollTop=function(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},e._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},e._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},e._process=function(){var t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),n=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),t>=n){var i=this._targets[this._targets.length-1];this._activeTarget!==i&&this._activate(i)}else{if(this._activeTarget&&t<this._offsets[0]&&this._offsets[0]>0)return this._activeTarget=null,void this._clear();for(var o=this._offsets.length;o--;){this._activeTarget!==this._targets[o]&&t>=this._offsets[o]&&("undefined"==typeof this._offsets[o+1]||t<this._offsets[o+1])&&this._activate(this._targets[o])}}},e._activate=function(t){this._activeTarget=t,this._clear();var e=this._selector.split(",").map((function(e){return e+'[data-target="'+t+'"],'+e+'[href="'+t+'"]'})),n=i.default([].slice.call(document.querySelectorAll(e.join(","))));n.hasClass("dropdown-item")?(n.closest(".dropdown").find(".dropdown-toggle").addClass("active"),n.addClass("active")):(n.addClass("active"),n.parents(".nav, .list-group").prev(".nav-link, .list-group-item").addClass("active"),n.parents(".nav, .list-group").prev(".nav-item").children(".nav-link").addClass("active")),i.default(this._scrollElement).trigger("activate.bs.scrollspy",{relatedTarget:t})},e._clear=function(){[].slice.call(document.querySelectorAll(this._selector)).filter((function(t){return t.classList.contains("active")})).forEach((function(t){return t.classList.remove("active")}))},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this).data("bs.scrollspy");if(n||(n=new t(this,"object"==typeof e&&e),i.default(this).data("bs.scrollspy",n)),"string"==typeof e){if("undefined"==typeof n[e])throw new TypeError('No method named "'+e+'"');n[e]()}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.0"}},{key:"Default",get:function(){return 
fe}}]),t}();i.default(window).on("load.bs.scrollspy.data-api",(function(){for(var t=[].slice.call(document.querySelectorAll('[data-spy="scroll"]')),e=t.length;e--;){var n=i.default(t[e]);ce._jQueryInterface.call(n,n.data())}})),i.default.fn[le]=ce._jQueryInterface,i.default.fn[le].Constructor=ce,i.default.fn[le].noConflict=function(){return i.default.fn[le]=ue,ce._jQueryInterface};var he=i.default.fn.tab,pe=function(){function t(t){this._element=t}var e=t.prototype;return e.show=function(){var t=this;if(!(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&i.default(this._element).hasClass("active")||i.default(this._element).hasClass("disabled"))){var e,n,o=i.default(this._element).closest(".nav, .list-group")[0],r=l.getSelectorFromElement(this._element);if(o){var a="UL"===o.nodeName||"OL"===o.nodeName?"> li > .active":".active";n=(n=i.default.makeArray(i.default(o).find(a)))[n.length-1]}var s=i.default.Event("hide.bs.tab",{relatedTarget:this._element}),u=i.default.Event("show.bs.tab",{relatedTarget:n});if(n&&i.default(n).trigger(s),i.default(this._element).trigger(u),!u.isDefaultPrevented()&&!s.isDefaultPrevented()){r&&(e=document.querySelector(r)),this._activate(this._element,o);var f=function(){var e=i.default.Event("hidden.bs.tab",{relatedTarget:t._element}),o=i.default.Event("shown.bs.tab",{relatedTarget:n});i.default(n).trigger(e),i.default(t._element).trigger(o)};e?this._activate(e,e.parentNode,f):f()}}},e.dispose=function(){i.default.removeData(this._element,"bs.tab"),this._element=null},e._activate=function(t,e,n){var o=this,r=(!e||"UL"!==e.nodeName&&"OL"!==e.nodeName?i.default(e).children(".active"):i.default(e).find("> li > .active"))[0],a=n&&r&&i.default(r).hasClass("fade"),s=function(){return o._transitionComplete(t,r,n)};if(r&&a){var u=l.getTransitionDurationFromElement(r);i.default(r).removeClass("show").one(l.TRANSITION_END,s).emulateTransitionEnd(u)}else s()},e._transitionComplete=function(t,e,n){if(e){i.default(e).removeClass("active");var o=i.default(e.parentNode).find("> .dropdown-menu .active")[0];o&&i.default(o).removeClass("active"),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!1)}if(i.default(t).addClass("active"),"tab"===t.getAttribute("role")&&t.setAttribute("aria-selected",!0),l.reflow(t),t.classList.contains("fade")&&t.classList.add("show"),t.parentNode&&i.default(t.parentNode).hasClass("dropdown-menu")){var r=i.default(t).closest(".dropdown")[0];if(r){var a=[].slice.call(r.querySelectorAll(".dropdown-toggle"));i.default(a).addClass("active")}t.setAttribute("aria-expanded",!0)}n&&n()},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data("bs.tab");if(o||(o=new t(this),n.data("bs.tab",o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e]()}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.0"}}]),t}();i.default(document).on("click.bs.tab.data-api",'[data-toggle="tab"], [data-toggle="pill"], [data-toggle="list"]',(function(t){t.preventDefault(),pe._jQueryInterface.call(i.default(this),"show")})),i.default.fn.tab=pe._jQueryInterface,i.default.fn.tab.Constructor=pe,i.default.fn.tab.noConflict=function(){return i.default.fn.tab=he,pe._jQueryInterface};var me=i.default.fn.toast,ge={animation:"boolean",autohide:"boolean",delay:"number"},ve={animation:!0,autohide:!0,delay:500},_e=function(){function t(t,e){this._element=t,this._config=this._getConfig(e),this._timeout=null,this._setListeners()}var e=t.prototype;return 
e.show=function(){var t=this,e=i.default.Event("show.bs.toast");if(i.default(this._element).trigger(e),!e.isDefaultPrevented()){this._clearTimeout(),this._config.animation&&this._element.classList.add("fade");var n=function(){t._element.classList.remove("showing"),t._element.classList.add("show"),i.default(t._element).trigger("shown.bs.toast"),t._config.autohide&&(t._timeout=setTimeout((function(){t.hide()}),t._config.delay))};if(this._element.classList.remove("hide"),l.reflow(this._element),this._element.classList.add("showing"),this._config.animation){var o=l.getTransitionDurationFromElement(this._element);i.default(this._element).one(l.TRANSITION_END,n).emulateTransitionEnd(o)}else n()}},e.hide=function(){if(this._element.classList.contains("show")){var t=i.default.Event("hide.bs.toast");i.default(this._element).trigger(t),t.isDefaultPrevented()||this._close()}},e.dispose=function(){this._clearTimeout(),this._element.classList.contains("show")&&this._element.classList.remove("show"),i.default(this._element).off("click.dismiss.bs.toast"),i.default.removeData(this._element,"bs.toast"),this._element=null,this._config=null},e._getConfig=function(t){return t=a({},ve,i.default(this._element).data(),"object"==typeof t&&t?t:{}),l.typeCheckConfig("toast",t,this.constructor.DefaultType),t},e._setListeners=function(){var t=this;i.default(this._element).on("click.dismiss.bs.toast",'[data-dismiss="toast"]',(function(){return t.hide()}))},e._close=function(){var t=this,e=function(){t._element.classList.add("hide"),i.default(t._element).trigger("hidden.bs.toast")};if(this._element.classList.remove("show"),this._config.animation){var n=l.getTransitionDurationFromElement(this._element);i.default(this._element).one(l.TRANSITION_END,e).emulateTransitionEnd(n)}else e()},e._clearTimeout=function(){clearTimeout(this._timeout),this._timeout=null},t._jQueryInterface=function(e){return this.each((function(){var n=i.default(this),o=n.data("bs.toast");if(o||(o=new t(this,"object"==typeof e&&e),n.data("bs.toast",o)),"string"==typeof e){if("undefined"==typeof o[e])throw new TypeError('No method named "'+e+'"');o[e](this)}}))},r(t,null,[{key:"VERSION",get:function(){return"4.6.0"}},{key:"DefaultType",get:function(){return ge}},{key:"Default",get:function(){return ve}}]),t}();i.default.fn.toast=_e._jQueryInterface,i.default.fn.toast.Constructor=_e,i.default.fn.toast.noConflict=function(){return i.default.fn.toast=me,_e._jQueryInterface},t.Alert=d,t.Button=h,t.Carousel=y,t.Collapse=S,t.Dropdown=Ft,t.Modal=qt,t.Popover=se,t.Scrollspy=ce,t.Tab=pe,t.Toast=_e,t.Tooltip=te,t.Util=l,Object.defineProperty(t,"__esModule",{value:!0})}));
//# sourceMappingURL=bootstrap.bundle.min.js.map | PypiClean |
/PlexTraktSync-0.27.2-py3-none-any.whl/plextraktsync/plex/PlexRatings.py
from __future__ import annotations
from typing import TYPE_CHECKING
from plextraktsync.decorators.flatten import flatten_dict
from plextraktsync.decorators.memoize import memoize
if TYPE_CHECKING:
from plextraktsync.plex.PlexApi import PlexApi
from plextraktsync.plex.PlexLibraryItem import PlexLibraryItem
from plextraktsync.plex.PlexLibrarySection import PlexLibrarySection
class PlexRatings:
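    """Read user ratings from a Plex library, cached per library section and media type."""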
plex: PlexApi
def __init__(self, plex: PlexApi):
self.plex = plex
def get(self, m: PlexLibraryItem, show_id: int = None):
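        """Return the user's rating for the item as an int, or None if the item's
        library section is excluded or the item has no user rating."""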
section_id = m.item.librarySectionID
# item is from section that is in excluded-libraries
# this can happen when doing "inspect"
if section_id not in self.plex.library_sections:
return None
media_type = m.media_type
section = self.plex.library_sections[section_id]
ratings = self.ratings(section, media_type)
if media_type in ["movies", "shows"]:
# For movies and shows, just return from the dict
user_rating = (
ratings[m.item.ratingKey] if m.item.ratingKey in ratings else None
)
elif media_type == "episodes":
# For episodes the ratings is just (show_id, show_rating) tuples
# if show id is not listed, return none, otherwise fetch from item itself
if show_id not in ratings:
return None
user_rating = m.item.userRating
else:
raise RuntimeError(f"Unsupported media type: {media_type}")
if user_rating is None:
return None
return int(user_rating)
@staticmethod
@memoize
@flatten_dict
def ratings(section: PlexLibrarySection, media_type: str):
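        """Yield (ratingKey, userRating) for every rated item in the section.

        The @flatten_dict/@memoize decorators turn this into a cached dict per
        (section, media_type); for "episodes" the keys are the rating keys of
        shows that contain at least one rated episode.
        """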
key = {
"movies": "userRating",
"episodes": "episode.userRating",
"shows": "show.userRating",
}[media_type]
filters = {
"and": [
{f"{key}>>": -1},
]
}
for item in section.search(filters=filters):
            yield item.ratingKey, item.userRating
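
# Usage sketch (illustrative only; how `plex` and the items are obtained is
# assumed and not shown by this module):
#
#     ratings = PlexRatings(plex)                          # plex: PlexApi
#     movie_rating = ratings.get(movie_item)               # movies/shows: cached dict lookup
#     ep_rating = ratings.get(ep_item, show_id=show_key)   # episodes also need the show's rating key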
/retro_data_structures-0.23.0-py3-none-any.whl/retro_data_structures/properties/corruption/archetypes/PhysicsDebrisPropertiesOrientationEnum.py
import dataclasses
import struct
import typing
from retro_data_structures.game_check import Game
from retro_data_structures.properties.base_property import BaseProperty
import retro_data_structures.enums.corruption as enums
@dataclasses.dataclass()
class PhysicsDebrisPropertiesOrientationEnum(BaseProperty):
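    """Property archetype wrapping a single `orientation` enum (UnknownEnum1),
    with binary and JSON (de)serialization targeting Game.CORRUPTION."""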
orientation: enums.UnknownEnum1 = dataclasses.field(default=enums.UnknownEnum1.Unknown1)
@classmethod
def game(cls) -> Game:
return Game.CORRUPTION
@classmethod
def from_stream(cls, data: typing.BinaryIO, size: typing.Optional[int] = None, default_override: typing.Optional[dict] = None):
property_count = struct.unpack(">H", data.read(2))[0]
if default_override is None and (result := _fast_decode(data, property_count)) is not None:
return result
present_fields = default_override or {}
for _ in range(property_count):
property_id, property_size = struct.unpack(">LH", data.read(6))
start = data.tell()
try:
property_name, decoder = _property_decoder[property_id]
present_fields[property_name] = decoder(data, property_size)
except KeyError:
raise RuntimeError(f"Unknown property: 0x{property_id:08x}")
assert data.tell() - start == property_size
return cls(**present_fields)
def to_stream(self, data: typing.BinaryIO, default_override: typing.Optional[dict] = None):
default_override = default_override or {}
data.write(b'\x00\x01') # 1 properties
data.write(b'\xf4\xbff7') # 0xf4bf6637
data.write(b'\x00\x04') # size
self.orientation.to_stream(data)
@classmethod
def from_json(cls, data: dict):
return cls(
orientation=enums.UnknownEnum1.from_json(data['orientation']),
)
def to_json(self) -> dict:
return {
'orientation': self.orientation.to_json(),
}
_FAST_FORMAT = None
_FAST_IDS = (0xf4bf6637)
def _fast_decode(data: typing.BinaryIO, property_count: int) -> typing.Optional[PhysicsDebrisPropertiesOrientationEnum]:
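    """Fast path: decode the single expected property with one precompiled struct.

    Rewinds the stream and returns None when the property id does not match, so
    from_stream() falls back to its generic per-property loop.
    """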
if property_count != 1:
return None
global _FAST_FORMAT
if _FAST_FORMAT is None:
_FAST_FORMAT = struct.Struct('>LHL')
before = data.tell()
dec = _FAST_FORMAT.unpack(data.read(10))
if (dec[0]) != _FAST_IDS:
data.seek(before)
return None
return PhysicsDebrisPropertiesOrientationEnum(
enums.UnknownEnum1(dec[2]),
)
def _decode_orientation(data: typing.BinaryIO, property_size: int):
return enums.UnknownEnum1.from_stream(data)
_property_decoder: typing.Dict[int, typing.Tuple[str, typing.Callable[[typing.BinaryIO, int], typing.Any]]] = {
0xf4bf6637: ('orientation', _decode_orientation),
}
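
# Usage sketch (illustrative): JSON round-trip of the single property
#
#     prop = PhysicsDebrisPropertiesOrientationEnum()      # orientation defaults to UnknownEnum1.Unknown1
#     data = prop.to_json()                                # {'orientation': <enum as json>}
#     same = PhysicsDebrisPropertiesOrientationEnum.from_json(data)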
/discordpack-2.0.0.tar.gz/discordpack-2.0.0/discord/http.py
import asyncio
import json
import logging
import sys
from urllib.parse import quote as _uriquote
import weakref
import aiohttp
from .errors import HTTPException, Forbidden, NotFound, LoginFailure, DiscordServerError, GatewayNotFound
from .gateway import DiscordClientWebSocketResponse
from . import __version__, utils
log = logging.getLogger(__name__)
async def json_or_text(response):
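    # Return the JSON-decoded body when the response declares application/json,
    # otherwise fall back to the raw text (e.g. HTML error pages).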
text = await response.text(encoding='utf-8')
try:
if response.headers['content-type'] == 'application/json':
return json.loads(text)
except KeyError:
# Thanks Cloudflare
pass
return text
class Route:
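    """A single Discord API call: HTTP method plus a formatted endpoint URL.

    The major parameters (channel_id, guild_id) are kept so requests can be
    grouped into rate-limit buckets via ``bucket``.
    """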
BASE = 'https://discord.com/api/v7'
def __init__(self, method, path, **parameters):
self.path = path
self.method = method
url = (self.BASE + self.path)
if parameters:
self.url = url.format(**{k: _uriquote(v) if isinstance(v, str) else v for k, v in parameters.items()})
else:
self.url = url
# major parameters:
self.channel_id = parameters.get('channel_id')
self.guild_id = parameters.get('guild_id')
@property
def bucket(self):
        # the bucket is the path with the major parameters (channel_id and guild_id)
return '{0.channel_id}:{0.guild_id}:{0.path}'.format(self)
class MaybeUnlock:
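    """Context manager that releases the given lock on exit unless defer() was
    called, keeping an exhausted rate-limit bucket locked until it resets."""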
def __init__(self, lock):
self.lock = lock
self._unlock = True
def __enter__(self):
return self
def defer(self):
self._unlock = False
def __exit__(self, type, value, traceback):
if self._unlock:
self.lock.release()
# For some reason, the Discord voice websocket expects this header to be
# completely lowercase while aiohttp respects spec and does it as case-insensitive
aiohttp.hdrs.WEBSOCKET = 'websocket'
class HTTPClient:
"""Represents an HTTP client sending HTTP requests to the Discord API."""
SUCCESS_LOG = '{method} {url} has received {text}'
REQUEST_LOG = '{method} {url} with {json} has returned {status}'
def __init__(self, connector=None, *, proxy=None, proxy_auth=None, loop=None, unsync_clock=True):
self.loop = asyncio.get_event_loop() if loop is None else loop
self.connector = connector
self.__session = None # filled in static_login
self._locks = weakref.WeakValueDictionary()
self._global_over = asyncio.Event()
self._global_over.set()
self.token = None
self.bot_token = False
self.proxy = proxy
self.proxy_auth = proxy_auth
self.use_clock = not unsync_clock
user_agent = 'DiscordBot (https://github.com/Rapptz/discord.py {0}) Python/{1[0]}.{1[1]} aiohttp/{2}'
self.user_agent = user_agent.format(__version__, sys.version_info, aiohttp.__version__)
def recreate(self):
if self.__session.closed:
self.__session = aiohttp.ClientSession(connector=self.connector, ws_response_class=DiscordClientWebSocketResponse)
async def ws_connect(self, url, *, compress=0):
kwargs = {
'proxy_auth': self.proxy_auth,
'proxy': self.proxy,
'max_msg_size': 0,
'timeout': 30.0,
'autoclose': False,
'headers': {
'User-Agent': self.user_agent,
},
'compress': compress
}
return await self.__session.ws_connect(url, **kwargs)
async def request(self, route, *, files=None, form=None, **kwargs):
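        """Perform the request described by ``route``, serialized per rate-limit bucket.

        Retries up to 5 times: sleeps and retries on 429 (per-bucket or global),
        retries on 500/502, and otherwise raises Forbidden, NotFound,
        DiscordServerError or HTTPException.
        """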
bucket = route.bucket
method = route.method
url = route.url
lock = self._locks.get(bucket)
if lock is None:
lock = asyncio.Lock()
if bucket is not None:
self._locks[bucket] = lock
# header creation
headers = {
'User-Agent': self.user_agent,
'X-Ratelimit-Precision': 'millisecond',
}
if self.token is not None:
headers['Authorization'] = 'Bot ' + self.token if self.bot_token else self.token
# some checking if it's a JSON request
if 'json' in kwargs:
headers['Content-Type'] = 'application/json'
kwargs['data'] = utils.to_json(kwargs.pop('json'))
try:
reason = kwargs.pop('reason')
except KeyError:
pass
else:
if reason:
headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
kwargs['headers'] = headers
# Proxy support
if self.proxy is not None:
kwargs['proxy'] = self.proxy
if self.proxy_auth is not None:
kwargs['proxy_auth'] = self.proxy_auth
if not self._global_over.is_set():
# wait until the global lock is complete
await self._global_over.wait()
await lock.acquire()
with MaybeUnlock(lock) as maybe_lock:
for tries in range(5):
if files:
for f in files:
f.reset(seek=tries)
if form:
form_data = aiohttp.FormData()
for params in form:
form_data.add_field(**params)
kwargs['data'] = form_data
try:
async with self.__session.request(method, url, **kwargs) as r:
log.debug('%s %s with %s has returned %s', method, url, kwargs.get('data'), r.status)
# even errors have text involved in them so this is safe to call
data = await json_or_text(r)
# check if we have rate limit header information
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429:
# we've depleted our current bucket
delta = utils._parse_ratelimit_header(r, use_clock=self.use_clock)
log.debug('A rate limit bucket has been exhausted (bucket: %s, retry: %s).', bucket, delta)
maybe_lock.defer()
self.loop.call_later(delta, lock.release)
# the request was successful so just return the text/json
if 300 > r.status >= 200:
log.debug('%s %s has received %s', method, url, data)
return data
# we are being rate limited
if r.status == 429:
if not r.headers.get('Via'):
# Banned by Cloudflare more than likely.
raise HTTPException(r, data)
fmt = 'We are being rate limited. Retrying in %.2f seconds. Handled under the bucket "%s"'
# sleep a bit
retry_after = data['retry_after'] / 1000.0
log.warning(fmt, retry_after, bucket)
# check if it's a global rate limit
is_global = data.get('global', False)
if is_global:
log.warning('Global rate limit has been hit. Retrying in %.2f seconds.', retry_after)
self._global_over.clear()
await asyncio.sleep(retry_after)
log.debug('Done sleeping for the rate limit. Retrying...')
# release the global lock now that the
# global rate limit has passed
if is_global:
self._global_over.set()
log.debug('Global rate limit is now over.')
continue
# we've received a 500 or 502, unconditional retry
if r.status in {500, 502}:
await asyncio.sleep(1 + tries * 2)
continue
# the usual error cases
if r.status == 403:
raise Forbidden(r, data)
elif r.status == 404:
raise NotFound(r, data)
elif r.status == 503:
raise DiscordServerError(r, data)
else:
raise HTTPException(r, data)
# This is handling exceptions from the request
except OSError as e:
# Connection reset by peer
if tries < 4 and e.errno in (54, 10054):
continue
raise
# We've run out of retries, raise.
if r.status >= 500:
raise DiscordServerError(r, data)
raise HTTPException(r, data)
async def get_from_cdn(self, url):
async with self.__session.get(url) as resp:
if resp.status == 200:
return await resp.read()
elif resp.status == 404:
raise NotFound(resp, 'asset not found')
elif resp.status == 403:
raise Forbidden(resp, 'cannot retrieve asset')
else:
raise HTTPException(resp, 'failed to get asset')
# state management
async def close(self):
if self.__session:
await self.__session.close()
def _token(self, token, *, bot=True):
self.token = token
self.bot_token = bot
self._ack_token = None
# login management
async def static_login(self, token, *, bot):
# Necessary to get aiohttp to stop complaining about session creation
self.__session = aiohttp.ClientSession(connector=self.connector, ws_response_class=DiscordClientWebSocketResponse)
old_token, old_bot = self.token, self.bot_token
self._token(token, bot=bot)
try:
data = await self.request(Route('GET', '/users/@me'))
except HTTPException as exc:
self._token(old_token, bot=old_bot)
if exc.response.status == 401:
raise LoginFailure('Improper token has been passed.') from exc
raise
return data
def logout(self):
return self.request(Route('POST', '/auth/logout'))
# Group functionality
def start_group(self, user_id, recipients):
payload = {
'recipients': recipients
}
return self.request(Route('POST', '/users/{user_id}/channels', user_id=user_id), json=payload)
def leave_group(self, channel_id):
return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id))
def add_group_recipient(self, channel_id, user_id):
r = Route('PUT', '/channels/{channel_id}/recipients/{user_id}', channel_id=channel_id, user_id=user_id)
return self.request(r)
def remove_group_recipient(self, channel_id, user_id):
r = Route('DELETE', '/channels/{channel_id}/recipients/{user_id}', channel_id=channel_id, user_id=user_id)
return self.request(r)
def edit_group(self, channel_id, **options):
valid_keys = ('name', 'icon')
payload = {
k: v for k, v in options.items() if k in valid_keys
}
return self.request(Route('PATCH', '/channels/{channel_id}', channel_id=channel_id), json=payload)
def convert_group(self, channel_id):
return self.request(Route('POST', '/channels/{channel_id}/convert', channel_id=channel_id))
# Message management
def start_private_message(self, user_id):
payload = {
'recipient_id': user_id
}
return self.request(Route('POST', '/users/@me/channels'), json=payload)
def send_message(self, channel_id, content, *, tts=False, embed=None, nonce=None, allowed_mentions=None, message_reference=None):
r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
payload = {}
if content:
payload['content'] = content
if tts:
payload['tts'] = True
if embed:
payload['embed'] = embed
if nonce:
payload['nonce'] = nonce
if allowed_mentions:
payload['allowed_mentions'] = allowed_mentions
if message_reference:
payload['message_reference'] = message_reference
return self.request(r, json=payload)
def send_typing(self, channel_id):
return self.request(Route('POST', '/channels/{channel_id}/typing', channel_id=channel_id))
def send_files(self, channel_id, *, files, content=None, tts=False, embed=None, nonce=None, allowed_mentions=None, message_reference=None):
r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
form = []
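# Build a multipart form: the message fields travel as JSON in the 'payload_json'
# part, and each attachment is appended below as a separate 'file'/'fileN' part.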
payload = {'tts': tts}
if content:
payload['content'] = content
if embed:
payload['embed'] = embed
if nonce:
payload['nonce'] = nonce
if allowed_mentions:
payload['allowed_mentions'] = allowed_mentions
if message_reference:
payload['message_reference'] = message_reference
form.append({'name': 'payload_json', 'value': utils.to_json(payload)})
if len(files) == 1:
file = files[0]
form.append({
'name': 'file',
'value': file.fp,
'filename': file.filename,
'content_type': 'application/octet-stream'
})
else:
for index, file in enumerate(files):
form.append({
'name': 'file%s' % index,
'value': file.fp,
'filename': file.filename,
'content_type': 'application/octet-stream'
})
return self.request(r, form=form, files=files)
async def ack_message(self, channel_id, message_id):
r = Route('POST', '/channels/{channel_id}/messages/{message_id}/ack', channel_id=channel_id, message_id=message_id)
data = await self.request(r, json={'token': self._ack_token})
self._ack_token = data['token']
def ack_guild(self, guild_id):
return self.request(Route('POST', '/guilds/{guild_id}/ack', guild_id=guild_id))
def delete_message(self, channel_id, message_id, *, reason=None):
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r, reason=reason)
def delete_messages(self, channel_id, message_ids, *, reason=None):
r = Route('POST', '/channels/{channel_id}/messages/bulk_delete', channel_id=channel_id)
payload = {
'messages': message_ids
}
return self.request(r, json=payload, reason=reason)
def edit_message(self, channel_id, message_id, **fields):
r = Route('PATCH', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r, json=fields)
def add_reaction(self, channel_id, message_id, emoji):
r = Route('PUT', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
channel_id=channel_id, message_id=message_id, emoji=emoji)
return self.request(r)
def remove_reaction(self, channel_id, message_id, emoji, member_id):
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{member_id}',
channel_id=channel_id, message_id=message_id, member_id=member_id, emoji=emoji)
return self.request(r)
def remove_own_reaction(self, channel_id, message_id, emoji):
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
channel_id=channel_id, message_id=message_id, emoji=emoji)
return self.request(r)
def get_reaction_users(self, channel_id, message_id, emoji, limit, after=None):
r = Route('GET', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
channel_id=channel_id, message_id=message_id, emoji=emoji)
params = {'limit': limit}
if after:
params['after'] = after
return self.request(r, params=params)
def clear_reactions(self, channel_id, message_id):
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}/reactions',
channel_id=channel_id, message_id=message_id)
return self.request(r)
def clear_single_reaction(self, channel_id, message_id, emoji):
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
channel_id=channel_id, message_id=message_id, emoji=emoji)
return self.request(r)
def get_message(self, channel_id, message_id):
r = Route('GET', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r)
def get_channel(self, channel_id):
r = Route('GET', '/channels/{channel_id}', channel_id=channel_id)
return self.request(r)
def logs_from(self, channel_id, limit, before=None, after=None, around=None):
params = {
'limit': limit
}
if before is not None:
params['before'] = before
if after is not None:
params['after'] = after
if around is not None:
params['around'] = around
return self.request(Route('GET', '/channels/{channel_id}/messages', channel_id=channel_id), params=params)
def publish_message(self, channel_id, message_id):
return self.request(Route('POST', '/channels/{channel_id}/messages/{message_id}/crosspost',
channel_id=channel_id, message_id=message_id))
def pin_message(self, channel_id, message_id, reason=None):
return self.request(Route('PUT', '/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id, message_id=message_id), reason=reason)
def unpin_message(self, channel_id, message_id, reason=None):
return self.request(Route('DELETE', '/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id, message_id=message_id), reason=reason)
def pins_from(self, channel_id):
return self.request(Route('GET', '/channels/{channel_id}/pins', channel_id=channel_id))
# Member management
def kick(self, user_id, guild_id, reason=None):
r = Route('DELETE', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
if reason:
# thanks aiohttp
r.url = '{0.url}?reason={1}'.format(r, _uriquote(reason))
return self.request(r)
def ban(self, user_id, guild_id, delete_message_days=1, reason=None):
r = Route('PUT', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id)
params = {
'delete_message_days': delete_message_days,
}
if reason:
# thanks aiohttp
r.url = '{0.url}?reason={1}'.format(r, _uriquote(reason))
return self.request(r, params=params)
def unban(self, user_id, guild_id, *, reason=None):
r = Route('DELETE', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, reason=reason)
def guild_voice_state(self, user_id, guild_id, *, mute=None, deafen=None, reason=None):
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
payload = {}
if mute is not None:
payload['mute'] = mute
if deafen is not None:
payload['deaf'] = deafen
return self.request(r, json=payload, reason=reason)
def edit_profile(self, password, username, avatar, **fields):
payload = {
'password': password,
'username': username,
'avatar': avatar
}
if 'email' in fields:
payload['email'] = fields['email']
if 'new_password' in fields:
payload['new_password'] = fields['new_password']
return self.request(Route('PATCH', '/users/@me'), json=payload)
def change_my_nickname(self, guild_id, nickname, *, reason=None):
r = Route('PATCH', '/guilds/{guild_id}/members/@me/nick', guild_id=guild_id)
payload = {
'nick': nickname
}
return self.request(r, json=payload, reason=reason)
def change_nickname(self, guild_id, user_id, nickname, *, reason=None):
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
payload = {
'nick': nickname
}
return self.request(r, json=payload, reason=reason)
def edit_my_voice_state(self, guild_id, payload):
r = Route('PATCH', '/guilds/{guild_id}/voice-states/@me', guild_id=guild_id)
return self.request(r, json=payload)
def edit_voice_state(self, guild_id, user_id, payload):
r = Route('PATCH', '/guilds/{guild_id}/voice-states/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, json=payload)
def edit_member(self, guild_id, user_id, *, reason=None, **fields):
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, json=fields, reason=reason)
# Channel management
def edit_channel(self, channel_id, *, reason=None, **options):
r = Route('PATCH', '/channels/{channel_id}', channel_id=channel_id)
valid_keys = ('name', 'parent_id', 'topic', 'bitrate', 'nsfw',
'user_limit', 'position', 'permission_overwrites', 'rate_limit_per_user',
'type', 'rtc_region')
payload = {
k: v for k, v in options.items() if k in valid_keys
}
return self.request(r, reason=reason, json=payload)
def bulk_channel_update(self, guild_id, data, *, reason=None):
r = Route('PATCH', '/guilds/{guild_id}/channels', guild_id=guild_id)
return self.request(r, json=data, reason=reason)
def create_channel(self, guild_id, channel_type, *, reason=None, **options):
payload = {
'type': channel_type
}
valid_keys = ('name', 'parent_id', 'topic', 'bitrate', 'nsfw',
'user_limit', 'position', 'permission_overwrites', 'rate_limit_per_user',
'rtc_region')
payload.update({
k: v for k, v in options.items() if k in valid_keys and v is not None
})
return self.request(Route('POST', '/guilds/{guild_id}/channels', guild_id=guild_id), json=payload, reason=reason)
def delete_channel(self, channel_id, *, reason=None):
return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id), reason=reason)
# Webhook management
def create_webhook(self, channel_id, *, name, avatar=None, reason=None):
payload = {
'name': name
}
if avatar is not None:
payload['avatar'] = avatar
r = Route('POST', '/channels/{channel_id}/webhooks', channel_id=channel_id)
return self.request(r, json=payload, reason=reason)
def channel_webhooks(self, channel_id):
return self.request(Route('GET', '/channels/{channel_id}/webhooks', channel_id=channel_id))
def guild_webhooks(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/webhooks', guild_id=guild_id))
def get_webhook(self, webhook_id):
return self.request(Route('GET', '/webhooks/{webhook_id}', webhook_id=webhook_id))
def follow_webhook(self, channel_id, webhook_channel_id, reason=None):
payload = {
'webhook_channel_id': str(webhook_channel_id)
}
return self.request(Route('POST', '/channels/{channel_id}/followers', channel_id=channel_id), json=payload, reason=reason)
# Guild management
def get_guilds(self, limit, before=None, after=None):
params = {
'limit': limit
}
if before:
params['before'] = before
if after:
params['after'] = after
return self.request(Route('GET', '/users/@me/guilds'), params=params)
def leave_guild(self, guild_id):
return self.request(Route('DELETE', '/users/@me/guilds/{guild_id}', guild_id=guild_id))
def get_guild(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}', guild_id=guild_id))
def delete_guild(self, guild_id):
return self.request(Route('DELETE', '/guilds/{guild_id}', guild_id=guild_id))
def create_guild(self, name, region, icon):
payload = {
'name': name,
'icon': icon,
'region': region
}
return self.request(Route('POST', '/guilds'), json=payload)
def edit_guild(self, guild_id, *, reason=None, **fields):
valid_keys = ('name', 'region', 'icon', 'afk_timeout', 'owner_id',
'afk_channel_id', 'splash', 'verification_level',
'system_channel_id', 'default_message_notifications',
'description', 'explicit_content_filter', 'banner',
'system_channel_flags', 'rules_channel_id',
'public_updates_channel_id', 'preferred_locale',)
payload = {
k: v for k, v in fields.items() if k in valid_keys
}
return self.request(Route('PATCH', '/guilds/{guild_id}', guild_id=guild_id), json=payload, reason=reason)
def get_template(self, code):
return self.request(Route('GET', '/guilds/templates/{code}', code=code))
def guild_templates(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/templates', guild_id=guild_id))
def create_template(self, guild_id, payload):
return self.request(Route('POST', '/guilds/{guild_id}/templates', guild_id=guild_id), json=payload)
def sync_template(self, guild_id, code):
return self.request(Route('PUT', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code))
def edit_template(self, guild_id, code, payload):
valid_keys = (
'name',
'description',
)
payload = {
k: v for k, v in payload.items() if k in valid_keys
}
return self.request(Route('PATCH', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code), json=payload)
def delete_template(self, guild_id, code):
return self.request(Route('DELETE', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code))
def create_from_template(self, code, name, region, icon):
payload = {
'name': name,
'icon': icon,
'region': region
}
return self.request(Route('POST', '/guilds/templates/{code}', code=code), json=payload)
def get_bans(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/bans', guild_id=guild_id))
def get_ban(self, user_id, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id))
def get_vanity_code(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/vanity-url', guild_id=guild_id))
def change_vanity_code(self, guild_id, code, *, reason=None):
payload = {'code': code}
return self.request(Route('PATCH', '/guilds/{guild_id}/vanity-url', guild_id=guild_id), json=payload, reason=reason)
def get_all_guild_channels(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/channels', guild_id=guild_id))
def get_members(self, guild_id, limit, after):
params = {
'limit': limit,
}
if after:
params['after'] = after
r = Route('GET', '/guilds/{guild_id}/members', guild_id=guild_id)
return self.request(r, params=params)
def get_member(self, guild_id, member_id):
return self.request(Route('GET', '/guilds/{guild_id}/members/{member_id}', guild_id=guild_id, member_id=member_id))
def prune_members(self, guild_id, days, compute_prune_count, roles, *, reason=None):
payload = {
'days': days,
'compute_prune_count': 'true' if compute_prune_count else 'false'
}
if roles:
payload['include_roles'] = ', '.join(roles)
return self.request(Route('POST', '/guilds/{guild_id}/prune', guild_id=guild_id), json=payload, reason=reason)
def estimate_pruned_members(self, guild_id, days, roles):
params = {
'days': days
}
if roles:
params['include_roles'] = ', '.join(roles)
return self.request(Route('GET', '/guilds/{guild_id}/prune', guild_id=guild_id), params=params)
def get_all_custom_emojis(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/emojis', guild_id=guild_id))
def get_custom_emoji(self, guild_id, emoji_id):
return self.request(Route('GET', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id))
def create_custom_emoji(self, guild_id, name, image, *, roles=None, reason=None):
payload = {
'name': name,
'image': image,
'roles': roles or []
}
r = Route('POST', '/guilds/{guild_id}/emojis', guild_id=guild_id)
return self.request(r, json=payload, reason=reason)
def delete_custom_emoji(self, guild_id, emoji_id, *, reason=None):
r = Route('DELETE', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id)
return self.request(r, reason=reason)
def edit_custom_emoji(self, guild_id, emoji_id, *, name, roles=None, reason=None):
payload = {
'name': name,
'roles': roles or []
}
r = Route('PATCH', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id)
return self.request(r, json=payload, reason=reason)
def get_all_integrations(self, guild_id):
r = Route('GET', '/guilds/{guild_id}/integrations', guild_id=guild_id)
return self.request(r)
def create_integration(self, guild_id, type, id):
payload = {
'type': type,
'id': id
}
r = Route('POST', '/guilds/{guild_id}/integrations', guild_id=guild_id)
return self.request(r, json=payload)
def edit_integration(self, guild_id, integration_id, **payload):
r = Route('PATCH', '/guilds/{guild_id}/integrations/{integration_id}', guild_id=guild_id,
integration_id=integration_id)
return self.request(r, json=payload)
def sync_integration(self, guild_id, integration_id):
r = Route('POST', '/guilds/{guild_id}/integrations/{integration_id}/sync', guild_id=guild_id,
integration_id=integration_id)
return self.request(r)
def delete_integration(self, guild_id, integration_id):
r = Route('DELETE', '/guilds/{guild_id}/integrations/{integration_id}', guild_id=guild_id,
integration_id=integration_id)
return self.request(r)
def get_audit_logs(self, guild_id, limit=100, before=None, after=None, user_id=None, action_type=None):
params = {'limit': limit}
if before:
params['before'] = before
if after:
params['after'] = after
if user_id:
params['user_id'] = user_id
if action_type:
params['action_type'] = action_type
r = Route('GET', '/guilds/{guild_id}/audit-logs', guild_id=guild_id)
return self.request(r, params=params)
def get_widget(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/widget.json', guild_id=guild_id))
# Invite management
def create_invite(self, channel_id, *, reason=None, **options):
r = Route('POST', '/channels/{channel_id}/invites', channel_id=channel_id)
payload = {
'max_age': options.get('max_age', 0),
'max_uses': options.get('max_uses', 0),
'temporary': options.get('temporary', False),
'unique': options.get('unique', True)
}
return self.request(r, reason=reason, json=payload)
def get_invite(self, invite_id, *, with_counts=True):
params = {
'with_counts': int(with_counts)
}
return self.request(Route('GET', '/invites/{invite_id}', invite_id=invite_id), params=params)
def invites_from(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/invites', guild_id=guild_id))
def invites_from_channel(self, channel_id):
return self.request(Route('GET', '/channels/{channel_id}/invites', channel_id=channel_id))
def delete_invite(self, invite_id, *, reason=None):
return self.request(Route('DELETE', '/invites/{invite_id}', invite_id=invite_id), reason=reason)
# Role management
def get_roles(self, guild_id):
return self.request(Route('GET', '/guilds/{guild_id}/roles', guild_id=guild_id))
def edit_role(self, guild_id, role_id, *, reason=None, **fields):
r = Route('PATCH', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
valid_keys = ('name', 'permissions', 'color', 'hoist', 'mentionable')
payload = {
k: v for k, v in fields.items() if k in valid_keys
}
return self.request(r, json=payload, reason=reason)
def delete_role(self, guild_id, role_id, *, reason=None):
r = Route('DELETE', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
return self.request(r, reason=reason)
def replace_roles(self, user_id, guild_id, role_ids, *, reason=None):
return self.edit_member(guild_id=guild_id, user_id=user_id, roles=role_ids, reason=reason)
def create_role(self, guild_id, *, reason=None, **fields):
r = Route('POST', '/guilds/{guild_id}/roles', guild_id=guild_id)
return self.request(r, json=fields, reason=reason)
def move_role_position(self, guild_id, positions, *, reason=None):
r = Route('PATCH', '/guilds/{guild_id}/roles', guild_id=guild_id)
return self.request(r, json=positions, reason=reason)
def add_role(self, guild_id, user_id, role_id, *, reason=None):
r = Route('PUT', '/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id, user_id=user_id, role_id=role_id)
return self.request(r, reason=reason)
def remove_role(self, guild_id, user_id, role_id, *, reason=None):
r = Route('DELETE', '/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id, user_id=user_id, role_id=role_id)
return self.request(r, reason=reason)
def edit_channel_permissions(self, channel_id, target, allow, deny, type, *, reason=None):
payload = {
'id': target,
'allow': allow,
'deny': deny,
'type': type
}
r = Route('PUT', '/channels/{channel_id}/permissions/{target}', channel_id=channel_id, target=target)
return self.request(r, json=payload, reason=reason)
def delete_channel_permissions(self, channel_id, target, *, reason=None):
r = Route('DELETE', '/channels/{channel_id}/permissions/{target}', channel_id=channel_id, target=target)
return self.request(r, reason=reason)
# Voice management
def move_member(self, user_id, guild_id, channel_id, *, reason=None):
return self.edit_member(guild_id=guild_id, user_id=user_id, channel_id=channel_id, reason=reason)
# Relationship related
def remove_relationship(self, user_id):
r = Route('DELETE', '/users/@me/relationships/{user_id}', user_id=user_id)
return self.request(r)
def add_relationship(self, user_id, type=None):
r = Route('PUT', '/users/@me/relationships/{user_id}', user_id=user_id)
payload = {}
if type is not None:
payload['type'] = type
return self.request(r, json=payload)
def send_friend_request(self, username, discriminator):
r = Route('POST', '/users/@me/relationships')
payload = {
'username': username,
'discriminator': int(discriminator)
}
return self.request(r, json=payload)
# Misc
def application_info(self):
return self.request(Route('GET', '/oauth2/applications/@me'))
async def get_gateway(self, *, encoding='json', v=6, zlib=True):
try:
data = await self.request(Route('GET', '/gateway'))
except HTTPException as exc:
raise GatewayNotFound() from exc
if zlib:
value = '{0}?encoding={1}&v={2}&compress=zlib-stream'
else:
value = '{0}?encoding={1}&v={2}'
return value.format(data['url'], encoding, v)
async def get_bot_gateway(self, *, encoding='json', v=6, zlib=True):
try:
data = await self.request(Route('GET', '/gateway/bot'))
except HTTPException as exc:
raise GatewayNotFound() from exc
if zlib:
value = '{0}?encoding={1}&v={2}&compress=zlib-stream'
else:
value = '{0}?encoding={1}&v={2}'
return data['shards'], value.format(data['url'], encoding, v)
def get_user(self, user_id):
return self.request(Route('GET', '/users/{user_id}', user_id=user_id))
def get_user_profile(self, user_id):
return self.request(Route('GET', '/users/{user_id}/profile', user_id=user_id))
def get_mutual_friends(self, user_id):
return self.request(Route('GET', '/users/{user_id}/relationships', user_id=user_id))
def change_hypesquad_house(self, house_id):
payload = {'house_id': house_id}
return self.request(Route('POST', '/hypesquad/online'), json=payload)
def leave_hypesquad_house(self):
return self.request(Route('DELETE', '/hypesquad/online'))
def edit_settings(self, **payload):
return self.request(Route('PATCH', '/users/@me/settings'), json=payload)
/formification-1.2.0-py3-none-any.whl/formulaic/static/admin/formulaic/ember-formulaic/node_modules/bower/lib/node_modules/mout/doc/time.md
# time #
Utilities for time manipulation.
## convert(value, sourceUnit, [destinationUnit]):Number
Converts time between units.
Available units: `millisecond`, `second`, `minute`, `hour`, `day`, `week`.
Abbreviations: `ms`, `s`, `m`, `h`, `d`, `w`.
We do **not** support year and month as time units since their values are not
fixed.
The default `destinationUnit` is `ms`.
```js
convert(1, 'minute'); // 60000
convert(2.5, 's', 'ms'); // 2500
convert(2, 'm', 's'); // 120
convert(500, 'ms', 's'); // 0.5
```
## now():Number
Returns the number of milliseconds elapsed since 1 January 1970 00:00:00 UTC.
Uses `Date.now()` if available.
### Example
```js
now(); // 1335449614650
```
## parseMs(ms):Object
Parse timestamp (milliseconds) into an object `{milliseconds:number,
seconds:number, minutes:number, hours:number, days:number}`.
### Example
```js
// {days:27, hours:4, minutes:26, seconds:5, milliseconds:454}
parseMs(2348765454);
```
## toTimeString(ms):String
Convert timestamp (milliseconds) into a time string in the format "[H:]MM:SS".
### Example
```js
toTimeString(12513); // "00:12"
toTimeString(951233); // "15:51"
toTimeString(8765235); // "2:26:05"
```
| PypiClean |
/csle_system_identification-0.3.8.tar.gz/csle_system_identification-0.3.8/src/csle_system_identification/emulator.py | from typing import List, Tuple
import time
import os
import sys
import numpy as np
import csle_common.constants.constants as constants
from csle_common.dao.emulation_config.emulation_env_state import EmulationEnvState
from csle_common.dao.emulation_config.emulation_env_config import EmulationEnvConfig
from csle_common.dao.emulation_config.emulation_trace import EmulationTrace
from csle_common.util.experiment_util import ExperimentUtil
from csle_common.util.env_dynamics_util import EnvDynamicsUtil
from csle_common.metastore.metastore_facade import MetastoreFacade
from csle_common.logging.log import Logger
from csle_attacker.attacker import Attacker
from csle_defender.defender import Defender
from csle_common.dao.system_identification.emulation_statistics import EmulationStatistics
from csle_common.dao.jobs.data_collection_job_config import DataCollectionJobConfig
from csle_common.dao.emulation_action.attacker.emulation_attacker_action import EmulationAttackerAction
from csle_common.dao.emulation_action.defender.emulation_defender_action import EmulationDefenderAction
from csle_common.dao.emulation_action.attacker.emulation_attacker_stopping_actions \
import EmulationAttackerStoppingActions
from csle_common.dao.emulation_action.defender.emulation_defender_stopping_actions \
import EmulationDefenderStoppingActions
from csle_common.util.general_util import GeneralUtil
from csle_cluster.cluster_manager.cluster_controller import ClusterController
class Emulator:
"""
Class for running episodes in the emulation system
"""
@staticmethod
def run_action_sequences(
emulation_env_config: EmulationEnvConfig, attacker_sequence: List[EmulationAttackerAction],
defender_sequence: List[EmulationDefenderAction],
repeat_times: int = 1, sleep_time: int = 1, save_dir: str = "",
emulation_statistics: EmulationStatistics = None, descr: str = "", save: bool = True,
data_collection_job: DataCollectionJobConfig = None,
save_emulation_traces_every: int = 10,
emulation_traces_to_save_with_data_collection_job: int = 3,
intrusion_start_p: float = 0.1, intrusion_continue: float = 0.3, trace_len: int = 30,
restart_client_population: bool = False) -> None:
"""
Runs an attacker and defender sequence in the emulation <repeat_times> times
:param emulation_env_config: the configuration of the emulation
:param attacker_sequence: the sequence of attacker actions
:param defender_sequence: the sequence of defender actions
:param repeat_times: the number of times to repeat the sequences
:param sleep_time: the number of seconds to sleep between time-steps
:param save_dir: the directory to save the collected traces
:param emulation_statistics: the emulation statistics to update
:param descr: description of the execution
:param save: boolean parameter indicating whether traces and statistics should be saved or not
:param data_collection_job: the system identification job configuration
:param save_emulation_traces_every: how frequently to save emulation traces
:param emulation_traces_to_save_with_data_collection_job: number of traces to save with the job
:param intrusion_start_p: the p parameter for the geometric distribution that determines
when an intrusion starts
:param intrusion_continue: the p parameter for the geometric distribution that determines
when an intrusion continues
:param trace_len: fixed trace length
:param restart_client_population: whether to restart the client population after each trace.
:return: None
"""
logger = Logger.__call__().get_logger()
# Setup save dir
if save_dir == "":
save_dir = ExperimentUtil.default_output_dir() + "/results"
assert len(attacker_sequence) == len(defender_sequence)
# Setup emulation statistic
if emulation_statistics is None:
emulation_statistics = EmulationStatistics(emulation_name=emulation_env_config.name, descr=descr)
if (emulation_statistics.id == -1 or emulation_statistics.id is None) and save:
statistics_id = MetastoreFacade.save_emulation_statistic(emulation_statistics=emulation_statistics)
else:
statistics_id = -1
if emulation_statistics is not None:
statistics_id = emulation_statistics.id
# Setup data collection job
pid = os.getpid()
if data_collection_job is None:
data_collection_job = DataCollectionJobConfig(
emulation_env_name=emulation_env_config.name, num_collected_steps=0, progress_percentage=0.0,
attacker_sequence=attacker_sequence, defender_sequence=defender_sequence,
pid=pid, descr=descr, repeat_times=repeat_times, emulation_statistic_id=statistics_id, traces=[],
num_sequences_completed=0, save_emulation_traces_every=save_emulation_traces_every,
num_cached_traces=emulation_traces_to_save_with_data_collection_job,
log_file_path=Logger.__call__().get_log_file_path(), physical_host_ip=GeneralUtil.get_host_ip())
job_id = MetastoreFacade.save_data_collection_job(
data_collection_job=data_collection_job)
data_collection_job.id = job_id
else:
data_collection_job.pid = pid
data_collection_job.num_collected_steps = 0
data_collection_job.progress_percentage = 0.0
data_collection_job.num_sequences_completed = 0
data_collection_job.traces = []
data_collection_job.log_file_path = Logger.__call__().get_log_file_path()
MetastoreFacade.update_data_collection_job(data_collection_job=data_collection_job,
id=data_collection_job.id)
# Start the collection
s = EmulationEnvState(emulation_env_config=emulation_env_config)
s.initialize_defender_machines()
emulation_statistics.initialize_machines(s=s)
emulation_traces: List[EmulationTrace] = []
collected_steps = 0
for i in range(repeat_times):
intrusion_start_time = -1
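# Sample the step at which the intrusion starts from a geometric distribution;
# until then both agents take CONTINUE (no-op) actions, and between consecutive
# attacker actions extra waiting steps are drawn from Geometric(intrusion_continue).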
if intrusion_start_p > 0:
intrusion_start_time = np.random.geometric(p=intrusion_start_p, size=1)[0]
attacker_wait_seq = [EmulationAttackerStoppingActions.CONTINUE(index=-1)] * intrusion_start_time
defender_wait_seq = [EmulationDefenderStoppingActions.CONTINUE(index=-1)] * intrusion_start_time
full_attacker_sequence = attacker_wait_seq
full_defender_sequence = defender_wait_seq
for j in range(len(attacker_sequence)):
num_wait_steps = np.random.geometric(p=intrusion_continue, size=1)[0] - 1
wait_steps = [EmulationAttackerStoppingActions.CONTINUE(index=-1)] * num_wait_steps
full_attacker_sequence = full_attacker_sequence + wait_steps
full_attacker_sequence = full_attacker_sequence + [attacker_sequence[j]]
full_defender_sequence = full_defender_sequence + [
EmulationDefenderStoppingActions.CONTINUE(index=-1)] * (num_wait_steps + 1)
else:
full_attacker_sequence = [EmulationAttackerStoppingActions.CONTINUE(index=-1)] * trace_len
full_defender_sequence = [EmulationDefenderStoppingActions.CONTINUE(index=-1)] * trace_len
T = len(full_attacker_sequence)
assert len(full_defender_sequence) == len(full_attacker_sequence)
logger.info(f"Starting execution of static action sequences, iteration:{i}, T:{T}, "
f"I_t:{intrusion_start_time}")
sys.stdout.flush()
s.reset()
emulation_trace = EmulationTrace(initial_attacker_observation_state=s.attacker_obs_state,
initial_defender_observation_state=s.defender_obs_state,
emulation_name=emulation_env_config.name)
s.defender_obs_state.reset_metric_lists()
time.sleep(sleep_time)
s.defender_obs_state.average_metric_lists()
emulation_statistics.update_initial_statistics(s=s)
traces = emulation_traces + [emulation_trace]
if len(traces) > data_collection_job.num_cached_traces:
data_collection_job.traces = traces[-data_collection_job.num_cached_traces:]
else:
data_collection_job.traces = traces
if restart_client_population:
ClusterController.stop_kafka_client_producer(
ip=emulation_env_config.traffic_config.client_population_config.physical_host_ip,
port=constants.GRPC_SERVERS.CLUSTER_MANAGER_PORT, ip_first_octet=emulation_env_config.execution_id,
emulation=emulation_env_config.name)
ClusterController.stop_client_population(
ip=emulation_env_config.traffic_config.client_population_config.physical_host_ip,
port=constants.GRPC_SERVERS.CLUSTER_MANAGER_PORT, ip_first_octet=emulation_env_config.execution_id,
emulation=emulation_env_config.name)
time.sleep(5)
ClusterController.start_client_population(
ip=emulation_env_config.traffic_config.client_population_config.physical_host_ip,
port=constants.GRPC_SERVERS.CLUSTER_MANAGER_PORT, ip_first_octet=emulation_env_config.execution_id,
emulation=emulation_env_config.name
)
time.sleep(5)
ClusterController.start_kafka_client_producer(
ip=emulation_env_config.traffic_config.client_population_config.physical_host_ip,
port=constants.GRPC_SERVERS.CLUSTER_MANAGER_PORT, ip_first_octet=emulation_env_config.execution_id,
emulation=emulation_env_config.name
)
time.sleep(15)
for t in range(T):
old_state = s.copy()
a1 = full_defender_sequence[t]
a2 = full_attacker_sequence[t]
logger.info(f"t:{t}, a1: {a1}, a2: {a2}")
s.defender_obs_state.reset_metric_lists()
emulation_trace, s = Emulator.run_actions(
emulation_env_config=emulation_env_config, attacker_action=a2, defender_action=a1,
sleep_time=sleep_time, trace=emulation_trace, s=s)
emulation_statistics.update_delta_statistics(s=old_state, s_prime=s, a1=a1, a2=a2)
if intrusion_start_p > 0:
total_steps = (1 / intrusion_start_p) * repeat_times
else:
total_steps = trace_len
collected_steps += 1
data_collection_job.num_collected_steps = collected_steps
data_collection_job.progress_percentage = (round(collected_steps / total_steps, 2))
data_collection_job.num_sequences_completed = i
data_collection_job.traces[-1] = emulation_trace
logger.debug(f"job updated, steps collected: {data_collection_job.num_collected_steps}, "
f"progress: {data_collection_job.progress_percentage}, "
f"sequences completed: {i}/{repeat_times}")
sys.stdout.flush()
MetastoreFacade.update_data_collection_job(data_collection_job=data_collection_job,
id=data_collection_job.id)
MetastoreFacade.update_emulation_statistic(emulation_statistics=emulation_statistics, id=statistics_id)
if restart_client_population:
ClusterController.stop_kafka_client_producer(
ip=emulation_env_config.traffic_config.client_population_config.physical_host_ip,
port=constants.GRPC_SERVERS.CLUSTER_MANAGER_PORT,
ip_first_octet=emulation_env_config.execution_id,
emulation=emulation_env_config.name)
ClusterController.stop_client_population(
ip=emulation_env_config.traffic_config.client_population_config.physical_host_ip,
port=constants.GRPC_SERVERS.CLUSTER_MANAGER_PORT,
ip_first_octet=emulation_env_config.execution_id,
emulation=emulation_env_config.name)
time.sleep(5)
ClusterController.start_client_population(
ip=emulation_env_config.traffic_config.client_population_config.physical_host_ip,
port=constants.GRPC_SERVERS.CLUSTER_MANAGER_PORT,
ip_first_octet=emulation_env_config.execution_id,
emulation=emulation_env_config.name
)
time.sleep(5)
ClusterController.start_kafka_client_producer(
ip=emulation_env_config.traffic_config.client_population_config.physical_host_ip,
port=constants.GRPC_SERVERS.CLUSTER_MANAGER_PORT,
ip_first_octet=emulation_env_config.execution_id,
emulation=emulation_env_config.name
)
time.sleep(15)
if save and i % save_emulation_traces_every == 0:
MetastoreFacade.save_emulation_trace(emulation_trace)
emulation_traces.append(emulation_trace)
logger.info("All sequences completed, saving traces and emulation statistics")
sys.stdout.flush()
if save:
EmulationTrace.save_traces_to_disk(traces_save_dir=save_dir, traces=emulation_traces)
MetastoreFacade.update_emulation_statistic(emulation_statistics=emulation_statistics, id=statistics_id)
s.cleanup()
MetastoreFacade.remove_data_collection_job(data_collection_job=data_collection_job)
@staticmethod
def run_actions(emulation_env_config: EmulationEnvConfig, attacker_action: EmulationAttackerAction,
s: EmulationEnvState,
defender_action: EmulationDefenderAction, trace: EmulationTrace,
sleep_time: int = 1) -> Tuple[EmulationTrace, EmulationEnvState]:
"""
Runs a pair of actions in the emulation and updates a provided trace
:param emulation_env_config: configuration of the emulation environment
:param attacker_action: the attacker action
:param s: the current emulation state
:param defender_action: the defender action
:param trace: the trace to update
:param sleep_time: the time-step length
:return: the updated trace and state
"""
logger = Logger.__call__().get_logger()
attacker_action.ips = s.attacker_obs_state.get_action_ips(a=attacker_action,
emulation_env_config=emulation_env_config)
defender_action.ips = s.defender_obs_state.get_action_ips(a=defender_action,
emulation_env_config=emulation_env_config)
logger.info(f"Executing attacker action:{attacker_action.name} on machine index: {attacker_action.index}, "
f"ips:{attacker_action.ips}")
logger.info(f"Machines: {list(map(lambda x: x.ips[0], s.attacker_obs_state.machines))}")
s_prime = Attacker.attacker_transition(s=s, attacker_action=attacker_action)
logger.debug(f"Attacker action complete, attacker state:{s_prime.attacker_obs_state}")
EnvDynamicsUtil.cache_attacker_action(a=attacker_action, s=s_prime)
logger.debug(f"Executing defender action:{defender_action.name} on machine index: {defender_action.index}")
s_prime_prime = Defender.defender_transition(s=s_prime, defender_action=defender_action)
logger.debug(f"Defender action complete, defender state:{s_prime.defender_obs_state}, "
f"ips:{defender_action.ips}")
sys.stdout.flush()
EnvDynamicsUtil.cache_defender_action(a=defender_action, s=s_prime_prime)
time.sleep(sleep_time)
s_prime_prime.defender_obs_state.average_metric_lists()
trace.attacker_observation_states.append(s_prime_prime.attacker_obs_state.copy())
trace.defender_observation_states.append(s_prime_prime.defender_obs_state.copy())
trace.attacker_actions.append(attacker_action)
trace.defender_actions.append(defender_action)
s = s_prime_prime
return trace, s
/HarmoniaCosmo-0.1.2-py3-none-any.whl/harmonia/reader/likelihoods.py
import logging
import warnings
# pylint: disable=no-name-in-module
import numpy as np
from scipy.special import loggamma
from harmonia.utils import (
PositiveDefinitenessWarning,
is_positive_definite,
mat_logdet,
)
# Probability distributions
# -----------------------------------------------------------------------------
def _are_valid_moments(first_moment, second_moment):
# Check dimensions of the expectation and variance are consistent.
criterion1 = (np.squeeze(first_moment).ndim == 1)
criterion2 = (np.squeeze(second_moment).ndim == 2)
criterion3 = (np.shape(first_moment) * 2 == np.shape(second_moment))
return all([criterion1, criterion2, criterion3])
def chi_square(data_vector, covariance_matrix):
"""Calculate chi-square from zero-centred data vector and its
covariance matrix.
Parameters
----------
data_vector : complex, array_like
Zero-centred data vector.
covariance_matrix : complex, array_like
Covariance matrix.
Returns
-------
chi_sq : float :class:`numpy.ndarray`
Chi-square value.
"""
data_vector = np.squeeze(data_vector)
covariance_matrix = np.squeeze(covariance_matrix)
if not _are_valid_moments(data_vector, covariance_matrix):
raise ValueError("Check input dimensions.")
# pylint: disable=unexpected-keyword-arg
chi_sq = np.dot(
np.conj(data_vector), np.linalg.solve(covariance_matrix, data_vector)
)
return chi_sq
def complex_normal_pdf(data_vector, covariance_matrix, ret_log=True,
downscale=None):
"""Compute the complex normal probability density function or its
natural logarithm given the zero-centred data vector and its
covariance matrix.
Parameters
----------
data_vector : complex, array_like
Zero-centred data vector.
covariance_matrix : complex, array_like
Covariance matrix.
ret_log : bool, optional
If `True` (default), return logarithmic probability density.
downscale : float or None, optional
If not `None` (default), the data vector and covariance matrix are
simultaneously downscaled to avoid numerical issues.
Returns
-------
float, array_like
(Logarithmic) probability density.
"""
data_vector = np.squeeze(data_vector)
covariance_matrix = np.squeeze(covariance_matrix)
if not _are_valid_moments(data_vector, covariance_matrix):
raise ValueError("Check input dimensions.")
dim = np.size(data_vector)
log_normalisation = dim * np.log(np.pi)
if downscale is not None:
data_vector = data_vector / downscale
covariance_matrix = covariance_matrix / downscale ** 2
log_normalisation -= 2 * dim * np.log(downscale)
if not is_positive_definite(covariance_matrix):
warnings.warn(
"`covariance_matrix` is not positive definite.",
PositiveDefinitenessWarning
)
density = \
- log_normalisation \
- mat_logdet(covariance_matrix) \
- np.real_if_close(
chi_square(data_vector, covariance_matrix), tol=10**10
)
return density if ret_log else np.exp(density)
def multivariate_normal_pdf(data_vector, expectation_vector, covariance_matrix,
ret_log=True):
"""Compute the multivariate normal probability density function or its
natural logarithm given the data vector, its mean vector and covariance
matrix.
Parameters
----------
data_vector : float, array_like
Data vector.
expectation_vector : float, array_like
Mean vector.
covariance_matrix : float, array_like
Covariance matrix.
ret_log : bool, optional
If `True` (default), return logarithmic probability density.
Returns
-------
density : float, array_like
(Logarithmic) probability density value.
"""
data_vector = np.squeeze(data_vector)
expectation_vector = np.squeeze(expectation_vector)
covariance_matrix = np.squeeze(covariance_matrix)
if not _are_valid_moments(data_vector, covariance_matrix) or \
not _are_valid_moments(expectation_vector, covariance_matrix):
raise ValueError("Check input dimensions.")
dim = np.size(data_vector)
log_normalisation = dim * np.log(2*np.pi)
log_determinant = mat_logdet(covariance_matrix)
chi_sq = chi_square(data_vector - expectation_vector, covariance_matrix)
density = - (log_normalisation + log_determinant + chi_sq) / 2
return density if ret_log else np.exp(density)
def modified_student_pdf(data_vector, expectation_vector, covariance_matrix,
degree, ret_log=True):
"""Compute the multivariate modified Student probability density
function or its natural logarithm given the data vector, its mean
vector and covariance matrix.
Parameters
----------
data_vector : float, array_like
Data vector.
expectation_vector : float, array_like
Mean vector.
covariance_matrix : float, array_like
Covariance matrix.
degree : int
The degree number. This could be the number of empirical
covariance matrices used to obtain the estimated
`covariance_matrix`.
ret_log : bool, optional
If `True` (default), return logarithmic probability density.
Returns
-------
float, array_like
(Logarithmic) probability density value.
"""
data_vector = np.squeeze(data_vector)
expectation_vector = np.squeeze(expectation_vector)
covariance_matrix = np.squeeze(covariance_matrix)
if not _are_valid_moments(data_vector, covariance_matrix) \
or not _are_valid_moments(expectation_vector, covariance_matrix):
raise ValueError("Check input dimensions.")
dim = np.size(data_vector)
log_normalisation = dim / 2. * np.log((degree - 1) * np.pi) \
+ loggamma((degree - dim) / 2.) \
- loggamma(degree / 2.)
log_determinant = mat_logdet(covariance_matrix)
log_pseudo_chisq = degree / 2. * np.log(
1 + chi_square(
data_vector - expectation_vector, covariance_matrix
) / (degree - 1)
)
density = - (log_normalisation + log_determinant / 2 + log_pseudo_chisq)
return density if ret_log else np.exp(density)
# Likelihoods
# -----------------------------------------------------------------------------
class LikelihoodWarning(UserWarning):
"""Likelihood evaluation warning.
"""
def spherical_covariance(pivot, spherical_model, **kwargs):
r"""Compute the parametrised covariance matrix of spherical Fourier
coefficients.
Parameters
----------
pivot : {'natural', 'spectral'}
Pivot order for vectorisation.
spherical_model : :class:`~harmonia.reader.models.SphericalCorrelator`
Spherical correlator base model.
**kwargs
Parameters (other than `pivot`) to be passed to |correlator_matrix|
of `spherical_model`.
Returns
-------
covariance_matrix : complex :class:`numpy.ndarray`
Covariance matrix.
See Also
--------
:class:`~harmonia.reader.models.SphericalCorrelator`
"""
covariance_matrix = spherical_model.correlator_matrix(pivot, **kwargs)
return covariance_matrix
def cartesian_moments(pivot, orders, cartesian_model,
covariance_estimator=None, mode_counts=None, **kwargs):
"""Compute the parametrised mean and covariance of Cartesian
power spectrum multipoles.
Parameters
----------
pivot : {'order', 'wavenumber'}
Pivot order for vectorisation.
orders : list of int
Orders of the power spectrum multipoles.
cartesian_model : :class:`~.models.CartesianMultipoles`
Cartesian power multipoles base model.
covariance_estimator : :class:`~.CovarianceEstimator` *or None, optional*
Cartesian power multipole covariance estimator. Its
:attr:`wavenumbers` must match wavenumbers associated
with `cartesian_model`. If `None`, no correlation between power
spectrum multipoles is assumed but `mode_counts` must be provided
for calculating the power spectrum variance.
mode_counts : int, array_like or None, optional
Number of independent modes for each power spectrum measurement
(default is `None`) used to calculate the power spectrum variance.
Ignored if `covariance_estimator` is provided.
**kwargs
Parameters (other than `orders`) to be passed to
|convolved_power_multipoles| of `cartesian_model`.
Returns
-------
expectation : float :class:`numpy.ndarray`
Power spectrum expectation at the specified wavenumbers.
covariance : float :class:`numpy.ndarray`
Power spectrum variance at the specified wavenumbers.
"""
expectation = cartesian_model.convolved_power_multipoles(
orders, **kwargs
).vectorise(pivot)
# Either fall back to a mode-counting variance, or rescale the estimated fiducial
# covariance after checking that model and estimator wavenumbers agree.
if covariance_estimator is None:
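# Diagonal approximation: each multipole estimate is treated as independent
# with variance P^2 / N_modes.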
covariance = expectation ** 2 / np.asarray(mode_counts)
else:
assert np.allclose(
cartesian_model.attrs['wavenumbers'],
covariance_estimator.wavenumbers,
atol=0.001
), (
"The wavenumbers at which the Cartesian power multipole model "
"is evaluated must match the wavenumbers at which "
"the fiducial covariance matrix is estimated."
)
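# Rescale the estimated fiducial covariance by the ratio of the current model
# prediction to the fiducial prediction: C = D C_fid D with D = diag(P_model / P_fid).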
fiducial_expectation = \
covariance_estimator.get_fiducial_vector(pivot)
fiducial_covariance = \
covariance_estimator.get_fiducial_covariance(pivot)
covariance = np.linalg.multi_dot([
np.diag(expectation / fiducial_expectation),
fiducial_covariance,
np.diag(expectation / fiducial_expectation)
])
return expectation, covariance
class LogLikelihood:
"""Construct the logarithmic likelihood function from
cosmological data.
Parameters
----------
spherical_data : :class:`~.arrays.SphericalArray` *or None, optional*
Spherical Fourier coefficient data (default is `None`).
cartesian_data : :class:`~.arrays.CartesianArray` *or None, optional*
Cartesian power spectrum multipole data (default is `None`).
covariance_estimator : :class:`~.CovarianceEstimator` *or None, optional*
Cartesian multipole covariance estimator (default is `None`).
mode_counts : int, array_like or None, optional
Number of independent modes for each Cartesian data point (default
is `None`) as an alternative to `covariance_estimator`. Ignored
if `covariance_estimator` is provided.
base_spherical_model : :class:`~.SphericalCorrelator` *or None, optional*
Baseline spherical correlator model (default is `None`).
base_cartesian_model : :class:`~.CartesianMultipoles` *or None, optional*
Baseline Cartesian multipole model (default is `None`).
spherical_pivot : {'natural', 'spectral'}, optional
Pivot order for spherical map data vectorisation (default is
'natural').
cartesian_pivot : {'order', 'wavenumber'}, optional
Pivot order for Cartesian map data vectorisation (default is
'order').
nbar : float or None, optional
Mean particle number density (in cubic :math:`h`/Mpc). If
`None` (default), shot noise is neglected.
contrast : float or None, optional
If not `None` (default), this adds additional shot noise level
``1 / (contrast * nbar)`` due to a FKP-style random catalogue.
tracer_p : float, optional
Tracer-dependent parameter for bias modulation by `f_nl`
(default is 1.).
comm : :class:`mpi4py.MPI.Comm` *or None, optional*
MPI communicator (default is `None`).
Attributes
----------
attrs : dict
Directory holding input parameters not corresponding to any of
the following attributes.
spherical_data : :class:`~.algorithms.arrays.SphericalArray` or None
Spherical Fourier coefficient data.
cartesian_data : :class:`~.algorithms.arrays.CartesianArray` or None
Cartesian power spectrum multipole data.
covariance_estimator : :class:`~.CovarianceEstimator` or None
Cartesian multipole covariance estimator.
base_spherical_model : :class:`~.SphericalCorrelator` or None
Baseline spherical correlator model.
base_cartesian_model : :class:`~.CartesianMultipoles` or None
Baseline Cartesian multipole model.
"""
def __init__(self, spherical_data=None, cartesian_data=None,
covariance_estimator=None, mode_counts=None,
base_spherical_model=None, base_cartesian_model=None,
spherical_pivot='natural', cartesian_pivot='order',
nbar=None, contrast=None, tracer_p=1., comm=None):
self.logger = logging.getLogger(self.__class__.__name__)
self.comm = comm
self.attrs = {
'spherical_pivot': spherical_pivot,
'cartesian_pivot': cartesian_pivot,
'mode_counts': mode_counts,
'nbar': nbar,
'contrast': contrast,
'tracer_p': tracer_p,
}
self.spherical_data = spherical_data
self.cartesian_data = cartesian_data
self.covariance_estimator = covariance_estimator
self.base_spherical_model = base_spherical_model
self.base_cartesian_model = base_cartesian_model
def spherical_map_likelihood(self, b_1, f_nl, exclude_degrees=(),
compression_matrix=None, **kwargs):
"""Evaluate the spherical map logarithmic likelihood.
Parameters
----------
b_1 : float
Scale-independent linear bias.
f_nl : float or None
Local primordial non-Gaussianity.
exclude_degrees : tuple of int, optional
If not empty (default is empty), modes whose spherical degree
matches one of its elements are removed from the likelihood.
compression_matrix : :class:`numpy.ndarray` *or None*, optional
If not `None` (default), both the data vector and the model
covariance matrix are processed for data compression. This
must be compatible with `exclude_degrees`, i.e. it accounts
for elements removed from the data vector and covariance
matrix by `exclude_degrees`.
**kwargs
Additional parameters to be passed to |correlator_matrix| of
:attr:`base_spherical_model`.
Returns
-------
log_likelihood : float
Logarithmic likelihood.
See Also
--------
:class:`~harmonia.surveyor.synthesis.generate_compression_matrix`
:class:`~harmonia.reader.likelihoods.spherical_covariance`
|correlator_matrix|
"""
_OVERFLOW_DOWNSCALE = 10**4
data_vector = \
self.spherical_data.vectorise(self.attrs['spherical_pivot'])
covariance_matrix = spherical_covariance(
self.attrs['spherical_pivot'], self.base_spherical_model,
b_1=b_1, f_nl=f_nl,
nbar=self.attrs['nbar'],
contrast=self.attrs['contrast'],
tracer_p=self.attrs['tracer_p'],
**kwargs
)
# pylint: disable=no-member
if exclude_degrees:
deselector = np.logical_or.reduce([
self.spherical_data.array['index'][:, 0] == deg
for deg in exclude_degrees
])
data_vector = data_vector[~deselector]
covariance_matrix = \
covariance_matrix[~deselector, :][:, ~deselector]
if compression_matrix is not None:
data_vector = np.linalg.multi_dot([
compression_matrix, data_vector
])
covariance_matrix = np.linalg.multi_dot([
compression_matrix,
covariance_matrix,
np.conj(compression_matrix.T)
])
log_likelihood = complex_normal_pdf(
data_vector, covariance_matrix, downscale=_OVERFLOW_DOWNSCALE,
)
return log_likelihood
def cartesian_map_likelihood(self, b_1, f_nl, orders=None,
num_samples=None, **kwargs):
"""Evaluate the Cartesian map logarithmic likelihood.
Parameters
----------
b_1 : float
Scale-independent linear bias of the tracer particles.
f_nl : float or None
Local primordial non-Gaussianity.
orders : list of int or None, optional
Orders of the power spectrum multipoles. If `None` (default),
only the monopole is used.
num_samples : int or None, optional
If `None` (default), the normal distribution is used without
correction for covariance estimation uncertainty; otherwise
it is passed as `degree` to :func:`modified_student_pdf` for
covariance estimation uncertainty correction [1]_.
**kwargs
Additional parameters to be passed to
|convolved_power_multipoles| of :attr:`base_cartesian_model`.
Returns
-------
log_likelihood : float
Logarithmic likelihood.
See Also
--------
:class:`~harmonia.reader.likelihoods.cartesian_moments`
|convolved_power_multipoles|
.. [1] Sellentin E. & Heavens A. F., 2016. MNRAS 456(1), L132–L136.
[arXiv: `1511.05969 <https://arxiv.org/abs/1511.05969>`_]
"""
orders = orders or [0]
data_vector = \
self.cartesian_data.vectorise(self.attrs['cartesian_pivot'])
expectation_vector, covariance_matrix = cartesian_moments(
self.attrs['cartesian_pivot'], orders, self.base_cartesian_model,
covariance_estimator=self.covariance_estimator,
mode_counts=self.attrs['mode_counts'],
b_1=b_1, f_nl=f_nl,
nbar=self.attrs['nbar'],
contrast=self.attrs['contrast'],
tracer_p=self.attrs['tracer_p'],
**kwargs
)
if self.covariance_estimator is not None and num_samples is not None:
log_likelihood = modified_student_pdf(
data_vector, expectation_vector, covariance_matrix,
degree=num_samples
)
else:
log_likelihood = multivariate_normal_pdf(
data_vector, expectation_vector, covariance_matrix
)
return log_likelihood
/django-dojo-0.0.1.tar.gz/django-dojo-0.0.1/dojo/static/dojo/dojox/geo/openlayers/WidgetFeature.js
define([
"dojo/_base/declare",
"dojo/dom-style",
"dojo/_base/lang",
"dijit/registry",
"./Feature"
], function(declare, style, lang, registry, Feature){
/*=====
dojox.geo.openlayers.__WidgetFeatureArgs = {
// summary:
// The keyword arguments that can be passed in a WidgetFeature constructor.
// You must define a least one widget retrieval parameter and the geo-localization parameters.
// createWidget: Function?
// Function for widget creation. Must return a `dijit._Widget`.
// dojoType: String?
// The class of a widget to create.
// dijitId: String?
// The dijitId of an existing widget.
// widget: dijit._Widget?
// An already created widget.
// width: Number?
// The width of the widget.
// height: Number?
// The height of the widget.
// longitude: Number
// The longitude, in decimal degrees where to place the widget.
// latitude: Number
// The latitude, in decimal degrees where to place the widget.
};
=====*/
return declare("dojox.geo.openlayers.WidgetFeature", Feature, {
// summary:
// Wraps a Dojo widget, provides geo-localisation of the widget and an interface
// to the Layer class.
// description:
// This class allows adding a widget to a `dojox.geo.openlayers.Layer`.
_widget: null,
_bbox: null,
constructor: function(params){
// summary:
// Constructs a new `dojox.geo.openlayers.WidgetFeature`
// params: dojox.geo.openlayers.__WidgetFeatureArgs
// The parameters describing the widget.
this._params = params;
},
setParameters: function(params){
// summary:
// Sets the parameters describing the widget.
// params: dojox.geo.openlayers.__WidgetFeatureArgs
// The parameters describing the widget.
this._params = params;
},
getParameters: function(){
// summary:
// Returns the parameters describing the widget.
// returns: dojox.geo.openlayers.__WidgetFeatureArgs
// The parameters describing the widget.
return this._params;
},
_getWidget: function(){
// summary:
// Creates the widget if necessary and returns it.
// tags:
// private
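// Widget lookup order: a createWidget callback takes precedence, then a
// dojoType class name, then a dijitId registry lookup, then a pre-built widget.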
var params = this._params;
if((this._widget == null) && (params != null)){
var w = null;
if(typeof (params.createWidget) == "function"){
w = params.createWidget.call(this);
}else if(params.dojoType){
dojo["require"](params.dojoType);
var c = lang.getObject(params.dojoType);
w = new c(params);
}else if(params.dijitId){
w = registry.byId(params.dijitId);
}else if(params.widget){
w = params.widget;
}
if(w != null){
this._widget = w;
if(typeof (w.startup) == "function"){
w.startup();
}
var n = w.domNode;
if(n != null){
style.set(n, {
position: "absolute"
});
}
}
this._widget = w;
}
return this._widget;
},
_getWidgetWidth: function(){
// summary:
// gets the widget width
// tags:
// private
var p = this._params;
if(p.width){
return p.width;
}
var w = this._getWidget();
if(w){
return style.get(w.domNode, "width");
}
return 10;
},
_getWidgetHeight: function(){
// summary:
// gets the widget height
// tags:
// private
var p = this._params;
if(p.height){
return p.height;
}
var w = this._getWidget();
if(w){
return style.get(w.domNode, "height");
}
return 10;
},
render: function(){
// summary:
// renders the widget.
// description:
// Places the widget according to the longitude and latitude defined in the parameters.
// This function is called when the center of the map or the zoom factor changes.
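// The widget is centred on the projected point: its top-left corner is the
// projected (x, y) minus half of the widget's width and height.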
var layer = this.getLayer();
var widget = this._getWidget();
if(widget == null){
return;
}
var params = this._params;
var lon = params.longitude;
var lat = params.latitude;
var from = this.getCoordinateSystem();
var map = layer.getDojoMap();
var p = map.transformXY(lon, lat, from);
var a = this._getLocalXY(p);
var width = this._getWidgetWidth();
var height = this._getWidgetHeight();
var x = a[0] - width / 2;
var y = a[1] - height / 2;
var dom = widget.domNode;
var pa = layer.olLayer.div;
if(dom.parentNode != pa){
if(dom.parentNode){
dom.parentNode.removeChild(dom);
}
pa.appendChild(dom);
}
this._updateWidgetPosition({
x: x,
y: y,
width: width,
height: height
});
},
_updateWidgetPosition: function(box){
// summary:
// Places the widget with the computed x and y values
// tags:
// private
// var box = this._params;
var w = this._widget;
var dom = w.domNode;
style.set(dom, {
position: "absolute",
left: box.x + "px",
top: box.y + "px",
width: box.width + "px",
height: box.height + "px"
});
if(w.srcNodeRef){
style.set(w.srcNodeRef, {
position: "absolute",
left: box.x + "px",
top: box.y + "px",
width: box.width + "px",
height: box.height + "px"
});
}
if(lang.isFunction(w.resize)){
w.resize({
w: box.width,
h: box.height
});
}
},
remove: function(){
// summary:
// removes this feature.
// description:
			//		Removes this feature by disconnecting the widget from the DOM.
			var w = this._getWidget();
if(!w){
return;
}
var dom = w.domNode;
if(dom.parentNode){
dom.parentNode.removeChild(dom);
}
}
});
}); | PypiClean |
/jupyros-0.7.0a0.tar.gz/jupyros-0.7.0a0/js/node_modules/moment/dist/locale/be.js |
import moment from '../moment';
function plural(word, num) {
var forms = word.split('_');
return num % 10 === 1 && num % 100 !== 11
? forms[0]
: num % 10 >= 2 && num % 10 <= 4 && (num % 100 < 10 || num % 100 >= 20)
? forms[1]
: forms[2];
}
function relativeTimeWithPlural(number, withoutSuffix, key) {
var format = {
ss: withoutSuffix ? 'секунда_секунды_секунд' : 'секунду_секунды_секунд',
mm: withoutSuffix ? 'хвіліна_хвіліны_хвілін' : 'хвіліну_хвіліны_хвілін',
hh: withoutSuffix ? 'гадзіна_гадзіны_гадзін' : 'гадзіну_гадзіны_гадзін',
dd: 'дзень_дні_дзён',
MM: 'месяц_месяцы_месяцаў',
yy: 'год_гады_гадоў',
};
if (key === 'm') {
return withoutSuffix ? 'хвіліна' : 'хвіліну';
} else if (key === 'h') {
return withoutSuffix ? 'гадзіна' : 'гадзіну';
} else {
return number + ' ' + plural(format[key], +number);
}
}
export default moment.defineLocale('be', {
months: {
format: 'студзеня_лютага_сакавіка_красавіка_траўня_чэрвеня_ліпеня_жніўня_верасня_кастрычніка_лістапада_снежня'.split(
'_'
),
standalone:
'студзень_люты_сакавік_красавік_травень_чэрвень_ліпень_жнівень_верасень_кастрычнік_лістапад_снежань'.split(
'_'
),
},
monthsShort:
'студ_лют_сак_крас_трав_чэрв_ліп_жнів_вер_каст_ліст_снеж'.split('_'),
weekdays: {
format: 'нядзелю_панядзелак_аўторак_сераду_чацвер_пятніцу_суботу'.split(
'_'
),
standalone:
'нядзеля_панядзелак_аўторак_серада_чацвер_пятніца_субота'.split(
'_'
),
isFormat: /\[ ?[Ууў] ?(?:мінулую|наступную)? ?\] ?dddd/,
},
weekdaysShort: 'нд_пн_ат_ср_чц_пт_сб'.split('_'),
weekdaysMin: 'нд_пн_ат_ср_чц_пт_сб'.split('_'),
longDateFormat: {
LT: 'HH:mm',
LTS: 'HH:mm:ss',
L: 'DD.MM.YYYY',
LL: 'D MMMM YYYY г.',
LLL: 'D MMMM YYYY г., HH:mm',
LLLL: 'dddd, D MMMM YYYY г., HH:mm',
},
calendar: {
sameDay: '[Сёння ў] LT',
nextDay: '[Заўтра ў] LT',
lastDay: '[Учора ў] LT',
nextWeek: function () {
return '[У] dddd [ў] LT';
},
lastWeek: function () {
switch (this.day()) {
case 0:
case 3:
case 5:
case 6:
return '[У мінулую] dddd [ў] LT';
case 1:
case 2:
case 4:
return '[У мінулы] dddd [ў] LT';
}
},
sameElse: 'L',
},
relativeTime: {
future: 'праз %s',
past: '%s таму',
s: 'некалькі секунд',
m: relativeTimeWithPlural,
mm: relativeTimeWithPlural,
h: relativeTimeWithPlural,
hh: relativeTimeWithPlural,
d: 'дзень',
dd: relativeTimeWithPlural,
M: 'месяц',
MM: relativeTimeWithPlural,
y: 'год',
yy: relativeTimeWithPlural,
},
meridiemParse: /ночы|раніцы|дня|вечара/,
isPM: function (input) {
return /^(дня|вечара)$/.test(input);
},
meridiem: function (hour, minute, isLower) {
if (hour < 4) {
return 'ночы';
} else if (hour < 12) {
return 'раніцы';
} else if (hour < 17) {
return 'дня';
} else {
return 'вечара';
}
},
dayOfMonthOrdinalParse: /\d{1,2}-(і|ы|га)/,
ordinal: function (number, period) {
switch (period) {
case 'M':
case 'd':
case 'DDD':
case 'w':
case 'W':
return (number % 10 === 2 || number % 10 === 3) &&
number % 100 !== 12 &&
number % 100 !== 13
? number + '-і'
: number + '-ы';
case 'D':
return number + '-га';
default:
return number;
}
},
week: {
dow: 1, // Monday is the first day of the week.
doy: 7, // The week that contains Jan 7th is the first week of the year.
},
}); | PypiClean |
/LaMark-0.2.1.tar.gz/LaMark-0.2.1/lamark/latexgen.py | import subprocess
import tempfile
import os
import shutil
import re
import logging
import sys
import lamarkargumenterror
import lmast
import textwrap
MATH_NAME = "math"
DISPLAYMATH_NAME = "displaymath"
PREAMBLE_NAME = "pre"
DOC_NAME = "latex"
class LatexGen(object):
"""Given a peice of Latex, generate an image, and the markdown
necessary to display the image.
"""
DICT_image_zoom = "imgZoom"
DICT_fn_prefix = "path"
DICT_fn = "imgName"
DICT_alt_txt = "alt"
prefs_dict = {
"imgZoom": None,
"path": None,
"imgName": None,
"alt": None,
"func_name": None,
}
TEX_TMP_NAME = "textemp.tex"
def __init__(self, args, shared_dict):
""" This initializer SHOULD NOT be used by itself.
Use the `with` keyword so the __exit__ and __enter__
methods get used.
"""
# Find where the filename extension begins in args.f
if args.f is not None:
ext_period = args.f.rfind(".")
# Remove the filename extension
if ext_period > 0:
self.img_prefix = args.f[:ext_period]
else:
self.img_prefix = args.f
else:
self.img_prefix = None
self._default_zoom = args.zoom
self._default_path = getattr(args, "img_path", "")
self._gen_images = getattr(args, "gen_images", True)
self._fn_gen = self._gen_name()
self._reset_prefs()
self._tex_tmp_dir = self._create_tmp_dir()
self._image_dir = "."
if args.o and len(os.path.dirname(args.o)) > 0:
self._image_dir = os.path.dirname(args.o)
if args.i:
self._image_dir = args.i
if self._image_dir[-1] != "/":
self._image_dir = self._image_dir + "/"
self._debug_flag = args.debug
self._check_preconditions()
self.latex_preamble = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.tear_down()
def tear_down(self):
shutil.rmtree(self._tex_tmp_dir)
def _create_tmp_dir(self):
return tempfile.mkdtemp()
def _check_preconditions(self):
if not os.path.exists(self._image_dir):
raise IOError("ERROR: Image dir does not exist.")
def _reset_prefs(self):
        self.prefs_dict = {
"imgZoom": None,
"path": None,
"imgName": None,
"alt": None,
"title": None,
#"x": 0,
#"y": 0,
#"unitlength": None,
}
def generate(self, children, lineno, args, kwargs):
latex_body = reduce(
lambda string, child: string + child.string,
children,
"")
# Ignore empty strings
#if not latex_body.strip():
# return lmast.Markdown("", lineno)
self._reset_prefs()
self._process_tag_args(lineno, args, kwargs)
self._validate_args(args, kwargs)
if (
#kwargs["func_name"] == PICTURE_NAME or
kwargs["func_name"] == MATH_NAME or
kwargs["func_name"] == DISPLAYMATH_NAME or
kwargs["func_name"] == DOC_NAME):
image_name = self._compile_latex(latex_body)
if self.prefs_dict["alt"] is None:
alt_text = image_name
else:
alt_text = self.prefs_dict["alt"]
if self.prefs_dict["title"] is not None:
new_node = lmast.Markdown(
'![%s](%s "%s")' % (alt_text, image_name, self.prefs_dict["title"]),
lineno)
else:
new_node = lmast.Markdown(
"![%s](%s)" % (alt_text, image_name),
lineno)
elif kwargs["func_name"] == PREAMBLE_NAME:
self.latex_preamble = latex_body
new_node = lmast.Markdown("", lineno)
else:
raise Exception("Oops. Something broke in the LaTeX code gen.")
return new_node
def _process_math_args(self, lineno, args, kwargs):
self.prefs_dict["func_name"] = kwargs["func_name"]
self.prefs_dict["path"] = args[0] if len(args) > 0 else self._default_path
self.prefs_dict["alt"] = args[1] if len(args) > 1 else None
self.prefs_dict["title"] = args[2] if len(args) > 2 else None
self.prefs_dict["imgName"] = args[3] if len(args) > 3 else None
self.prefs_dict["imgZoom"] = args[4] if len(args) > 4 else self._default_zoom
for key, value in kwargs.items():
if key not in self.prefs_dict:
raise lamarkargumenterror.LaMarkArgumentError(
"Unrecognized argument: %s" % key,
lineno)
self.prefs_dict[key] = value
if (
self.prefs_dict["path"] is not None and
len(self.prefs_dict["path"]) > 0 and
self.prefs_dict["path"][-1] != "/"):
self.prefs_dict["path"] += "/"
def _process_doc_args(self, lineno, args, kwargs):
self._process_math_args(lineno, args, kwargs)
#def _process_picture_args(self, lineno, args, kwargs):
#self.prefs_dict["func_name"] = kwargs["func_name"]
#self.prefs_dict["x"] = args[0] if len(args) > 0 else "0"
#self.prefs_dict["y"] = args[1] if len(args) > 1 else "0"
#self.prefs_dict["unitlength"] = args[2] if len(args) > 2 else None
#self.prefs_dict["path"] = args[3] if len(args) > 3 else ""
#self.prefs_dict["alt"] = args[4] if len(args) > 4 else ""
#self.prefs_dict["title"] = args[5] if len(args) > 5 else None
#self.prefs_dict["imgZoom"] = args[6] if len(args) > 6 else "2000"
#self.prefs_dict["imgName"] = args[7] if len(args) > 7 else ""
#for key, value in kwargs.items():
#if key not in self.prefs_dict:
#raise lamarkargumenterror.LaMarkArgumentError(
#"Unrecognized argument: %s" % key,
#lineno)
#self.prefs_dict[key] = value
#if len(self.prefs_dict["path"]) > 0 and self.prefs_dict["path"][-1] != "/":
#self.prefs_dict["path"] += "/"
def _process_pre_args(self, lineno, args, kwargs):
self.prefs_dict["func_name"] = kwargs["func_name"]
def _process_tag_args(self, lineno, args, kwargs):
logging.debug(args)
if (
kwargs["func_name"] == MATH_NAME or
kwargs["func_name"] == DISPLAYMATH_NAME):
self._process_math_args(lineno, args, kwargs)
#elif kwargs["func_name"] == PICTURE_NAME:
#self._process_picture_args(lineno, args, kwargs)
elif kwargs["func_name"] == DOC_NAME:
self._process_doc_args(lineno, args, kwargs)
elif kwargs["func_name"] == PREAMBLE_NAME:
self._process_pre_args(lineno, args, kwargs)
else:
raise Exception("Oops. Something broke in the latex gen.")
def _validate_args(self, args, kwargs):
if self.prefs_dict["imgZoom"] is None:
self.prefs_dict["imgZoom"] = "2000"
if int(self.prefs_dict["imgZoom"]) > 3000:
logging.warn("imgZoom is very large: %d", int(self.prefs_dict["imgZoom"]))
if int(self.prefs_dict["imgZoom"]) < 1000:
logging.warn("imgZoom is very small: %d", int(self.prefs_dict["imgZoom"]))
def _gen_name(self):
counter = 0
if self.img_prefix is None:
img_prefix = ""
else:
img_prefix = self.img_prefix + "-"
while True:
yield img_prefix + str(counter) + ".png"
counter += 1
def _gen_latex(self, latex_body):
if self.prefs_dict["func_name"] == DOC_NAME:
return latex_body.strip()
latex_string = "\documentclass[fleqn]{standalone}\n"
if self.latex_preamble is not None:
latex_string += self.latex_preamble + "\n"
if self.prefs_dict["func_name"] == MATH_NAME:
latex_string += "\usepackage{mathtools}\n"
latex_string += "\\begin{document}\n"
latex_string += "\\begin{math}\n"
latex_string += latex_body.strip()
latex_string += "\\end{math}\n"
latex_string += "\\end{document}\n"
elif self.prefs_dict["func_name"] == DISPLAYMATH_NAME:
latex_string += "\usepackage{mathtools}\n"
latex_string += "\\begin{document}\n"
latex_string += "\\begin{displaymath}\n"
latex_string += latex_body.strip()
latex_string += "\\end{displaymath}\n"
latex_string += "\\end{document}\n"
#elif self.prefs_dict["func_name"] == PICTURE_NAME:
#latex_string += "\\begin{document}\n"
#if self.prefs_dict["unitlength"] is not None:
#latex_string += "\\setlength{\\unitlength}{%s}\n" % self.prefs_dict["unitlength"]
#latex_string += "\\begin{picture}(%s,%s)\n" % (
#self.prefs_dict["x"], self.prefs_dict["y"])
#latex_string += latex_body + "\n"
#latex_string += "\\end{picture}\n"
#latex_string += "\\end{document}\n"
else:
            raise Exception("Oops. Something broke in the LaTeX code gen.")
return latex_string
def _compile_latex(self, latex_body):
if self.prefs_dict["imgName"] is None:
image_name = self._fn_gen.next()
else:
image_name = self.prefs_dict["imgName"]
if self.prefs_dict["path"] is not None:
full_path_image_name = self.prefs_dict["path"] + image_name
else:
full_path_image_name = image_name
if self._gen_images == False:
return full_path_image_name
latex_string = self._gen_latex(latex_body)
logging.debug("Latex String: " + repr(latex_string))
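        # For reference, the two subprocess calls below are roughly equivalent to the
        # following shell pipeline (paths, zoom value and output name are illustrative only):
        #
        #     latex -output-directory=$TMPDIR -halt-on-error -interaction=batchmode $TMPDIR/textemp.tex
        #     dvipng -T tight -bg Transparent -x 2000 -z 6 $TMPDIR/textemp.dvi -o <image_dir>/<image_name>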
# Create tmp dir and tmp file to write LaTeX to.
tex_tmp = open(self._tex_tmp_dir + "/" + self.TEX_TMP_NAME, "w")
#tex_tmp.write(boilerplate_header + latex_string + boilerplate_footer)
tex_tmp.write(latex_string)
tex_tmp.close()
command_out = subprocess.PIPE
# Call latex to convert tmp tex file to dvi.
latex_call = [
"latex",
"-output-directory=" + self._tex_tmp_dir,
"-halt-on-error",
"-interaction=batchmode",
tex_tmp.name,
]
p = subprocess.Popen(
latex_call,
stderr=command_out,
stdout=command_out)
out,err = p.communicate()
logging.debug(out)
if p.returncode:
raise CommandException('Error trying to render LaTeX: "' +
str(latex_body) + '".\nLaTeX threw error: "' + str(out)+ '".')
# Generate file for png and convert dvi to png
if self.prefs_dict["imgZoom"] is None:
image_zoom = "2000"
else:
image_zoom = str(self.prefs_dict["imgZoom"])
dvipng_call = [
"dvipng",
"-T", "tight",
"-bg", "Transparent",
"-x", image_zoom,
"-z", "6",
tex_tmp.name[0:-3] + "dvi",
"-o", self._image_dir + ("%s" % image_name),
]
p = subprocess.Popen(
dvipng_call,
stdout=command_out,
stderr=command_out)
out,err = p.communicate()
logging.debug(out)
if p.returncode:
raise CommandException("Error in call to dvipng: " + str(out))
return full_path_image_name
class CommandException(Exception):
def __init__(self, msg=""):
self.msg = msg
def __str__(self):
return str(self.msg)
def __repr(self):
return "CommandException(%s)" % repr(self.msg) | PypiClean |
/dschmidt-cdktf-provider-google-0.0.1.tar.gz/dschmidt-cdktf-provider-google-0.0.1/src/dschmidt_cdktf_provider_google/sql_database/__init__.py | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import cdktf
import constructs
class SqlDatabase(
cdktf.TerraformResource,
metaclass=jsii.JSIIMeta,
jsii_type="@dschmidt/provider-google.sqlDatabase.SqlDatabase",
):
'''Represents a {@link https://www.terraform.io/docs/providers/google/r/sql_database google_sql_database}.'''
def __init__(
self,
scope: constructs.Construct,
id_: builtins.str,
*,
instance: builtins.str,
name: builtins.str,
charset: typing.Optional[builtins.str] = None,
collation: typing.Optional[builtins.str] = None,
id: typing.Optional[builtins.str] = None,
project: typing.Optional[builtins.str] = None,
timeouts: typing.Optional[typing.Union["SqlDatabaseTimeouts", typing.Dict[str, typing.Any]]] = None,
connection: typing.Optional[typing.Union[typing.Union[cdktf.SSHProvisionerConnection, typing.Dict[str, typing.Any]], typing.Union[cdktf.WinrmProvisionerConnection, typing.Dict[str, typing.Any]]]] = None,
count: typing.Optional[jsii.Number] = None,
depends_on: typing.Optional[typing.Sequence[cdktf.ITerraformDependable]] = None,
for_each: typing.Optional[cdktf.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[cdktf.TerraformResourceLifecycle, typing.Dict[str, typing.Any]]] = None,
provider: typing.Optional[cdktf.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[cdktf.FileProvisioner, typing.Dict[str, typing.Any]], typing.Union[cdktf.LocalExecProvisioner, typing.Dict[str, typing.Any]], typing.Union[cdktf.RemoteExecProvisioner, typing.Dict[str, typing.Any]]]]] = None,
) -> None:
'''Create a new {@link https://www.terraform.io/docs/providers/google/r/sql_database google_sql_database} Resource.
:param scope: The scope in which to define this construct.
:param id_: The scoped construct ID. Must be unique amongst siblings in the same scope
:param instance: The name of the Cloud SQL instance. This does not include the project ID. Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#instance SqlDatabase#instance}
:param name: The name of the database in the Cloud SQL instance. This does not include the project ID or instance name. Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#name SqlDatabase#name}
:param charset: The charset value. See MySQL's `Supported Character Sets and Collations <https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html>`_ and Postgres' `Character Set Support <https://www.postgresql.org/docs/9.6/static/multibyte.html>`_ for more details and supported values. Postgres databases only support a value of 'UTF8' at creation time. Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#charset SqlDatabase#charset}
:param collation: The collation value. See MySQL's `Supported Character Sets and Collations <https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html>`_ and Postgres' `Collation Support <https://www.postgresql.org/docs/9.6/static/collation.html>`_ for more details and supported values. Postgres databases only support a value of 'en_US.UTF8' at creation time. Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#collation SqlDatabase#collation}
:param id: Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#id SqlDatabase#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param project: Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#project SqlDatabase#project}.
:param timeouts: timeouts block. Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#timeouts SqlDatabase#timeouts}
:param connection:
:param count:
:param depends_on:
:param for_each:
:param lifecycle:
:param provider:
:param provisioners:
'''
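        # Illustrative usage sketch (not part of the generated bindings): inside a cdktf
        # TerraformStack with the Google provider configured, a database could be declared
        # roughly as follows; the construct id, instance and database names are assumptions.
        #
        #     SqlDatabase(self, "app-db",
        #         instance="my-sql-instance",
        #         name="appdb",
        #         charset="UTF8",
        #     )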
if __debug__:
type_hints = typing.get_type_hints(SqlDatabase.__init__)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument id_", value=id_, expected_type=type_hints["id_"])
config = SqlDatabaseConfig(
instance=instance,
name=name,
charset=charset,
collation=collation,
id=id,
project=project,
timeouts=timeouts,
connection=connection,
count=count,
depends_on=depends_on,
for_each=for_each,
lifecycle=lifecycle,
provider=provider,
provisioners=provisioners,
)
jsii.create(self.__class__, self, [scope, id_, config])
@jsii.member(jsii_name="putTimeouts")
def put_timeouts(
self,
*,
create: typing.Optional[builtins.str] = None,
delete: typing.Optional[builtins.str] = None,
update: typing.Optional[builtins.str] = None,
) -> None:
'''
:param create: Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#create SqlDatabase#create}.
:param delete: Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#delete SqlDatabase#delete}.
:param update: Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#update SqlDatabase#update}.
'''
value = SqlDatabaseTimeouts(create=create, delete=delete, update=update)
return typing.cast(None, jsii.invoke(self, "putTimeouts", [value]))
@jsii.member(jsii_name="resetCharset")
def reset_charset(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetCharset", []))
@jsii.member(jsii_name="resetCollation")
def reset_collation(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetCollation", []))
@jsii.member(jsii_name="resetId")
def reset_id(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetId", []))
@jsii.member(jsii_name="resetProject")
def reset_project(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetProject", []))
@jsii.member(jsii_name="resetTimeouts")
def reset_timeouts(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetTimeouts", []))
@jsii.member(jsii_name="synthesizeAttributes")
def _synthesize_attributes(self) -> typing.Mapping[builtins.str, typing.Any]:
return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "synthesizeAttributes", []))
@jsii.python.classproperty
@jsii.member(jsii_name="tfResourceType")
def TF_RESOURCE_TYPE(cls) -> builtins.str:
return typing.cast(builtins.str, jsii.sget(cls, "tfResourceType"))
@builtins.property
@jsii.member(jsii_name="selfLink")
def self_link(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "selfLink"))
@builtins.property
@jsii.member(jsii_name="timeouts")
def timeouts(self) -> "SqlDatabaseTimeoutsOutputReference":
return typing.cast("SqlDatabaseTimeoutsOutputReference", jsii.get(self, "timeouts"))
@builtins.property
@jsii.member(jsii_name="charsetInput")
def charset_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "charsetInput"))
@builtins.property
@jsii.member(jsii_name="collationInput")
def collation_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "collationInput"))
@builtins.property
@jsii.member(jsii_name="idInput")
def id_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "idInput"))
@builtins.property
@jsii.member(jsii_name="instanceInput")
def instance_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "instanceInput"))
@builtins.property
@jsii.member(jsii_name="nameInput")
def name_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "nameInput"))
@builtins.property
@jsii.member(jsii_name="projectInput")
def project_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "projectInput"))
@builtins.property
@jsii.member(jsii_name="timeoutsInput")
def timeouts_input(
self,
) -> typing.Optional[typing.Union["SqlDatabaseTimeouts", cdktf.IResolvable]]:
return typing.cast(typing.Optional[typing.Union["SqlDatabaseTimeouts", cdktf.IResolvable]], jsii.get(self, "timeoutsInput"))
@builtins.property
@jsii.member(jsii_name="charset")
def charset(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "charset"))
@charset.setter
def charset(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(SqlDatabase, "charset").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "charset", value)
@builtins.property
@jsii.member(jsii_name="collation")
def collation(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "collation"))
@collation.setter
def collation(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(SqlDatabase, "collation").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "collation", value)
@builtins.property
@jsii.member(jsii_name="id")
def id(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "id"))
@id.setter
def id(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(SqlDatabase, "id").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "id", value)
@builtins.property
@jsii.member(jsii_name="instance")
def instance(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "instance"))
@instance.setter
def instance(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(SqlDatabase, "instance").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "instance", value)
@builtins.property
@jsii.member(jsii_name="name")
def name(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "name"))
@name.setter
def name(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(SqlDatabase, "name").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "name", value)
@builtins.property
@jsii.member(jsii_name="project")
def project(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "project"))
@project.setter
def project(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(SqlDatabase, "project").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "project", value)
@jsii.data_type(
jsii_type="@dschmidt/provider-google.sqlDatabase.SqlDatabaseConfig",
jsii_struct_bases=[cdktf.TerraformMetaArguments],
name_mapping={
"connection": "connection",
"count": "count",
"depends_on": "dependsOn",
"for_each": "forEach",
"lifecycle": "lifecycle",
"provider": "provider",
"provisioners": "provisioners",
"instance": "instance",
"name": "name",
"charset": "charset",
"collation": "collation",
"id": "id",
"project": "project",
"timeouts": "timeouts",
},
)
class SqlDatabaseConfig(cdktf.TerraformMetaArguments):
def __init__(
self,
*,
connection: typing.Optional[typing.Union[typing.Union[cdktf.SSHProvisionerConnection, typing.Dict[str, typing.Any]], typing.Union[cdktf.WinrmProvisionerConnection, typing.Dict[str, typing.Any]]]] = None,
count: typing.Optional[jsii.Number] = None,
depends_on: typing.Optional[typing.Sequence[cdktf.ITerraformDependable]] = None,
for_each: typing.Optional[cdktf.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[cdktf.TerraformResourceLifecycle, typing.Dict[str, typing.Any]]] = None,
provider: typing.Optional[cdktf.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[cdktf.FileProvisioner, typing.Dict[str, typing.Any]], typing.Union[cdktf.LocalExecProvisioner, typing.Dict[str, typing.Any]], typing.Union[cdktf.RemoteExecProvisioner, typing.Dict[str, typing.Any]]]]] = None,
instance: builtins.str,
name: builtins.str,
charset: typing.Optional[builtins.str] = None,
collation: typing.Optional[builtins.str] = None,
id: typing.Optional[builtins.str] = None,
project: typing.Optional[builtins.str] = None,
timeouts: typing.Optional[typing.Union["SqlDatabaseTimeouts", typing.Dict[str, typing.Any]]] = None,
) -> None:
'''
:param connection:
:param count:
:param depends_on:
:param for_each:
:param lifecycle:
:param provider:
:param provisioners:
:param instance: The name of the Cloud SQL instance. This does not include the project ID. Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#instance SqlDatabase#instance}
:param name: The name of the database in the Cloud SQL instance. This does not include the project ID or instance name. Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#name SqlDatabase#name}
:param charset: The charset value. See MySQL's `Supported Character Sets and Collations <https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html>`_ and Postgres' `Character Set Support <https://www.postgresql.org/docs/9.6/static/multibyte.html>`_ for more details and supported values. Postgres databases only support a value of 'UTF8' at creation time. Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#charset SqlDatabase#charset}
:param collation: The collation value. See MySQL's `Supported Character Sets and Collations <https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html>`_ and Postgres' `Collation Support <https://www.postgresql.org/docs/9.6/static/collation.html>`_ for more details and supported values. Postgres databases only support a value of 'en_US.UTF8' at creation time. Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#collation SqlDatabase#collation}
:param id: Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#id SqlDatabase#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param project: Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#project SqlDatabase#project}.
:param timeouts: timeouts block. Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#timeouts SqlDatabase#timeouts}
'''
if isinstance(lifecycle, dict):
lifecycle = cdktf.TerraformResourceLifecycle(**lifecycle)
if isinstance(timeouts, dict):
timeouts = SqlDatabaseTimeouts(**timeouts)
if __debug__:
type_hints = typing.get_type_hints(SqlDatabaseConfig.__init__)
check_type(argname="argument connection", value=connection, expected_type=type_hints["connection"])
check_type(argname="argument count", value=count, expected_type=type_hints["count"])
check_type(argname="argument depends_on", value=depends_on, expected_type=type_hints["depends_on"])
check_type(argname="argument for_each", value=for_each, expected_type=type_hints["for_each"])
check_type(argname="argument lifecycle", value=lifecycle, expected_type=type_hints["lifecycle"])
check_type(argname="argument provider", value=provider, expected_type=type_hints["provider"])
check_type(argname="argument provisioners", value=provisioners, expected_type=type_hints["provisioners"])
check_type(argname="argument instance", value=instance, expected_type=type_hints["instance"])
check_type(argname="argument name", value=name, expected_type=type_hints["name"])
check_type(argname="argument charset", value=charset, expected_type=type_hints["charset"])
check_type(argname="argument collation", value=collation, expected_type=type_hints["collation"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
check_type(argname="argument project", value=project, expected_type=type_hints["project"])
check_type(argname="argument timeouts", value=timeouts, expected_type=type_hints["timeouts"])
self._values: typing.Dict[str, typing.Any] = {
"instance": instance,
"name": name,
}
if connection is not None:
self._values["connection"] = connection
if count is not None:
self._values["count"] = count
if depends_on is not None:
self._values["depends_on"] = depends_on
if for_each is not None:
self._values["for_each"] = for_each
if lifecycle is not None:
self._values["lifecycle"] = lifecycle
if provider is not None:
self._values["provider"] = provider
if provisioners is not None:
self._values["provisioners"] = provisioners
if charset is not None:
self._values["charset"] = charset
if collation is not None:
self._values["collation"] = collation
if id is not None:
self._values["id"] = id
if project is not None:
self._values["project"] = project
if timeouts is not None:
self._values["timeouts"] = timeouts
@builtins.property
def connection(
self,
) -> typing.Optional[typing.Union[cdktf.SSHProvisionerConnection, cdktf.WinrmProvisionerConnection]]:
'''
:stability: experimental
'''
result = self._values.get("connection")
return typing.cast(typing.Optional[typing.Union[cdktf.SSHProvisionerConnection, cdktf.WinrmProvisionerConnection]], result)
@builtins.property
def count(self) -> typing.Optional[jsii.Number]:
'''
:stability: experimental
'''
result = self._values.get("count")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def depends_on(self) -> typing.Optional[typing.List[cdktf.ITerraformDependable]]:
'''
:stability: experimental
'''
result = self._values.get("depends_on")
return typing.cast(typing.Optional[typing.List[cdktf.ITerraformDependable]], result)
@builtins.property
def for_each(self) -> typing.Optional[cdktf.ITerraformIterator]:
'''
:stability: experimental
'''
result = self._values.get("for_each")
return typing.cast(typing.Optional[cdktf.ITerraformIterator], result)
@builtins.property
def lifecycle(self) -> typing.Optional[cdktf.TerraformResourceLifecycle]:
'''
:stability: experimental
'''
result = self._values.get("lifecycle")
return typing.cast(typing.Optional[cdktf.TerraformResourceLifecycle], result)
@builtins.property
def provider(self) -> typing.Optional[cdktf.TerraformProvider]:
'''
:stability: experimental
'''
result = self._values.get("provider")
return typing.cast(typing.Optional[cdktf.TerraformProvider], result)
@builtins.property
def provisioners(
self,
) -> typing.Optional[typing.List[typing.Union[cdktf.FileProvisioner, cdktf.LocalExecProvisioner, cdktf.RemoteExecProvisioner]]]:
'''
:stability: experimental
'''
result = self._values.get("provisioners")
return typing.cast(typing.Optional[typing.List[typing.Union[cdktf.FileProvisioner, cdktf.LocalExecProvisioner, cdktf.RemoteExecProvisioner]]], result)
@builtins.property
def instance(self) -> builtins.str:
'''The name of the Cloud SQL instance. This does not include the project ID.
Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#instance SqlDatabase#instance}
'''
result = self._values.get("instance")
assert result is not None, "Required property 'instance' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def name(self) -> builtins.str:
'''The name of the database in the Cloud SQL instance. This does not include the project ID or instance name.
Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#name SqlDatabase#name}
'''
result = self._values.get("name")
assert result is not None, "Required property 'name' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def charset(self) -> typing.Optional[builtins.str]:
'''The charset value.
See MySQL's
`Supported Character Sets and Collations <https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html>`_
and Postgres' `Character Set Support <https://www.postgresql.org/docs/9.6/static/multibyte.html>`_
for more details and supported values. Postgres databases only support
a value of 'UTF8' at creation time.
Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#charset SqlDatabase#charset}
'''
result = self._values.get("charset")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def collation(self) -> typing.Optional[builtins.str]:
'''The collation value.
See MySQL's
`Supported Character Sets and Collations <https://dev.mysql.com/doc/refman/5.7/en/charset-charsets.html>`_
and Postgres' `Collation Support <https://www.postgresql.org/docs/9.6/static/collation.html>`_
for more details and supported values. Postgres databases only support
a value of 'en_US.UTF8' at creation time.
Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#collation SqlDatabase#collation}
'''
result = self._values.get("collation")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def id(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#id SqlDatabase#id}.
Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2.
If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
'''
result = self._values.get("id")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def project(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#project SqlDatabase#project}.'''
result = self._values.get("project")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def timeouts(self) -> typing.Optional["SqlDatabaseTimeouts"]:
'''timeouts block.
Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#timeouts SqlDatabase#timeouts}
'''
result = self._values.get("timeouts")
return typing.cast(typing.Optional["SqlDatabaseTimeouts"], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "SqlDatabaseConfig(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="@dschmidt/provider-google.sqlDatabase.SqlDatabaseTimeouts",
jsii_struct_bases=[],
name_mapping={"create": "create", "delete": "delete", "update": "update"},
)
class SqlDatabaseTimeouts:
def __init__(
self,
*,
create: typing.Optional[builtins.str] = None,
delete: typing.Optional[builtins.str] = None,
update: typing.Optional[builtins.str] = None,
) -> None:
'''
:param create: Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#create SqlDatabase#create}.
:param delete: Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#delete SqlDatabase#delete}.
:param update: Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#update SqlDatabase#update}.
'''
if __debug__:
type_hints = typing.get_type_hints(SqlDatabaseTimeouts.__init__)
check_type(argname="argument create", value=create, expected_type=type_hints["create"])
check_type(argname="argument delete", value=delete, expected_type=type_hints["delete"])
check_type(argname="argument update", value=update, expected_type=type_hints["update"])
self._values: typing.Dict[str, typing.Any] = {}
if create is not None:
self._values["create"] = create
if delete is not None:
self._values["delete"] = delete
if update is not None:
self._values["update"] = update
@builtins.property
def create(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#create SqlDatabase#create}.'''
result = self._values.get("create")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def delete(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#delete SqlDatabase#delete}.'''
result = self._values.get("delete")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def update(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/google/r/sql_database#update SqlDatabase#update}.'''
result = self._values.get("update")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "SqlDatabaseTimeouts(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
class SqlDatabaseTimeoutsOutputReference(
cdktf.ComplexObject,
metaclass=jsii.JSIIMeta,
jsii_type="@dschmidt/provider-google.sqlDatabase.SqlDatabaseTimeoutsOutputReference",
):
def __init__(
self,
terraform_resource: cdktf.IInterpolatingParent,
terraform_attribute: builtins.str,
) -> None:
'''
:param terraform_resource: The parent resource.
:param terraform_attribute: The attribute on the parent resource this class is referencing.
'''
if __debug__:
type_hints = typing.get_type_hints(SqlDatabaseTimeoutsOutputReference.__init__)
check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
jsii.create(self.__class__, self, [terraform_resource, terraform_attribute])
@jsii.member(jsii_name="resetCreate")
def reset_create(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetCreate", []))
@jsii.member(jsii_name="resetDelete")
def reset_delete(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetDelete", []))
@jsii.member(jsii_name="resetUpdate")
def reset_update(self) -> None:
return typing.cast(None, jsii.invoke(self, "resetUpdate", []))
@builtins.property
@jsii.member(jsii_name="createInput")
def create_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "createInput"))
@builtins.property
@jsii.member(jsii_name="deleteInput")
def delete_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "deleteInput"))
@builtins.property
@jsii.member(jsii_name="updateInput")
def update_input(self) -> typing.Optional[builtins.str]:
return typing.cast(typing.Optional[builtins.str], jsii.get(self, "updateInput"))
@builtins.property
@jsii.member(jsii_name="create")
def create(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "create"))
@create.setter
def create(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(SqlDatabaseTimeoutsOutputReference, "create").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "create", value)
@builtins.property
@jsii.member(jsii_name="delete")
def delete(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "delete"))
@delete.setter
def delete(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(SqlDatabaseTimeoutsOutputReference, "delete").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "delete", value)
@builtins.property
@jsii.member(jsii_name="update")
def update(self) -> builtins.str:
return typing.cast(builtins.str, jsii.get(self, "update"))
@update.setter
def update(self, value: builtins.str) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(SqlDatabaseTimeoutsOutputReference, "update").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "update", value)
@builtins.property
@jsii.member(jsii_name="internalValue")
def internal_value(
self,
) -> typing.Optional[typing.Union[SqlDatabaseTimeouts, cdktf.IResolvable]]:
return typing.cast(typing.Optional[typing.Union[SqlDatabaseTimeouts, cdktf.IResolvable]], jsii.get(self, "internalValue"))
@internal_value.setter
def internal_value(
self,
value: typing.Optional[typing.Union[SqlDatabaseTimeouts, cdktf.IResolvable]],
) -> None:
if __debug__:
type_hints = typing.get_type_hints(getattr(SqlDatabaseTimeoutsOutputReference, "internal_value").fset)
check_type(argname="argument value", value=value, expected_type=type_hints["value"])
jsii.set(self, "internalValue", value)
__all__ = [
"SqlDatabase",
"SqlDatabaseConfig",
"SqlDatabaseTimeouts",
"SqlDatabaseTimeoutsOutputReference",
]
publication.publish() | PypiClean |
/linkuxit-portafolio-0.14.tar.gz/linkuxit-portafolio-0.14/linkuxit/portafolio/models.py | from django.conf import settings
from django.db import models
from django.utils.html import strip_tags
from django.utils.translation import ugettext_lazy as _
from cms.models.pluginmodel import CMSPlugin
from djangocms_text_ckeditor.fields import HTMLField
from filer.fields.image import FilerImageField
SOCIAL_NETWORK_CHOICES = (
('facebook', 'Facebook'),
('google-plus', 'Google+'),
('linkedin', 'LinkedIn'),
('rss', 'RSS'),
('share-this', 'ShareThis'),
('skype', 'Skype'),
('twitter', 'Twitter'),
('stackoverflow', 'Stack Overflow'),
('github', 'Github'),
)
class TeamMember(models.Model):
""" Member of a team"""
photo = FilerImageField(verbose_name=_('Photo'), null=True, blank=True, default=None)
full_name = models.CharField(_('Full Name'), max_length=250)
slug = models.SlugField()
is_active = models.BooleanField(_('Is active'), default=True)
position = models.CharField(verbose_name=_('Position'), max_length=100)
strong_skills = models.CharField(_('Strong skills'), max_length=50)
summary = HTMLField(verbose_name=_('Summary'))
created_at = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.full_name
class Meta:
ordering = ('created_at',)
class SocialLink(models.Model):
"""Social link"""
member = models.ForeignKey(TeamMember, related_name='social_links', verbose_name=_('Member'))
website = models.CharField(verbose_name=_('Website'), max_length=250, choices=SOCIAL_NETWORK_CHOICES)
url = models.URLField(verbose_name=_('URL'))
def __unicode__(self):
        return u'%s\'s %s' % (self.member.full_name, self.website)
class Service(models.Model):
title = models.CharField(verbose_name=_('Title'), max_length=100)
slug = models.SlugField()
    html_class = models.CharField(verbose_name=_('Html Class'), blank=True, max_length=30, help_text=_('Used to add a font icon or to customize the item'), default='')
description = HTMLField(verbose_name=_('Description'))
is_featured = models.BooleanField(verbose_name=_('Is featured'), default=False)
order = models.PositiveIntegerField(default=0)
created_at = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.title
class Meta:
ordering = ('order', 'created_at')
class Client(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
logo = FilerImageField(verbose_name=_('Logo'), null=True)
description = HTMLField(verbose_name=_('Description'))
is_featured = models.BooleanField(verbose_name=_('Is featured'), default=False)
def __unicode__(self):
return self.name
class Country(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = 'Countries'
class Project(models.Model):
name = models.CharField(max_length=100, verbose_name=_('name'))
slug = models.SlugField()
description = HTMLField(verbose_name=_('Description'))
url = models.URLField(blank=True)
created_at = models.DateTimeField(auto_now_add=True)
is_featured = models.BooleanField(verbose_name=_('Is featured'), default=False)
main_photo = FilerImageField(verbose_name=_('Photo'))
country = models.ForeignKey(Country, related_name="projects", verbose_name=_('Country'))
client = models.ForeignKey(Client, related_name="projects", verbose_name=_('Client'))
developers = models.ManyToManyField(TeamMember, related_name='projects')
services = models.ManyToManyField(Service, related_name="projects", verbose_name=_('Service'))
def __unicode__(self):
return self.name
class Meta:
ordering = ('created_at',)
class Image(models.Model):
image = FilerImageField(null=True, blank=True, default=None, verbose_name=_('Photo'))
description = HTMLField(verbose_name=_('Description'))
project = models.ForeignKey(Project, related_name="images")
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ('created_at',)
class Team(models.Model):
name = models.CharField(verbose_name=_('Team name'), max_length=250)
slug = models.SlugField(default='')
description = HTMLField(verbose_name=_('Description'), default='')
members = models.ManyToManyField(TeamMember, related_name='teams', verbose_name=_('Members'))
def __unicode__(self):
return self.name
class TeamPlugin(CMSPlugin):
"""Team plugin"""
team = models.ForeignKey(Team, related_name='plugins', verbose_name=_('team'))
def __unicode__(self):
return self.team.name
class PortfolioPlugin(CMSPlugin):
pass
class Testimonial(models.Model):
testimonial = models.CharField(verbose_name=_('Testimonial'), max_length=300)
client = models.ForeignKey(Client, related_name="testimonials", verbose_name=_('Client'))
is_active = models.BooleanField(default=False)
class Meta:
verbose_name = _('Testimonial')
verbose_name_plural = _('Testimonials')
def __unicode__(self):
return self.testimonial[:30] | PypiClean |
/gamification-engine-0.4.0.tar.gz/gamification-engine-0.4.0/gengine/app/jsscripts/node_modules/ajv/lib/dotjs/custom.js | 'use strict';
module.exports = function generate_custom(it, $keyword) {
var out = ' ';
var $lvl = it.level;
var $dataLvl = it.dataLevel;
var $schema = it.schema[$keyword];
var $schemaPath = it.schemaPath + it.util.getProperty($keyword);
var $errSchemaPath = it.errSchemaPath + '/' + $keyword;
var $breakOnError = !it.opts.allErrors;
var $errorKeyword;
var $data = 'data' + ($dataLvl || '');
var $valid = 'valid' + $lvl;
var $errs = 'errs__' + $lvl;
var $isData = it.opts.v5 && $schema && $schema.$data,
$schemaValue;
if ($isData) {
out += ' var schema' + ($lvl) + ' = ' + (it.util.getData($schema.$data, $dataLvl, it.dataPathArr)) + '; ';
$schemaValue = 'schema' + $lvl;
} else {
$schemaValue = $schema;
}
var $rule = this,
$definition = 'definition' + $lvl,
$rDef = $rule.definition;
var $compile, $inline, $macro, $ruleValidate, $validateCode;
if ($isData && $rDef.$data) {
$validateCode = 'keywordValidate' + $lvl;
var $validateSchema = $rDef.validateSchema;
out += ' var ' + ($definition) + ' = RULES.custom[\'' + ($keyword) + '\'].definition; var ' + ($validateCode) + ' = ' + ($definition) + '.validate;';
} else {
$ruleValidate = it.useCustomRule($rule, $schema, it.schema, it);
$schemaValue = 'validate.schema' + $schemaPath;
$validateCode = $ruleValidate.code;
$compile = $rDef.compile;
$inline = $rDef.inline;
$macro = $rDef.macro;
}
var $ruleErrs = $validateCode + '.errors',
$i = 'i' + $lvl,
$ruleErr = 'ruleErr' + $lvl,
$asyncKeyword = $rDef.async;
if ($asyncKeyword && !it.async) throw new Error('async keyword in sync schema');
if (!($inline || $macro)) {
out += '' + ($ruleErrs) + ' = null;';
}
out += 'var ' + ($errs) + ' = errors;var ' + ($valid) + ';';
if ($validateSchema) {
out += ' ' + ($valid) + ' = ' + ($definition) + '.validateSchema(' + ($schemaValue) + '); if (' + ($valid) + ') {';
}
if ($inline) {
if ($rDef.statements) {
out += ' ' + ($ruleValidate.validate) + ' ';
} else {
out += ' ' + ($valid) + ' = ' + ($ruleValidate.validate) + '; ';
}
} else if ($macro) {
var $it = it.util.copy(it);
$it.level++;
var $nextValid = 'valid' + $it.level;
$it.schema = $ruleValidate.validate;
$it.schemaPath = '';
var $wasComposite = it.compositeRule;
it.compositeRule = $it.compositeRule = true;
var $code = it.validate($it).replace(/validate\.schema/g, $validateCode);
it.compositeRule = $it.compositeRule = $wasComposite;
out += ' ' + ($code);
} else {
var $$outStack = $$outStack || [];
$$outStack.push(out);
out = '';
out += ' ' + ($validateCode) + '.call( ';
if (it.opts.passContext) {
out += 'this';
} else {
out += 'self';
}
if ($compile || $rDef.schema === false) {
out += ' , ' + ($data) + ' ';
} else {
out += ' , ' + ($schemaValue) + ' , ' + ($data) + ' , validate.schema' + (it.schemaPath) + ' ';
}
out += ' , (dataPath || \'\')';
if (it.errorPath != '""') {
out += ' + ' + (it.errorPath);
}
var $parentData = $dataLvl ? 'data' + (($dataLvl - 1) || '') : 'parentData',
$parentDataProperty = $dataLvl ? it.dataPathArr[$dataLvl] : 'parentDataProperty';
out += ' , ' + ($parentData) + ' , ' + ($parentDataProperty) + ' , rootData ) ';
var def_callRuleValidate = out;
out = $$outStack.pop();
if ($rDef.errors === false) {
out += ' ' + ($valid) + ' = ';
if ($asyncKeyword) {
out += '' + (it.yieldAwait);
}
out += '' + (def_callRuleValidate) + '; ';
} else {
if ($asyncKeyword) {
$ruleErrs = 'customErrors' + $lvl;
out += ' var ' + ($ruleErrs) + ' = null; try { ' + ($valid) + ' = ' + (it.yieldAwait) + (def_callRuleValidate) + '; } catch (e) { ' + ($valid) + ' = false; if (e instanceof ValidationError) ' + ($ruleErrs) + ' = e.errors; else throw e; } ';
} else {
out += ' ' + ($ruleErrs) + ' = null; ' + ($valid) + ' = ' + (def_callRuleValidate) + '; ';
}
}
}
if ($rDef.modifying) {
out += ' ' + ($data) + ' = ' + ($parentData) + '[' + ($parentDataProperty) + '];';
}
if ($validateSchema) {
out += ' }';
}
if ($rDef.valid) {
if ($breakOnError) {
out += ' if (true) { ';
}
} else {
out += ' if ( ';
if ($rDef.valid === undefined) {
out += ' !';
if ($macro) {
out += '' + ($nextValid);
} else {
out += '' + ($valid);
}
} else {
out += ' ' + (!$rDef.valid) + ' ';
}
out += ') { ';
$errorKeyword = $rule.keyword;
var $$outStack = $$outStack || [];
$$outStack.push(out);
out = '';
var $$outStack = $$outStack || [];
$$outStack.push(out);
out = ''; /* istanbul ignore else */
if (it.createErrors !== false) {
out += ' { keyword: \'' + ($errorKeyword || 'custom') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { keyword: \'' + ($rule.keyword) + '\' } ';
if (it.opts.messages !== false) {
out += ' , message: \'should pass "' + ($rule.keyword) + '" keyword validation\' ';
}
if (it.opts.verbose) {
out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' ';
}
out += ' } ';
} else {
out += ' {} ';
}
var __err = out;
out = $$outStack.pop();
if (!it.compositeRule && $breakOnError) { /* istanbul ignore if */
if (it.async) {
out += ' throw new ValidationError([' + (__err) + ']); ';
} else {
out += ' validate.errors = [' + (__err) + ']; return false; ';
}
} else {
out += ' var err = ' + (__err) + '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ';
}
var def_customError = out;
out = $$outStack.pop();
if ($inline) {
if ($rDef.errors) {
if ($rDef.errors != 'full') {
out += ' for (var ' + ($i) + '=' + ($errs) + '; ' + ($i) + '<errors; ' + ($i) + '++) { var ' + ($ruleErr) + ' = vErrors[' + ($i) + ']; if (' + ($ruleErr) + '.dataPath === undefined) ' + ($ruleErr) + '.dataPath = (dataPath || \'\') + ' + (it.errorPath) + '; if (' + ($ruleErr) + '.schemaPath === undefined) { ' + ($ruleErr) + '.schemaPath = "' + ($errSchemaPath) + '"; } ';
if (it.opts.verbose) {
out += ' ' + ($ruleErr) + '.schema = ' + ($schemaValue) + '; ' + ($ruleErr) + '.data = ' + ($data) + '; ';
}
out += ' } ';
}
} else {
if ($rDef.errors === false) {
out += ' ' + (def_customError) + ' ';
} else {
out += ' if (' + ($errs) + ' == errors) { ' + (def_customError) + ' } else { for (var ' + ($i) + '=' + ($errs) + '; ' + ($i) + '<errors; ' + ($i) + '++) { var ' + ($ruleErr) + ' = vErrors[' + ($i) + ']; if (' + ($ruleErr) + '.dataPath === undefined) ' + ($ruleErr) + '.dataPath = (dataPath || \'\') + ' + (it.errorPath) + '; if (' + ($ruleErr) + '.schemaPath === undefined) { ' + ($ruleErr) + '.schemaPath = "' + ($errSchemaPath) + '"; } ';
if (it.opts.verbose) {
out += ' ' + ($ruleErr) + '.schema = ' + ($schemaValue) + '; ' + ($ruleErr) + '.data = ' + ($data) + '; ';
}
out += ' } } ';
}
}
} else if ($macro) {
out += ' var err = '; /* istanbul ignore else */
if (it.createErrors !== false) {
out += ' { keyword: \'' + ($errorKeyword || 'custom') + '\' , dataPath: (dataPath || \'\') + ' + (it.errorPath) + ' , schemaPath: ' + (it.util.toQuotedString($errSchemaPath)) + ' , params: { keyword: \'' + ($rule.keyword) + '\' } ';
if (it.opts.messages !== false) {
out += ' , message: \'should pass "' + ($rule.keyword) + '" keyword validation\' ';
}
if (it.opts.verbose) {
out += ' , schema: validate.schema' + ($schemaPath) + ' , parentSchema: validate.schema' + (it.schemaPath) + ' , data: ' + ($data) + ' ';
}
out += ' } ';
} else {
out += ' {} ';
}
out += '; if (vErrors === null) vErrors = [err]; else vErrors.push(err); errors++; ';
if (!it.compositeRule && $breakOnError) { /* istanbul ignore if */
if (it.async) {
out += ' throw new ValidationError(vErrors); ';
} else {
out += ' validate.errors = vErrors; return false; ';
}
}
} else {
if ($rDef.errors === false) {
out += ' ' + (def_customError) + ' ';
} else {
out += ' if (Array.isArray(' + ($ruleErrs) + ')) { if (vErrors === null) vErrors = ' + ($ruleErrs) + '; else vErrors = vErrors.concat(' + ($ruleErrs) + '); errors = vErrors.length; for (var ' + ($i) + '=' + ($errs) + '; ' + ($i) + '<errors; ' + ($i) + '++) { var ' + ($ruleErr) + ' = vErrors[' + ($i) + ']; if (' + ($ruleErr) + '.dataPath === undefined) ' + ($ruleErr) + '.dataPath = (dataPath || \'\') + ' + (it.errorPath) + '; ' + ($ruleErr) + '.schemaPath = "' + ($errSchemaPath) + '"; ';
if (it.opts.verbose) {
out += ' ' + ($ruleErr) + '.schema = ' + ($schemaValue) + '; ' + ($ruleErr) + '.data = ' + ($data) + '; ';
}
out += ' } } else { ' + (def_customError) + ' } ';
}
}
out += ' } ';
if ($breakOnError) {
out += ' else { ';
}
}
return out;
} | PypiClean |
/google-cloud-scheduler-2.11.1.tar.gz/google-cloud-scheduler-2.11.1/google/cloud/scheduler_v1beta1/services/cloud_scheduler/transports/rest.py |
import dataclasses
import json # type: ignore
import re
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.transport.requests import AuthorizedSession # type: ignore
from google.cloud.location import locations_pb2 # type: ignore
from google.protobuf import json_format
import grpc # type: ignore
from requests import __version__ as requests_version
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.cloud.scheduler_v1beta1.types import cloudscheduler
from google.cloud.scheduler_v1beta1.types import job
from google.cloud.scheduler_v1beta1.types import job as gcs_job
from .base import CloudSchedulerTransport
from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class CloudSchedulerRestInterceptor:
"""Interceptor for CloudScheduler.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the CloudSchedulerRestTransport.
.. code-block:: python
class MyCustomCloudSchedulerInterceptor(CloudSchedulerRestInterceptor):
def pre_create_job(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_create_job(self, response):
logging.log(f"Received response: {response}")
return response
def pre_delete_job(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def pre_get_job(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get_job(self, response):
logging.log(f"Received response: {response}")
return response
def pre_list_jobs(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list_jobs(self, response):
logging.log(f"Received response: {response}")
return response
def pre_pause_job(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_pause_job(self, response):
logging.log(f"Received response: {response}")
return response
def pre_resume_job(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_resume_job(self, response):
logging.log(f"Received response: {response}")
return response
def pre_run_job(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_run_job(self, response):
logging.log(f"Received response: {response}")
return response
def pre_update_job(self, request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_update_job(self, response):
logging.log(f"Received response: {response}")
return response
transport = CloudSchedulerRestTransport(interceptor=MyCustomCloudSchedulerInterceptor())
client = CloudSchedulerClient(transport=transport)
"""
def pre_create_job(
self,
request: cloudscheduler.CreateJobRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[cloudscheduler.CreateJobRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for create_job
Override in a subclass to manipulate the request or metadata
before they are sent to the CloudScheduler server.
"""
return request, metadata
def post_create_job(self, response: gcs_job.Job) -> gcs_job.Job:
"""Post-rpc interceptor for create_job
Override in a subclass to manipulate the response
after it is returned by the CloudScheduler server but before
it is returned to user code.
"""
return response
def pre_delete_job(
self,
request: cloudscheduler.DeleteJobRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[cloudscheduler.DeleteJobRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete_job
Override in a subclass to manipulate the request or metadata
before they are sent to the CloudScheduler server.
"""
return request, metadata
def pre_get_job(
self, request: cloudscheduler.GetJobRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[cloudscheduler.GetJobRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_job
Override in a subclass to manipulate the request or metadata
before they are sent to the CloudScheduler server.
"""
return request, metadata
def post_get_job(self, response: job.Job) -> job.Job:
"""Post-rpc interceptor for get_job
Override in a subclass to manipulate the response
after it is returned by the CloudScheduler server but before
it is returned to user code.
"""
return response
def pre_list_jobs(
self,
request: cloudscheduler.ListJobsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[cloudscheduler.ListJobsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list_jobs
Override in a subclass to manipulate the request or metadata
before they are sent to the CloudScheduler server.
"""
return request, metadata
def post_list_jobs(
self, response: cloudscheduler.ListJobsResponse
) -> cloudscheduler.ListJobsResponse:
"""Post-rpc interceptor for list_jobs
Override in a subclass to manipulate the response
after it is returned by the CloudScheduler server but before
it is returned to user code.
"""
return response
def pre_pause_job(
self,
request: cloudscheduler.PauseJobRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[cloudscheduler.PauseJobRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for pause_job
Override in a subclass to manipulate the request or metadata
before they are sent to the CloudScheduler server.
"""
return request, metadata
def post_pause_job(self, response: job.Job) -> job.Job:
"""Post-rpc interceptor for pause_job
Override in a subclass to manipulate the response
after it is returned by the CloudScheduler server but before
it is returned to user code.
"""
return response
def pre_resume_job(
self,
request: cloudscheduler.ResumeJobRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[cloudscheduler.ResumeJobRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for resume_job
Override in a subclass to manipulate the request or metadata
before they are sent to the CloudScheduler server.
"""
return request, metadata
def post_resume_job(self, response: job.Job) -> job.Job:
"""Post-rpc interceptor for resume_job
Override in a subclass to manipulate the response
after it is returned by the CloudScheduler server but before
it is returned to user code.
"""
return response
def pre_run_job(
self, request: cloudscheduler.RunJobRequest, metadata: Sequence[Tuple[str, str]]
) -> Tuple[cloudscheduler.RunJobRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for run_job
Override in a subclass to manipulate the request or metadata
before they are sent to the CloudScheduler server.
"""
return request, metadata
def post_run_job(self, response: job.Job) -> job.Job:
"""Post-rpc interceptor for run_job
Override in a subclass to manipulate the response
after it is returned by the CloudScheduler server but before
it is returned to user code.
"""
return response
def pre_update_job(
self,
request: cloudscheduler.UpdateJobRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[cloudscheduler.UpdateJobRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for update_job
Override in a subclass to manipulate the request or metadata
before they are sent to the CloudScheduler server.
"""
return request, metadata
def post_update_job(self, response: gcs_job.Job) -> gcs_job.Job:
"""Post-rpc interceptor for update_job
Override in a subclass to manipulate the response
after it is returned by the CloudScheduler server but before
it is returned to user code.
"""
return response
def pre_get_location(
self,
request: locations_pb2.GetLocationRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get_location
Override in a subclass to manipulate the request or metadata
before they are sent to the CloudScheduler server.
"""
return request, metadata
def post_get_location(
self, response: locations_pb2.Location
) -> locations_pb2.Location:
"""Post-rpc interceptor for get_location
Override in a subclass to manipulate the response
after it is returned by the CloudScheduler server but before
it is returned to user code.
"""
return response
def pre_list_locations(
self,
request: locations_pb2.ListLocationsRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list_locations
Override in a subclass to manipulate the request or metadata
before they are sent to the CloudScheduler server.
"""
return request, metadata
def post_list_locations(
self, response: locations_pb2.ListLocationsResponse
) -> locations_pb2.ListLocationsResponse:
"""Post-rpc interceptor for list_locations
Override in a subclass to manipulate the response
after it is returned by the CloudScheduler server but before
it is returned to user code.
"""
return response
@dataclasses.dataclass
class CloudSchedulerRestStub:
_session: AuthorizedSession
_host: str
_interceptor: CloudSchedulerRestInterceptor
class CloudSchedulerRestTransport(CloudSchedulerTransport):
"""REST backend transport for CloudScheduler.
The Cloud Scheduler API allows external entities to reliably
schedule asynchronous jobs.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
    It sends JSON representations of protocol buffers over HTTP/1.1.
"""
def __init__(
self,
*,
host: str = "cloudscheduler.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[CloudSchedulerRestInterceptor] = None,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or CloudSchedulerRestInterceptor()
self._prep_wrapped_messages(client_info)
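    # Each RPC below is implemented as a nested CloudSchedulerRestStub subclass:
    # its __call__ transcodes the protobuf request onto the HTTP rule declared in
    # `http_options`, JSON-encodes the query parameters (and the body, when the
    # mapping declares one), sends the call through the AuthorizedSession, raises
    # the matching GoogleAPICallError subclass on HTTP errors, and parses the JSON
    # response back into the corresponding protobuf message, wrapping the exchange
    # in the pre-/post- interceptor hooks.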
class _CreateJob(CloudSchedulerRestStub):
def __hash__(self):
return hash("CreateJob")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: cloudscheduler.CreateJobRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcs_job.Job:
r"""Call the create job method over HTTP.
Args:
request (~.cloudscheduler.CreateJobRequest):
The request object. Request message for
[CreateJob][google.cloud.scheduler.v1beta1.CloudScheduler.CreateJob].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.gcs_job.Job:
Configuration for a job.
The maximum allowed size for a job is
1MB.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1beta1/{parent=projects/*/locations/*}/jobs",
"body": "job",
},
]
request, metadata = self._interceptor.pre_create_job(request, metadata)
pb_request = cloudscheduler.CreateJobRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = gcs_job.Job()
pb_resp = gcs_job.Job.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_create_job(resp)
return resp
class _DeleteJob(CloudSchedulerRestStub):
def __hash__(self):
return hash("DeleteJob")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: cloudscheduler.DeleteJobRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
):
r"""Call the delete job method over HTTP.
Args:
request (~.cloudscheduler.DeleteJobRequest):
The request object. Request message for deleting a job using
[DeleteJob][google.cloud.scheduler.v1beta1.CloudScheduler.DeleteJob].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/v1beta1/{name=projects/*/locations/*/jobs/*}",
},
]
request, metadata = self._interceptor.pre_delete_job(request, metadata)
pb_request = cloudscheduler.DeleteJobRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
class _GetJob(CloudSchedulerRestStub):
def __hash__(self):
return hash("GetJob")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: cloudscheduler.GetJobRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job.Job:
r"""Call the get job method over HTTP.
Args:
request (~.cloudscheduler.GetJobRequest):
The request object. Request message for
[GetJob][google.cloud.scheduler.v1beta1.CloudScheduler.GetJob].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.job.Job:
Configuration for a job.
The maximum allowed size for a job is
1MB.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1beta1/{name=projects/*/locations/*/jobs/*}",
},
]
request, metadata = self._interceptor.pre_get_job(request, metadata)
pb_request = cloudscheduler.GetJobRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = job.Job()
pb_resp = job.Job.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_get_job(resp)
return resp
class _ListJobs(CloudSchedulerRestStub):
def __hash__(self):
return hash("ListJobs")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: cloudscheduler.ListJobsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cloudscheduler.ListJobsResponse:
r"""Call the list jobs method over HTTP.
Args:
request (~.cloudscheduler.ListJobsRequest):
The request object. Request message for listing jobs using
[ListJobs][google.cloud.scheduler.v1beta1.CloudScheduler.ListJobs].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.cloudscheduler.ListJobsResponse:
Response message for listing jobs using
[ListJobs][google.cloud.scheduler.v1beta1.CloudScheduler.ListJobs].
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1beta1/{parent=projects/*/locations/*}/jobs",
},
]
request, metadata = self._interceptor.pre_list_jobs(request, metadata)
pb_request = cloudscheduler.ListJobsRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = cloudscheduler.ListJobsResponse()
pb_resp = cloudscheduler.ListJobsResponse.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_list_jobs(resp)
return resp
class _PauseJob(CloudSchedulerRestStub):
def __hash__(self):
return hash("PauseJob")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: cloudscheduler.PauseJobRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job.Job:
r"""Call the pause job method over HTTP.
Args:
request (~.cloudscheduler.PauseJobRequest):
The request object. Request message for
[PauseJob][google.cloud.scheduler.v1beta1.CloudScheduler.PauseJob].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.job.Job:
Configuration for a job.
The maximum allowed size for a job is
1MB.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1beta1/{name=projects/*/locations/*/jobs/*}:pause",
"body": "*",
},
]
request, metadata = self._interceptor.pre_pause_job(request, metadata)
pb_request = cloudscheduler.PauseJobRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = job.Job()
pb_resp = job.Job.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_pause_job(resp)
return resp
class _ResumeJob(CloudSchedulerRestStub):
def __hash__(self):
return hash("ResumeJob")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: cloudscheduler.ResumeJobRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job.Job:
r"""Call the resume job method over HTTP.
Args:
request (~.cloudscheduler.ResumeJobRequest):
The request object. Request message for
[ResumeJob][google.cloud.scheduler.v1beta1.CloudScheduler.ResumeJob].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.job.Job:
Configuration for a job.
The maximum allowed size for a job is
1MB.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1beta1/{name=projects/*/locations/*/jobs/*}:resume",
"body": "*",
},
]
request, metadata = self._interceptor.pre_resume_job(request, metadata)
pb_request = cloudscheduler.ResumeJobRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = job.Job()
pb_resp = job.Job.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_resume_job(resp)
return resp
class _RunJob(CloudSchedulerRestStub):
def __hash__(self):
return hash("RunJob")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: cloudscheduler.RunJobRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> job.Job:
r"""Call the run job method over HTTP.
Args:
request (~.cloudscheduler.RunJobRequest):
The request object. Request message for forcing a job to run now using
[RunJob][google.cloud.scheduler.v1beta1.CloudScheduler.RunJob].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.job.Job:
Configuration for a job.
The maximum allowed size for a job is
1MB.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/v1beta1/{name=projects/*/locations/*/jobs/*}:run",
"body": "*",
},
]
request, metadata = self._interceptor.pre_run_job(request, metadata)
pb_request = cloudscheduler.RunJobRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = job.Job()
pb_resp = job.Job.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_run_job(resp)
return resp
class _UpdateJob(CloudSchedulerRestStub):
def __hash__(self):
return hash("UpdateJob")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: cloudscheduler.UpdateJobRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcs_job.Job:
r"""Call the update job method over HTTP.
Args:
request (~.cloudscheduler.UpdateJobRequest):
The request object. Request message for
[UpdateJob][google.cloud.scheduler.v1beta1.CloudScheduler.UpdateJob].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.gcs_job.Job:
Configuration for a job.
The maximum allowed size for a job is
1MB.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/v1beta1/{job.name=projects/*/locations/*/jobs/*}",
"body": "job",
},
]
request, metadata = self._interceptor.pre_update_job(request, metadata)
pb_request = cloudscheduler.UpdateJobRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=True,
)
)
query_params.update(self._get_unset_required_fields(query_params))
query_params["$alt"] = "json;enum-encoding=int"
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = gcs_job.Job()
pb_resp = gcs_job.Job.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_update_job(resp)
return resp
@property
def create_job(self) -> Callable[[cloudscheduler.CreateJobRequest], gcs_job.Job]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._CreateJob(self._session, self._host, self._interceptor) # type: ignore
@property
def delete_job(
self,
) -> Callable[[cloudscheduler.DeleteJobRequest], empty_pb2.Empty]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._DeleteJob(self._session, self._host, self._interceptor) # type: ignore
@property
def get_job(self) -> Callable[[cloudscheduler.GetJobRequest], job.Job]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._GetJob(self._session, self._host, self._interceptor) # type: ignore
@property
def list_jobs(
self,
) -> Callable[[cloudscheduler.ListJobsRequest], cloudscheduler.ListJobsResponse]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._ListJobs(self._session, self._host, self._interceptor) # type: ignore
@property
def pause_job(self) -> Callable[[cloudscheduler.PauseJobRequest], job.Job]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._PauseJob(self._session, self._host, self._interceptor) # type: ignore
@property
def resume_job(self) -> Callable[[cloudscheduler.ResumeJobRequest], job.Job]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._ResumeJob(self._session, self._host, self._interceptor) # type: ignore
@property
def run_job(self) -> Callable[[cloudscheduler.RunJobRequest], job.Job]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._RunJob(self._session, self._host, self._interceptor) # type: ignore
@property
def update_job(self) -> Callable[[cloudscheduler.UpdateJobRequest], gcs_job.Job]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._UpdateJob(self._session, self._host, self._interceptor) # type: ignore
@property
def get_location(self):
return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore
class _GetLocation(CloudSchedulerRestStub):
def __call__(
self,
request: locations_pb2.GetLocationRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.Location:
r"""Call the get location method over HTTP.
Args:
request (locations_pb2.GetLocationRequest):
The request object for GetLocation method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
locations_pb2.Location: Response from GetLocation method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1beta1/{name=projects/*/locations/*}",
},
]
request, metadata = self._interceptor.pre_get_location(request, metadata)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = locations_pb2.Location()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_get_location(resp)
return resp
@property
def list_locations(self):
return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore
class _ListLocations(CloudSchedulerRestStub):
def __call__(
self,
request: locations_pb2.ListLocationsRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> locations_pb2.ListLocationsResponse:
r"""Call the list locations method over HTTP.
Args:
request (locations_pb2.ListLocationsRequest):
The request object for ListLocations method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
locations_pb2.ListLocationsResponse: Response from ListLocations method.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/v1beta1/{name=projects/*}/locations",
},
]
request, metadata = self._interceptor.pre_list_locations(request, metadata)
request_kwargs = json_format.MessageToDict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(json.dumps(transcoded_request["query_params"]))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
resp = locations_pb2.ListLocationsResponse()
resp = json_format.Parse(response.content.decode("utf-8"), resp)
resp = self._interceptor.post_list_locations(resp)
return resp
@property
def kind(self) -> str:
return "rest"
def close(self):
self._session.close()
__all__ = ("CloudSchedulerRestTransport",) | PypiClean |
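# Minimal usage sketch (illustrative only, not part of the generated surface):
# it assumes Application Default Credentials are available and uses placeholder
# project/location IDs.
if __name__ == "__main__":  # pragma: NO COVER
    from google.cloud.scheduler_v1beta1 import CloudSchedulerClient

    rest_transport = CloudSchedulerRestTransport()
    client = CloudSchedulerClient(transport=rest_transport)
    # List jobs in one location over REST/JSON; the parent string is a placeholder.
    for scheduled_job in client.list_jobs(
        parent="projects/my-project/locations/us-central1"
    ):
        print(scheduled_job.name)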
/Rare-1.10.3.tar.gz/Rare-1.10.3/rare/shared/rare_core.py | import configparser
import os
import time
from argparse import Namespace
from itertools import chain
from logging import getLogger
from typing import Dict, Iterator, Callable, Optional, List, Union, Iterable, Tuple
from PyQt5.QtCore import QObject, pyqtSignal, QSettings, pyqtSlot, QThreadPool, QRunnable, QTimer
from legendary.lfs.eos import EOSOverlayApp
from legendary.models.game import Game, SaveGameFile
from requests import HTTPError
from rare.lgndr.core import LegendaryCore
from rare.models.base_game import RareSaveGame
from rare.models.game import RareGame, RareEosOverlay
from rare.models.signals import GlobalSignals
from .image_manager import ImageManager
from .workers import (
QueueWorker,
VerifyWorker,
MoveWorker,
FetchWorker,
OriginWineWorker,
)
from .workers.uninstall import uninstall_game
from .workers.worker import QueueWorkerInfo, QueueWorkerState
logger = getLogger("RareCore")
class RareCore(QObject):
progress = pyqtSignal(int, str)
completed = pyqtSignal()
    # lk: these are unused but remain in case they become relevant
# completed_saves = pyqtSignal()
# completed_origin = pyqtSignal()
# completed_entitlements = pyqtSignal()
# lk: special case class attribute, this has to be here
__instance: Optional['RareCore'] = None
def __init__(self, args: Namespace):
if self.__instance is not None:
raise RuntimeError("RareCore already initialized")
super(RareCore, self).__init__()
self.__args: Optional[Namespace] = None
self.__signals: Optional[GlobalSignals] = None
self.__core: Optional[LegendaryCore] = None
self.__image_manager: Optional[ImageManager] = None
self.__start_time = time.time()
self.args(args)
self.signals(init=True)
self.core(init=True)
self.image_manager(init=True)
self.settings = QSettings()
self.queue_workers: List[QueueWorker] = []
self.queue_threadpool = QThreadPool()
self.queue_threadpool.setMaxThreadCount(2)
self.__library: Dict[str, RareGame] = {}
self.__eos_overlay = RareEosOverlay(self.__core, EOSOverlayApp)
RareCore.__instance = self
def enqueue_worker(self, rgame: RareGame, worker: QueueWorker):
if isinstance(worker, VerifyWorker):
rgame.state = RareGame.State.VERIFYING
if isinstance(worker, MoveWorker):
rgame.state = RareGame.State.MOVING
rgame.set_worker(worker)
worker.feedback.started.connect(self.__signals.application.update_statusbar)
worker.feedback.finished.connect(lambda: rgame.set_worker(None))
worker.feedback.finished.connect(lambda: self.queue_workers.remove(worker))
worker.feedback.finished.connect(self.__signals.application.update_statusbar)
self.queue_workers.append(worker)
self.queue_threadpool.start(worker, priority=0)
self.__signals.application.update_statusbar.emit()
def dequeue_worker(self, worker: QueueWorker):
rgame = self.__library[worker.worker_info().app_name]
rgame.set_worker(None)
self.queue_workers.remove(worker)
self.__signals.application.update_statusbar.emit()
def active_workers(self) -> Iterable[QueueWorker]:
return list(filter(lambda w: w.state == QueueWorkerState.ACTIVE, self.queue_workers))
def queued_workers(self) -> Iterable[QueueWorker]:
return list(filter(lambda w: w.state == QueueWorkerState.QUEUED, self.queue_workers))
def queue_info(self) -> List[QueueWorkerInfo]:
return [w.worker_info() for w in self.queue_workers]
@staticmethod
def instance() -> 'RareCore':
if RareCore.__instance is None:
raise RuntimeError("Uninitialized use of RareCore")
return RareCore.__instance
def signals(self, init: bool = False) -> GlobalSignals:
if self.__signals is None and not init:
raise RuntimeError("Uninitialized use of GlobalSignalsSingleton")
if self.__signals is not None and init:
raise RuntimeError("GlobalSignals already initialized")
if init:
self.__signals = GlobalSignals()
return self.__signals
def args(self, args: Namespace = None) -> Optional[Namespace]:
if self.__args is None and args is None:
raise RuntimeError("Uninitialized use of ArgumentsSingleton")
if self.__args is not None and args is not None:
raise RuntimeError("Arguments already initialized")
if args is not None:
self.__args = args
return self.__args
def core(self, init: bool = False) -> LegendaryCore:
if self.__core is None and not init:
raise RuntimeError("Uninitialized use of LegendaryCoreSingleton")
if self.__core is not None and init:
raise RuntimeError("LegendaryCore already initialized")
if init:
try:
self.__core = LegendaryCore()
except configparser.MissingSectionHeaderError as e:
logger.warning(f"Config is corrupt: {e}")
if config_path := os.environ.get("XDG_CONFIG_HOME"):
path = os.path.join(config_path, "legendary")
else:
path = os.path.expanduser("~/.config/legendary")
with open(os.path.join(path, "config.ini"), "w") as config_file:
config_file.write("[Legendary]")
self.__core = LegendaryCore()
for section in ["Legendary", "default", "default.env"]:
if section not in self.__core.lgd.config.sections():
self.__core.lgd.config.add_section(section)
# workaround if egl sync enabled, but no programdata_path
# programdata_path might be unset if logging in through the browser
if self.__core.egl_sync_enabled:
if self.__core.egl.programdata_path is None:
self.__core.lgd.config.remove_option("Legendary", "egl_sync")
else:
if not os.path.exists(self.__core.egl.programdata_path):
self.__core.lgd.config.remove_option("Legendary", "egl_sync")
self.__core.lgd.save_config()
return self.__core
def image_manager(self, init: bool = False) -> ImageManager:
if self.__image_manager is None and not init:
raise RuntimeError("Uninitialized use of ImageManagerSingleton")
if self.__image_manager is not None and init:
raise RuntimeError("ImageManager already initialized")
if self.__image_manager is None:
self.__image_manager = ImageManager(self.signals(), self.core())
return self.__image_manager
def deleteLater(self) -> None:
self.__image_manager.deleteLater()
del self.__image_manager
self.__image_manager = None
self.__core.exit()
del self.__core
self.__core = None
self.__signals.deleteLater()
del self.__signals
self.__signals = None
del self.__args
self.__args = None
RareCore.__instance = None
super(RareCore, self).deleteLater()
def __validate_install(self, rgame: RareGame):
if not os.path.exists(rgame.igame.install_path):
# lk: since install_path is lost anyway, set keep_files to True
# lk: to avoid spamming the log with "file not found" errors
for dlc in rgame.owned_dlcs:
if dlc.is_installed:
logger.info(f'Uninstalling DLC "{dlc.app_name}" ({dlc.app_title})...')
uninstall_game(self.__core, dlc.app_name, keep_files=True, keep_config=True)
dlc.igame = None
logger.info(
f'Removing "{rgame.app_title}" because "{rgame.igame.install_path}" does not exist...'
)
uninstall_game(self.__core, rgame.app_name, keep_files=True, keep_config=True)
logger.info(f"Uninstalled {rgame.app_title}, because no game files exist")
rgame.igame = None
return
# lk: games that don't have an override and can't find their executable due to case sensitivity
# lk: will still erroneously require verification. This might need to be removed completely
# lk: or be decoupled from the verification requirement
if override_exe := self.__core.lgd.config.get(rgame.app_name, "override_exe", fallback=""):
igame_executable = override_exe
else:
igame_executable = rgame.igame.executable
# lk: Case-insensitive search for the game's executable (example: Brothers - A Tale of two Sons)
executable_path = os.path.join(rgame.igame.install_path, igame_executable.replace("\\", "/").lstrip("/"))
file_list = map(str.lower, os.listdir(os.path.dirname(executable_path)))
if not os.path.basename(executable_path).lower() in file_list:
rgame.igame.needs_verification = True
self.__core.lgd.set_installed_game(rgame.app_name, rgame.igame)
rgame.update_igame()
logger.info(f"{rgame.app_title} needs verification")
def get_game(self, app_name: str) -> Union[RareEosOverlay, RareGame]:
if app_name == EOSOverlayApp.app_name:
return self.__eos_overlay
return self.__library[app_name]
def __add_game(self, rgame: RareGame) -> None:
rgame.signals.download.enqueue.connect(self.__signals.download.enqueue)
rgame.signals.download.dequeue.connect(self.__signals.download.dequeue)
rgame.signals.game.install.connect(self.__signals.game.install)
rgame.signals.game.installed.connect(self.__signals.game.installed)
rgame.signals.game.uninstall.connect(self.__signals.game.uninstall)
rgame.signals.game.uninstalled.connect(self.__signals.game.uninstalled)
rgame.signals.game.finished.connect(self.__signals.application.update_tray)
rgame.signals.game.finished.connect(lambda: self.__signals.discord_rpc.set_title.emit(""))
self.__library[rgame.app_name] = rgame
def __filter_games(self, condition: Callable[[RareGame], bool]) -> Iterator[RareGame]:
return filter(condition, self.__library.values())
def __create_or_update_rgame(self, game: Game) -> RareGame:
if rgame := self.__library.get(game.app_name, False):
logger.warning(f"{rgame.app_name} already present in {type(self).__name__}")
logger.info(f"Updating Game for {rgame.app_name}")
rgame.update_rgame()
else:
rgame = RareGame(self.__core, self.__image_manager, game)
return rgame
def __add_games_and_dlcs(self, games: List[Game], dlcs_dict: Dict[str, List]) -> None:
length = len(games)
for idx, game in enumerate(games):
rgame = self.__create_or_update_rgame(game)
# lk: since loading has to know about game state,
            # validate the installation while adding each RareGame
# TODO: this should probably be moved into RareGame
if rgame.is_installed and not (rgame.is_dlc or rgame.is_non_asset):
self.__validate_install(rgame)
if game_dlcs := dlcs_dict.get(rgame.game.catalog_item_id, False):
for dlc in game_dlcs:
rdlc = self.__create_or_update_rgame(dlc)
# lk: plug dlc progress signals to the game's
rdlc.signals.progress.start.connect(rgame.signals.progress.start)
rdlc.signals.progress.update.connect(rgame.signals.progress.update)
rdlc.signals.progress.finish.connect(rgame.signals.progress.finish)
rgame.owned_dlcs.add(rdlc)
self.__add_game(rdlc)
self.__add_game(rgame)
self.progress.emit(int(idx/length * 80) + 20, self.tr("Loaded <b>{}</b>").format(rgame.app_title))
@pyqtSlot(object, int)
def __on_fetch_result(self, result: Tuple[List, Dict], res_type: int):
logger.info(f"Got API results for {FetchWorker.Result(res_type).name}")
self.progress.emit(15, self.tr("Preparing library"))
self.__add_games_and_dlcs(*result)
self.progress.emit(100, self.tr("Launching Rare"))
logger.debug(f"Fetch time {time.time() - self.__start_time} seconds")
QTimer.singleShot(100, self.__post_init)
self.completed.emit()
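    # Startup pipeline: fetch() runs a FetchWorker on the global thread pool; its
    # result lands in __on_fetch_result(), which populates the library, reports
    # progress, emits `completed`, and schedules __post_init() to pull saves,
    # entitlements and Origin install data when not running offline.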
def fetch(self):
self.__start_time = time.time()
fetch_worker = FetchWorker(self.__core, self.__args)
fetch_worker.signals.progress.connect(self.progress)
fetch_worker.signals.result.connect(self.__on_fetch_result)
QThreadPool.globalInstance().start(fetch_worker)
def fetch_saves(self):
def __fetch() -> None:
start_time = time.time()
saves_dict: Dict[str, List[SaveGameFile]] = {}
try:
saves_list = self.__core.get_save_games()
for s in saves_list:
if s.app_name not in saves_dict.keys():
saves_dict[s.app_name] = [s]
else:
saves_dict[s.app_name].append(s)
for app_name, saves in saves_dict.items():
if app_name not in self.__library.keys():
continue
self.__library[app_name].load_saves(saves)
except (HTTPError, ConnectionError) as e:
logger.error(f"Exception while fetching saves from EGS: {e}")
return
logger.debug(f"Saves: {len(saves_dict)}")
logger.debug(f"Request saves: {time.time() - start_time} seconds")
saves_worker = QRunnable.create(__fetch)
QThreadPool.globalInstance().start(saves_worker)
def fetch_entitlements(self) -> None:
def __fetch() -> None:
start_time = time.time()
try:
entitlements = self.__core.egs.get_user_entitlements()
self.__core.lgd.entitlements = entitlements
for game in self.__library.values():
game.grant_date()
except (HTTPError, ConnectionError) as e:
logger.error(f"Failed to retrieve user entitlements from EGS: {e}")
return
logger.debug(f"Entitlements: {len(list(entitlements))}")
logger.debug(f"Request Entitlements: {time.time() - start_time} seconds")
entitlements_worker = QRunnable.create(__fetch)
QThreadPool.globalInstance().start(entitlements_worker)
def resolve_origin(self) -> None:
origin_worker = OriginWineWorker(self.__core, list(self.origin_games))
QThreadPool.globalInstance().start(origin_worker)
def __post_init(self) -> None:
if not self.__args.offline:
self.fetch_saves()
self.fetch_entitlements()
self.resolve_origin()
def load_pixmaps(self) -> None:
"""
Load pixmaps for all games
This exists here solely to fight signal and possibly threading issues.
The initial image loading at startup should not be done in the RareGame class
for two reasons. It will delay startup due to widget updates and the image
        might become available before the UI is brought up. In the second case, we
        will get both a long queue of signals to be serviced and some of them might
        not be connected yet, so the widget won't be updated. So do the loading here
by calling this after the MainWindow has finished initializing.
@return: None
"""
def __load_pixmaps() -> None:
# time.sleep(0.1)
for rgame in self.__library.values():
# self.__image_manager.download_image(rgame.game, rgame.set_pixmap, 0, False)
rgame.load_pixmap()
# lk: activity perception delay
time.sleep(0.0005)
pixmap_worker = QRunnable.create(__load_pixmaps)
QThreadPool.globalInstance().start(pixmap_worker)
@property
def games_and_dlcs(self) -> Iterator[RareGame]:
for app_name in self.__library:
yield self.__library[app_name]
@property
def games(self) -> Iterator[RareGame]:
return self.__filter_games(lambda game: not game.is_dlc)
@property
def installed_games(self) -> Iterator[RareGame]:
return self.__filter_games(lambda game: game.is_installed and not game.is_dlc)
@property
def origin_games(self) -> Iterator[RareGame]:
return self.__filter_games(lambda game: game.is_origin and not game.is_dlc)
@property
def game_list(self) -> Iterator[Game]:
for game in self.games:
yield game.game
@property
def dlcs(self) -> Dict[str, Game]:
"""!
RareGames that ARE DLCs themselves
"""
return {game.game.catalog_item_id: game.owned_dlcs for game in self.has_dlcs}
# return self.__filter_games(lambda game: game.is_dlc)
@property
def has_dlcs(self) -> Iterator[RareGame]:
"""!
RareGames that HAVE DLCs associated with them
"""
return self.__filter_games(lambda game: bool(game.owned_dlcs))
@property
def bit32_games(self) -> Iterator[RareGame]:
return self.__filter_games(lambda game: game.is_win32)
@property
def mac_games(self) -> Iterator[RareGame]:
return self.__filter_games(lambda game: game.is_mac)
@property
def non_asset_games(self) -> Iterator[RareGame]:
return self.__filter_games(lambda game: game.is_non_asset)
@property
def unreal_engine(self) -> Iterator[RareGame]:
return self.__filter_games(lambda game: game.is_unreal)
@property
def updates(self) -> Iterator[RareGame]:
return self.__filter_games(lambda game: game.has_update)
@property
def saves(self) -> Iterator[RareSaveGame]:
"""!
SaveGameFiles across games
"""
return chain.from_iterable([game.saves for game in self.has_saves])
@property
def has_saves(self) -> Iterator[RareGame]:
"""!
RareGames that have SaveGameFiles associated with them
"""
return self.__filter_games(lambda game: bool(game.saves)) | PypiClean |
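# Rough lifecycle sketch (illustrative, inferred from this module only): the UI
# constructs RareCore(args) once, connects to `progress`/`completed`, calls
# fetch() to populate the library, and calls load_pixmaps() only after the
# MainWindow has finished initializing; RareCore.instance() then provides global
# access and deleteLater() tears the singleton down on exit.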
/ckan-2.10.1.tar.gz/ckan-2.10.1/ckanext/reclineview/theme/public/vendor/mustache/0.5.0-dev/mustache.min.js | var Mustache=(typeof module!=="undefined"&&module.exports)||{};(function(exports){exports.name="mustache.js";exports.version="0.5.0-dev";exports.tags=["{{","}}"];exports.parse=parse;exports.compile=compile;exports.render=render;exports.clearCache=clearCache;exports.to_html=function(template,view,partials,send){var result=render(template,view,partials);if(typeof send==="function"){send(result);}else{return result;}};var _toString=Object.prototype.toString;var _isArray=Array.isArray;var _forEach=Array.prototype.forEach;var _trim=String.prototype.trim;var isArray;if(_isArray){isArray=_isArray;}else{isArray=function(obj){return _toString.call(obj)==="[object Array]";};}
var forEach;if(_forEach){forEach=function(obj,callback,scope){return _forEach.call(obj,callback,scope);};}else{forEach=function(obj,callback,scope){for(var i=0,len=obj.length;i<len;++i){callback.call(scope,obj[i],i,obj);}};}
var spaceRe=/^\s*$/;function isWhitespace(string){return spaceRe.test(string);}
var trim;if(_trim){trim=function(string){return string==null?"":_trim.call(string);};}else{var trimLeft,trimRight;if(isWhitespace("\xA0")){trimLeft=/^\s+/;trimRight=/\s+$/;}else{trimLeft=/^[\s\xA0]+/;trimRight=/[\s\xA0]+$/;}
trim=function(string){return string==null?"":String(string).replace(trimLeft,"").replace(trimRight,"");};}
var escapeMap={"&":"&amp;","<":"&lt;",">":"&gt;",'"':'&quot;',"'":'&#39;'};function escapeHTML(string){return String(string).replace(/&(?!\w+;)|[<>"']/g,function(s){return escapeMap[s]||s;});}
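// e.g. escapeHTML('<b>"x" & y</b>') === '&lt;b&gt;&quot;x&quot; &amp; y&lt;/b&gt;'; ampersands that already start an entity (the &(?!\w+;) guard) are left as-is.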
function debug(e,template,line,file){file=file||"<template>";var lines=template.split("\n"),start=Math.max(line-3,0),end=Math.min(lines.length,line+3),context=lines.slice(start,end);var c;for(var i=0,len=context.length;i<len;++i){c=i+start+1;context[i]=(c===line?" >> ":" ")+context[i];}
e.template=template;e.line=line;e.file=file;e.message=[file+":"+line,context.join("\n"),"",e.message].join("\n");return e;}
function lookup(name,stack,defaultValue){if(name==="."){return stack[stack.length-1];}
var names=name.split(".");var lastIndex=names.length-1;var target=names[lastIndex];var value,context,i=stack.length,j,localStack;while(i){localStack=stack.slice(0);context=stack[--i];j=0;while(j<lastIndex){context=context[names[j++]];if(context==null){break;}
localStack.push(context);}
if(context&&typeof context==="object"&&target in context){value=context[target];break;}}
if(typeof value==="function"){value=value.call(localStack[localStack.length-1]);}
if(value==null){return defaultValue;}
return value;}
function renderSection(name,stack,callback,inverted){var buffer="";var value=lookup(name,stack);if(inverted){if(value==null||value===false||(isArray(value)&&value.length===0)){buffer+=callback();}}else if(isArray(value)){forEach(value,function(value){stack.push(value);buffer+=callback();stack.pop();});}else if(typeof value==="object"){stack.push(value);buffer+=callback();stack.pop();}else if(typeof value==="function"){var scope=stack[stack.length-1];var scopedRender=function(template){return render(template,scope);};buffer+=value.call(scope,callback(),scopedRender)||"";}else if(value){buffer+=callback();}
return buffer;}
function parse(template,options){options=options||{};var tags=options.tags||exports.tags,openTag=tags[0],closeTag=tags[tags.length-1];var code=['var buffer = "";',"\nvar line = 1;","\ntry {",'\nbuffer += "'];var spaces=[],hasTag=false,nonSpace=false;var stripSpace=function(){if(hasTag&&!nonSpace&&!options.space){while(spaces.length){code.splice(spaces.pop(),1);}}else{spaces=[];}
hasTag=false;nonSpace=false;};var sectionStack=[],updateLine,nextOpenTag,nextCloseTag;var setTags=function(source){tags=trim(source).split(/\s+/);nextOpenTag=tags[0];nextCloseTag=tags[tags.length-1];};var includePartial=function(source){code.push('";',updateLine,'\nvar partial = partials["'+trim(source)+'"];','\nif (partial) {','\n buffer += render(partial,stack[stack.length - 1],partials);','\n}','\nbuffer += "');};var openSection=function(source,inverted){var name=trim(source);if(name===""){throw debug(new Error("Section name may not be empty"),template,line,options.file);}
sectionStack.push({name:name,inverted:inverted});code.push('";',updateLine,'\nvar name = "'+name+'";','\nvar callback = (function () {','\n return function () {','\n var buffer = "";','\nbuffer += "');};var openInvertedSection=function(source){openSection(source,true);};var closeSection=function(source){var name=trim(source);var openName=sectionStack.length!=0&§ionStack[sectionStack.length-1].name;if(!openName||name!=openName){throw debug(new Error('Section named "'+name+'" was never opened'),template,line,options.file);}
var section=sectionStack.pop();code.push('";','\n return buffer;','\n };','\n})();');if(section.inverted){code.push("\nbuffer += renderSection(name,stack,callback,true);");}else{code.push("\nbuffer += renderSection(name,stack,callback);");}
code.push('\nbuffer += "');};var sendPlain=function(source){code.push('";',updateLine,'\nbuffer += lookup("'+trim(source)+'",stack,"");','\nbuffer += "');};var sendEscaped=function(source){code.push('";',updateLine,'\nbuffer += escapeHTML(lookup("'+trim(source)+'",stack,""));','\nbuffer += "');};var line=1,c,callback;for(var i=0,len=template.length;i<len;++i){if(template.slice(i,i+openTag.length)===openTag){i+=openTag.length;c=template.substr(i,1);updateLine='\nline = '+line+';';nextOpenTag=openTag;nextCloseTag=closeTag;hasTag=true;switch(c){case"!":i++;callback=null;break;case"=":i++;closeTag="="+closeTag;callback=setTags;break;case">":i++;callback=includePartial;break;case"#":i++;callback=openSection;break;case"^":i++;callback=openInvertedSection;break;case"/":i++;callback=closeSection;break;case"{":closeTag="}"+closeTag;case"&":i++;nonSpace=true;callback=sendPlain;break;default:nonSpace=true;callback=sendEscaped;}
var end=template.indexOf(closeTag,i);if(end===-1){throw debug(new Error('Tag "'+openTag+'" was not closed properly'),template,line,options.file);}
var source=template.substring(i,end);if(callback){callback(source);}
var n=0;while(~(n=source.indexOf("\n",n))){line++;n++;}
i=end+closeTag.length-1;openTag=nextOpenTag;closeTag=nextCloseTag;}else{c=template.substr(i,1);switch(c){case'"':case"\\":nonSpace=true;code.push("\\"+c);break;case"\r":break;case"\n":spaces.push(code.length);code.push("\\n");stripSpace();line++;break;default:if(isWhitespace(c)){spaces.push(code.length);}else{nonSpace=true;}
code.push(c);}}}
if(sectionStack.length!=0){throw debug(new Error('Section "'+sectionStack[sectionStack.length-1].name+'" was not closed properly'),template,line,options.file);}
stripSpace();code.push('";',"\nreturn buffer;","\n} catch (e) { throw {error: e, line: line}; }");var body=code.join("").replace(/buffer \+= "";\n/g,"");if(options.debug){if(typeof console!="undefined"&&console.log){console.log(body);}else if(typeof print==="function"){print(body);}}
return body;}
function _compile(template,options){var args="view,partials,stack,lookup,escapeHTML,renderSection,render";var body=parse(template,options);var fn=new Function(args,body);return function(view,partials){partials=partials||{};var stack=[view];try{return fn(view,partials,stack,lookup,escapeHTML,renderSection,render);}catch(e){throw debug(e.error,template,e.line,options.file);}};}
var _cache={};function clearCache(){_cache={};}
function compile(template,options){options=options||{};if(options.cache!==false){if(!_cache[template]){_cache[template]=_compile(template,options);}
return _cache[template];}
return _compile(template,options);}
function render(template,view,partials){return compile(template)(view,partials);}})(Mustache); | PypiClean |
/rsl-0.2.1.tar.gz/rsl-0.2.1/README.txt |
RSL - Remote Service Library
============================
This module provides a collection of interfaces and a "plugin" mechanism to
access remote services with different protocols and technology in a unified
way.
The library has been developed as part of a "command line shell service
integration". It has been separated into its own package to allow a modular
installation and it may be useful for other projects too.
RSL is a pure client-side library which allows easy access to web services. It
provides a full abstraction of service protocol and technology and tries to map
each remote service to Python methods. Hence, from a programmer's point of view,
there should be almost no difference (except some minimal boilerplate) between
a local method call and a remote call. Even the protocol in use, be it SOAP,
JSON or anything else, should be completely hidden (but not inaccessible).
One of the main goals of the library is to make it easy to add more web
service and transport protocols without changing all the other parts. This
allows the library to be extended bit by bit and makes each package much more
maintainable. It also keeps the installation footprint at a minimum (only the
required modules need to be installed).
Documentation
-------------
Documentation is currently in a very bad shape, but this will change soon as
the API stabilises. Furthermore, a growing user base which provides feedback
will shorten this time frame further ;).
As mentioned above, the core of this module is the interface definitions and
the discovery mechanism for "protocol-plugins" using Setuptools and Zope
interfaces. In general the philosophy behind the given interfaces is that a
web-service client can be split into various components.
**Transport**:
Every remote access needs some kind of transport. Transport protocols
are pluggable and can easily be replaced to allow easier integration
into other frameworks like Twisted.
**Description**:
As there are many different service description formats, which often
support multiple service protocols, it is just logical to implement
service descriptions as separate components.
**Proxy**:
Components of this kind have the knowledge to convert the information
from a description and protocol standards into a usable Python object,
which acts as a proxy to invoke remote services.
**De/Serialiser**:
As different protocols allow different encoding formats, de/serialisers
are implemented as independent components, which allows them to be reused
wherever appropriate.
**Typesystem**:
XML-based encoding standards often feature their own type system (mostly
expressed as XML-Schema). Such type systems will also be used for JSON,
and probably other future encoding standards will follow. A type system
tries to map between Python data types and the wire representation, and
allows the actual user to ignore the technical details.
All the components above are implemented as independently from each other as
possible, to allow easy replacement, high reusability, great extensibility
and flexibility, and of course easy installation.
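The wiring needed to discover such components is plain Setuptools and
zope.interface machinery. The sketch below is purely illustrative: the
interface, entry-point group and class names are invented for this README and
do not reflect RSL's actual API::

    from pkg_resources import iter_entry_points
    from zope.interface import Interface, implementer

    class ITransport(Interface):
        """Minimal transport contract (hypothetical)."""
        def send(url, data):
            """Deliver a request payload and return the raw response."""

    @implementer(ITransport)
    class DummyTransport(object):
        def send(self, url, data):
            return "<response for %s>" % url

    # A plugin package would advertise its transport in its setup.py, e.g.
    #   entry_points={'example.transports': ['dummy = mypkg:DummyTransport']}
    # and the core library can then discover every installed transport:
    transports = dict((ep.name, ep.load())
                      for ep in iter_entry_points('example.transports'))
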
The following modules are developed along with this module:
* `rsl.wsdl <http://cheeseshop.python.org/pypi/rsl.wsdl>`_ : common WSDL module
* `rsl.wsdl1 <http://cheeseshop.python.org/pypi/rsl.wsdl1>`_ : WSDL 1
* `rsl.soap11 <http://cheeseshop.python.org/pypi/rsl.soap11>`_ : SOAP 1.1 + WSDL 1 SOAP 1.1 extension
* `rsl.mime <http://cheeseshop.python.org/pypi/rsl.mime>`_ : WSDL 1 MIME extension
* `rsl.http <http://cheeseshop.python.org/pypi/rsl.http>`_ : WSDL 1 HTTP extension
* `rsl.rest <http://cheeseshop.python.org/pypi/rsl.rest>`_ : REST like services
* `rsl.jsonrpc10 <http://cheeseshop.python.org/pypi/rsl.jsonrpc10>`_ : JSON-RPC 1.0 implementation
* `rsl.smd01 <http://cheeseshop.python.org/pypi/rsl.smd01>`_ : SMD 0.1 implementation
* `rsl.upnp <http://cheeseshop.python.org/pypi/rsl.upnp>`_ : UPnP description
* `rsl.xmlrpc <http://cheeseshop.python.org/pypi/rsl.xmlrpc>`_ : XML-RPC
* `rsl.xsd <http://cheeseshop.python.org/pypi/rsl.xsd>`_ : XML Schema type system
For examples of how to use the library, please see the additional modules and
the tests included there.
Status
------
The library works quite well for the command line service integration.
The installation is quite effortless with Python eggs and easy_install.
However, as the whole project has grown quite large and is still at an early
development stage, there are many things which are not solved that elegantly,
and it is very likely that some refactoring will happen in the near future.
Most of that refactoring will make the code base more conformant to
PEP 8. The high likelihood of API changes is also the reason why this
release is currently classified *alpha*.
An important to-do is to remove as many inconveniences from the library
as possible, which will also greatly improve the overall library design.
The type system component is a bit cumbersome, and currently there is only one
implementation (XML-Schema) available for it. I hope that the situation here
will improve once at least a second type system implementation becomes
available.
Comment
-------
As this library also supports SOAP, there may be the question: "Why another
SOAP library?". The simple answer is that there is no working client-side
SOAP library available. I tested all three (or four?) major SOAP libraries,
and not one of them worked with the web service I needed. Most of those
SOAP libraries are just too simple for my use cases, and others have some
major interoperability problems. (So two reasons: extending an already
available library would be about the same effort, and fixing a full-featured
library would be even more effort because of its ugly code base.)
| PypiClean |
/alastria-identity-0.4.0.tar.gz/alastria-identity-0.4.0/alastria_identity/types/__init__.py | import time
from typing import List
from dataclasses import dataclass, field
from web3 import Web3
from .alastria_session import AlastriaSession
from .alastria_token import AlastriaToken
from .alastria_identity_creation import AlastriaIdentityCreation
from .transaction import Transaction
from .entity import Entity
from .credential import Credential
from .presentation_request import PresentationRequest, PresentationRequestData
DEFAULT_GAS_LIMIT = 600000
DEFAULT_NONCE = '0x0'
@dataclass
class PublicKeyStatus:
exists: bool
status: int
startDate: int
endDate: int
@dataclass
class JwtToken:
header: dict
payload: dict
@dataclass
class Transaction:
to: str = '0x0000000000000000000000000000000000000000'
data: str = '0x0'
gasPrice: int = 0
nonce: str = DEFAULT_NONCE
gas: int = DEFAULT_GAS_LIMIT
@dataclass
class IdentityConfig:
identity_manager: str
credential_registry: str
presentation_registry: str
publickey_registry: str
basic_transaction: Transaction
contracts_abi: dict
zeroValue: str = '00000000000000000000000000000000000000000000000000000000000000000000'
@dataclass
class UserIdentity:
endpoint: Web3
address: str
private_key: str
nonce: int
transactions: List
@dataclass
class NetworkDid:
network: str
network_id: str
proxy_address: str
@classmethod
def from_did(cls, did: str):
network_items = did.split(':')
return NetworkDid(
network=network_items[2],
network_id=network_items[3],
proxy_address=network_items[4]
)
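# Illustrative sketch only: the DID below is a made-up placeholder, but the
# field mapping follows from_did() above (network, network id and proxy
# address are the 3rd, 4th and 5th ':'-separated segments).
#   net = NetworkDid.from_did("did:ala:quor:redT:0x1234abcd")
#   net.network == "quor"; net.network_id == "redT"; net.proxy_address == "0x1234abcd"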
@dataclass
class UnsignedCredential:
iss: str
context: List[str]
credential_subject: dict
type: List[str]
kid: str = ''
sub: str = ''
exp: int = 0
nbf: int = 0
jti: str = ''
jwk: str = ''
REQUIRED_CONTEXT: List[str] = field(default_factory=lambda: [
'https://www.w3.org/2018/credentials/v1',
'https://alastria.github.io/identity/credentials/v1'
])
REQUIRED_TYPES: List[str] = field(default_factory=lambda: [
'VerifiableCredential',
'AlastriaVerifiableCredential'
])
def get_jwt_payload(self):
return {
'header': {
'typ': 'JWT',
'alg': 'ES256K',
'kid': self.kid,
'jwk': self.jwk
},
'payload': {
'jti': self.jti,
'iss': self.iss,
'sub': self.sub,
'iat': int(time.time()),
'exp': self.exp,
'nbf': self.nbf,
'vc': {
'@context': self.context,
'type': self.type,
'credentialSubject': self.credential_subject
}
}
}
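# Illustrative sketch only: every field value below is a placeholder, not a
# real Alastria credential.
#   cred = UnsignedCredential(
#       iss="did:ala:quor:redT:0x1234abcd",
#       context=["https://example.org/context/v1"],
#       credential_subject={"fullName": "Jane Doe"},
#       type=["ExampleCredential"],
#   )
#   token_parts = cred.get_jwt_payload()  # {'header': {...}, 'payload': {...}}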
@dataclass
class UnsignedPresentation:
iss: str
aud: str
context: List[str]
verifiable_credential: List[str]
proc_url: str
proc_hash: str
type: str
kid: str = ''
jwk: str = ''
exp: str = ''
nbf: str = ''
jti: str = ''
REQUIRED_CONTEXT: List[str] = field(default_factory=lambda: [
'https://www.w3.org/2018/credentials/v1',
'https://alastria.github.io/identity/credentials/v1'
])
REQUIRED_TYPES: List[str] = field(default_factory=lambda: [
'VerifiableCredential',
'AlastriaVerifiablePresentationRequest'
])
def get_jwt_payload(self):
return {
'header': {
'alg': 'ES256K',
'typ': 'JWT',
'kid': self.kid,
'jwk': self.jwk
},
'payload': {
'jti': self.jti,
'iss': self.iss,
'aud': self.aud,
'iat': int(time.time()),
'exp': self.exp,
'nbf': self.nbf,
'vp': {
'@context': self.REQUIRED_CONTEXT + self.context,
'type': self.REQUIRED_TYPES + self.type,
'procHash': self.proc_hash,
'procUrl': self.proc_url,
'verifiableCredential': self.verifiable_credential
}
}
} | PypiClean |
/columbia-discord-bot-0.2.1.tar.gz/columbia-discord-bot-0.2.1/docs/_build/html/_static/pygments/lexers/_scilab_builtins.py |
commands_kw = (
'abort',
'apropos',
'break',
'case',
'catch',
'continue',
'do',
'else',
'elseif',
'end',
'endfunction',
'for',
'function',
'help',
'if',
'pause',
'quit',
'select',
'then',
'try',
'while',
)
functions_kw = (
'!!_invoke_',
'%H5Object_e',
'%H5Object_fieldnames',
'%H5Object_p',
'%XMLAttr_6',
'%XMLAttr_e',
'%XMLAttr_i_XMLElem',
'%XMLAttr_length',
'%XMLAttr_p',
'%XMLAttr_size',
'%XMLDoc_6',
'%XMLDoc_e',
'%XMLDoc_i_XMLList',
'%XMLDoc_p',
'%XMLElem_6',
'%XMLElem_e',
'%XMLElem_i_XMLDoc',
'%XMLElem_i_XMLElem',
'%XMLElem_i_XMLList',
'%XMLElem_p',
'%XMLList_6',
'%XMLList_e',
'%XMLList_i_XMLElem',
'%XMLList_i_XMLList',
'%XMLList_length',
'%XMLList_p',
'%XMLList_size',
'%XMLNs_6',
'%XMLNs_e',
'%XMLNs_i_XMLElem',
'%XMLNs_p',
'%XMLSet_6',
'%XMLSet_e',
'%XMLSet_length',
'%XMLSet_p',
'%XMLSet_size',
'%XMLValid_p',
'%_EClass_6',
'%_EClass_e',
'%_EClass_p',
'%_EObj_0',
'%_EObj_1__EObj',
'%_EObj_1_b',
'%_EObj_1_c',
'%_EObj_1_i',
'%_EObj_1_s',
'%_EObj_2__EObj',
'%_EObj_2_b',
'%_EObj_2_c',
'%_EObj_2_i',
'%_EObj_2_s',
'%_EObj_3__EObj',
'%_EObj_3_b',
'%_EObj_3_c',
'%_EObj_3_i',
'%_EObj_3_s',
'%_EObj_4__EObj',
'%_EObj_4_b',
'%_EObj_4_c',
'%_EObj_4_i',
'%_EObj_4_s',
'%_EObj_5',
'%_EObj_6',
'%_EObj_a__EObj',
'%_EObj_a_b',
'%_EObj_a_c',
'%_EObj_a_i',
'%_EObj_a_s',
'%_EObj_d__EObj',
'%_EObj_d_b',
'%_EObj_d_c',
'%_EObj_d_i',
'%_EObj_d_s',
'%_EObj_disp',
'%_EObj_e',
'%_EObj_g__EObj',
'%_EObj_g_b',
'%_EObj_g_c',
'%_EObj_g_i',
'%_EObj_g_s',
'%_EObj_h__EObj',
'%_EObj_h_b',
'%_EObj_h_c',
'%_EObj_h_i',
'%_EObj_h_s',
'%_EObj_i__EObj',
'%_EObj_j__EObj',
'%_EObj_j_b',
'%_EObj_j_c',
'%_EObj_j_i',
'%_EObj_j_s',
'%_EObj_k__EObj',
'%_EObj_k_b',
'%_EObj_k_c',
'%_EObj_k_i',
'%_EObj_k_s',
'%_EObj_l__EObj',
'%_EObj_l_b',
'%_EObj_l_c',
'%_EObj_l_i',
'%_EObj_l_s',
'%_EObj_m__EObj',
'%_EObj_m_b',
'%_EObj_m_c',
'%_EObj_m_i',
'%_EObj_m_s',
'%_EObj_n__EObj',
'%_EObj_n_b',
'%_EObj_n_c',
'%_EObj_n_i',
'%_EObj_n_s',
'%_EObj_o__EObj',
'%_EObj_o_b',
'%_EObj_o_c',
'%_EObj_o_i',
'%_EObj_o_s',
'%_EObj_p',
'%_EObj_p__EObj',
'%_EObj_p_b',
'%_EObj_p_c',
'%_EObj_p_i',
'%_EObj_p_s',
'%_EObj_q__EObj',
'%_EObj_q_b',
'%_EObj_q_c',
'%_EObj_q_i',
'%_EObj_q_s',
'%_EObj_r__EObj',
'%_EObj_r_b',
'%_EObj_r_c',
'%_EObj_r_i',
'%_EObj_r_s',
'%_EObj_s__EObj',
'%_EObj_s_b',
'%_EObj_s_c',
'%_EObj_s_i',
'%_EObj_s_s',
'%_EObj_t',
'%_EObj_x__EObj',
'%_EObj_x_b',
'%_EObj_x_c',
'%_EObj_x_i',
'%_EObj_x_s',
'%_EObj_y__EObj',
'%_EObj_y_b',
'%_EObj_y_c',
'%_EObj_y_i',
'%_EObj_y_s',
'%_EObj_z__EObj',
'%_EObj_z_b',
'%_EObj_z_c',
'%_EObj_z_i',
'%_EObj_z_s',
'%_eigs',
'%_load',
'%b_1__EObj',
'%b_2__EObj',
'%b_3__EObj',
'%b_4__EObj',
'%b_a__EObj',
'%b_d__EObj',
'%b_g__EObj',
'%b_h__EObj',
'%b_i_XMLList',
'%b_i__EObj',
'%b_j__EObj',
'%b_k__EObj',
'%b_l__EObj',
'%b_m__EObj',
'%b_n__EObj',
'%b_o__EObj',
'%b_p__EObj',
'%b_q__EObj',
'%b_r__EObj',
'%b_s__EObj',
'%b_x__EObj',
'%b_y__EObj',
'%b_z__EObj',
'%c_1__EObj',
'%c_2__EObj',
'%c_3__EObj',
'%c_4__EObj',
'%c_a__EObj',
'%c_d__EObj',
'%c_g__EObj',
'%c_h__EObj',
'%c_i_XMLAttr',
'%c_i_XMLDoc',
'%c_i_XMLElem',
'%c_i_XMLList',
'%c_i__EObj',
'%c_j__EObj',
'%c_k__EObj',
'%c_l__EObj',
'%c_m__EObj',
'%c_n__EObj',
'%c_o__EObj',
'%c_p__EObj',
'%c_q__EObj',
'%c_r__EObj',
'%c_s__EObj',
'%c_x__EObj',
'%c_y__EObj',
'%c_z__EObj',
'%ce_i_XMLList',
'%fptr_i_XMLList',
'%h_i_XMLList',
'%hm_i_XMLList',
'%i_1__EObj',
'%i_2__EObj',
'%i_3__EObj',
'%i_4__EObj',
'%i_a__EObj',
'%i_abs',
'%i_cumprod',
'%i_cumsum',
'%i_d__EObj',
'%i_diag',
'%i_g__EObj',
'%i_h__EObj',
'%i_i_XMLList',
'%i_i__EObj',
'%i_j__EObj',
'%i_k__EObj',
'%i_l__EObj',
'%i_m__EObj',
'%i_matrix',
'%i_max',
'%i_maxi',
'%i_min',
'%i_mini',
'%i_mput',
'%i_n__EObj',
'%i_o__EObj',
'%i_p',
'%i_p__EObj',
'%i_prod',
'%i_q__EObj',
'%i_r__EObj',
'%i_s__EObj',
'%i_sum',
'%i_tril',
'%i_triu',
'%i_x__EObj',
'%i_y__EObj',
'%i_z__EObj',
'%ip_i_XMLList',
'%l_i_XMLList',
'%l_i__EObj',
'%lss_i_XMLList',
'%mc_i_XMLList',
'%msp_full',
'%msp_i_XMLList',
'%msp_spget',
'%p_i_XMLList',
'%ptr_i_XMLList',
'%r_i_XMLList',
'%s_1__EObj',
'%s_2__EObj',
'%s_3__EObj',
'%s_4__EObj',
'%s_a__EObj',
'%s_d__EObj',
'%s_g__EObj',
'%s_h__EObj',
'%s_i_XMLList',
'%s_i__EObj',
'%s_j__EObj',
'%s_k__EObj',
'%s_l__EObj',
'%s_m__EObj',
'%s_n__EObj',
'%s_o__EObj',
'%s_p__EObj',
'%s_q__EObj',
'%s_r__EObj',
'%s_s__EObj',
'%s_x__EObj',
'%s_y__EObj',
'%s_z__EObj',
'%sp_i_XMLList',
'%spb_i_XMLList',
'%st_i_XMLList',
'Calendar',
'ClipBoard',
'Matplot',
'Matplot1',
'PlaySound',
'TCL_DeleteInterp',
'TCL_DoOneEvent',
'TCL_EvalFile',
'TCL_EvalStr',
'TCL_ExistArray',
'TCL_ExistInterp',
'TCL_ExistVar',
'TCL_GetVar',
'TCL_GetVersion',
'TCL_SetVar',
'TCL_UnsetVar',
'TCL_UpVar',
'_',
'_code2str',
'_d',
'_str2code',
'about',
'abs',
'acos',
'addModulePreferences',
'addcolor',
'addf',
'addhistory',
'addinter',
'addlocalizationdomain',
'amell',
'and',
'argn',
'arl2_ius',
'ascii',
'asin',
'atan',
'backslash',
'balanc',
'banner',
'base2dec',
'basename',
'bdiag',
'beep',
'besselh',
'besseli',
'besselj',
'besselk',
'bessely',
'beta',
'bezout',
'bfinit',
'blkfc1i',
'blkslvi',
'bool2s',
'browsehistory',
'browsevar',
'bsplin3val',
'buildDoc',
'buildouttb',
'bvode',
'c_link',
'call',
'callblk',
'captions',
'cd',
'cdfbet',
'cdfbin',
'cdfchi',
'cdfchn',
'cdff',
'cdffnc',
'cdfgam',
'cdfnbn',
'cdfnor',
'cdfpoi',
'cdft',
'ceil',
'champ',
'champ1',
'chdir',
'chol',
'clc',
'clean',
'clear',
'clearfun',
'clearglobal',
'closeEditor',
'closeEditvar',
'closeXcos',
'code2str',
'coeff',
'color',
'comp',
'completion',
'conj',
'contour2di',
'contr',
'conv2',
'convstr',
'copy',
'copyfile',
'corr',
'cos',
'coserror',
'createdir',
'cshep2d',
'csvDefault',
'csvIsnum',
'csvRead',
'csvStringToDouble',
'csvTextScan',
'csvWrite',
'ctree2',
'ctree3',
'ctree4',
'cumprod',
'cumsum',
'curblock',
'curblockc',
'daskr',
'dasrt',
'dassl',
'data2sig',
'datatipCreate',
'datatipManagerMode',
'datatipMove',
'datatipRemove',
'datatipSetDisplay',
'datatipSetInterp',
'datatipSetOrientation',
'datatipSetStyle',
'datatipToggle',
'dawson',
'dct',
'debug',
'dec2base',
'deff',
'definedfields',
'degree',
'delbpt',
'delete',
'deletefile',
'delip',
'delmenu',
'det',
'dgettext',
'dhinf',
'diag',
'diary',
'diffobjs',
'disp',
'dispbpt',
'displayhistory',
'disposefftwlibrary',
'dlgamma',
'dnaupd',
'dneupd',
'double',
'drawaxis',
'drawlater',
'drawnow',
'driver',
'dsaupd',
'dsearch',
'dseupd',
'dst',
'duplicate',
'editvar',
'emptystr',
'end_scicosim',
'ereduc',
'erf',
'erfc',
'erfcx',
'erfi',
'errcatch',
'errclear',
'error',
'eval_cshep2d',
'exec',
'execstr',
'exists',
'exit',
'exp',
'expm',
'exportUI',
'export_to_hdf5',
'eye',
'fadj2sp',
'fec',
'feval',
'fft',
'fftw',
'fftw_flags',
'fftw_forget_wisdom',
'fftwlibraryisloaded',
'figure',
'file',
'filebrowser',
'fileext',
'fileinfo',
'fileparts',
'filesep',
'find',
'findBD',
'findfiles',
'fire_closing_finished',
'floor',
'format',
'fort',
'fprintfMat',
'freq',
'frexp',
'fromc',
'fromjava',
'fscanfMat',
'fsolve',
'fstair',
'full',
'fullpath',
'funcprot',
'funptr',
'gamma',
'gammaln',
'geom3d',
'get',
'getURL',
'get_absolute_file_path',
'get_fftw_wisdom',
'getblocklabel',
'getcallbackobject',
'getdate',
'getdebuginfo',
'getdefaultlanguage',
'getdrives',
'getdynlibext',
'getenv',
'getfield',
'gethistory',
'gethistoryfile',
'getinstalledlookandfeels',
'getio',
'getlanguage',
'getlongpathname',
'getlookandfeel',
'getmd5',
'getmemory',
'getmodules',
'getos',
'getpid',
'getrelativefilename',
'getscicosvars',
'getscilabmode',
'getshortpathname',
'gettext',
'getvariablesonstack',
'getversion',
'glist',
'global',
'glue',
'grand',
'graphicfunction',
'grayplot',
'grep',
'gsort',
'gstacksize',
'h5attr',
'h5close',
'h5cp',
'h5dataset',
'h5dump',
'h5exists',
'h5flush',
'h5get',
'h5group',
'h5isArray',
'h5isAttr',
'h5isCompound',
'h5isFile',
'h5isGroup',
'h5isList',
'h5isRef',
'h5isSet',
'h5isSpace',
'h5isType',
'h5isVlen',
'h5label',
'h5ln',
'h5ls',
'h5mount',
'h5mv',
'h5open',
'h5read',
'h5readattr',
'h5rm',
'h5umount',
'h5write',
'h5writeattr',
'havewindow',
'helpbrowser',
'hess',
'hinf',
'historymanager',
'historysize',
'host',
'htmlDump',
'htmlRead',
'htmlReadStr',
'htmlWrite',
'iconvert',
'ieee',
'ilib_verbose',
'imag',
'impl',
'import_from_hdf5',
'imult',
'inpnvi',
'int',
'int16',
'int2d',
'int32',
'int3d',
'int8',
'interp',
'interp2d',
'interp3d',
'intg',
'intppty',
'inttype',
'inv',
'invoke_lu',
'is_handle_valid',
'is_hdf5_file',
'isalphanum',
'isascii',
'isdef',
'isdigit',
'isdir',
'isequal',
'isequalbitwise',
'iserror',
'isfile',
'isglobal',
'isletter',
'isnum',
'isreal',
'iswaitingforinput',
'jallowClassReloading',
'jarray',
'jautoTranspose',
'jautoUnwrap',
'javaclasspath',
'javalibrarypath',
'jcast',
'jcompile',
'jconvMatrixMethod',
'jcreatejar',
'jdeff',
'jdisableTrace',
'jenableTrace',
'jexists',
'jgetclassname',
'jgetfield',
'jgetfields',
'jgetinfo',
'jgetmethods',
'jimport',
'jinvoke',
'jinvoke_db',
'jnewInstance',
'jremove',
'jsetfield',
'junwrap',
'junwraprem',
'jwrap',
'jwrapinfloat',
'kron',
'lasterror',
'ldiv',
'ldivf',
'legendre',
'length',
'lib',
'librarieslist',
'libraryinfo',
'light',
'linear_interpn',
'lines',
'link',
'linmeq',
'list',
'listvar_in_hdf5',
'load',
'loadGui',
'loadScicos',
'loadXcos',
'loadfftwlibrary',
'loadhistory',
'log',
'log1p',
'lsq',
'lsq_splin',
'lsqrsolve',
'lsslist',
'lstcat',
'lstsize',
'ltitr',
'lu',
'ludel',
'lufact',
'luget',
'lusolve',
'macr2lst',
'macr2tree',
'matfile_close',
'matfile_listvar',
'matfile_open',
'matfile_varreadnext',
'matfile_varwrite',
'matrix',
'max',
'maxfiles',
'mclearerr',
'mclose',
'meof',
'merror',
'messagebox',
'mfprintf',
'mfscanf',
'mget',
'mgeti',
'mgetl',
'mgetstr',
'min',
'mlist',
'mode',
'model2blk',
'mopen',
'move',
'movefile',
'mprintf',
'mput',
'mputl',
'mputstr',
'mscanf',
'mseek',
'msprintf',
'msscanf',
'mtell',
'mtlb_mode',
'mtlb_sparse',
'mucomp',
'mulf',
'name2rgb',
'nearfloat',
'newaxes',
'newest',
'newfun',
'nnz',
'norm',
'notify',
'number_properties',
'ode',
'odedc',
'ones',
'openged',
'opentk',
'optim',
'or',
'ordmmd',
'parallel_concurrency',
'parallel_run',
'param3d',
'param3d1',
'part',
'pathconvert',
'pathsep',
'phase_simulation',
'plot2d',
'plot2d1',
'plot2d2',
'plot2d3',
'plot2d4',
'plot3d',
'plot3d1',
'plotbrowser',
'pointer_xproperty',
'poly',
'ppol',
'pppdiv',
'predef',
'preferences',
'print',
'printf',
'printfigure',
'printsetupbox',
'prod',
'progressionbar',
'prompt',
'pwd',
'qld',
'qp_solve',
'qr',
'raise_window',
'rand',
'rankqr',
'rat',
'rcond',
'rdivf',
'read',
'read4b',
'read_csv',
'readb',
'readgateway',
'readmps',
'real',
'realtime',
'realtimeinit',
'regexp',
'relocate_handle',
'remez',
'removeModulePreferences',
'removedir',
'removelinehistory',
'res_with_prec',
'resethistory',
'residu',
'resume',
'return',
'ricc',
'rlist',
'roots',
'rotate_axes',
'round',
'rpem',
'rtitr',
'rubberbox',
'save',
'saveGui',
'saveafterncommands',
'saveconsecutivecommands',
'savehistory',
'schur',
'sci_haltscicos',
'sci_tree2',
'sci_tree3',
'sci_tree4',
'sciargs',
'scicos_debug',
'scicos_debug_count',
'scicos_time',
'scicosim',
'scinotes',
'sctree',
'semidef',
'set',
'set_blockerror',
'set_fftw_wisdom',
'set_xproperty',
'setbpt',
'setdefaultlanguage',
'setenv',
'setfield',
'sethistoryfile',
'setlanguage',
'setlookandfeel',
'setmenu',
'sfact',
'sfinit',
'show_window',
'sident',
'sig2data',
'sign',
'simp',
'simp_mode',
'sin',
'size',
'slash',
'sleep',
'sorder',
'sparse',
'spchol',
'spcompack',
'spec',
'spget',
'splin',
'splin2d',
'splin3d',
'splitURL',
'spones',
'sprintf',
'sqrt',
'stacksize',
'str2code',
'strcat',
'strchr',
'strcmp',
'strcspn',
'strindex',
'string',
'stringbox',
'stripblanks',
'strncpy',
'strrchr',
'strrev',
'strsplit',
'strspn',
'strstr',
'strsubst',
'strtod',
'strtok',
'subf',
'sum',
'svd',
'swap_handles',
'symfcti',
'syredi',
'system_getproperty',
'system_setproperty',
'ta2lpd',
'tan',
'taucs_chdel',
'taucs_chfact',
'taucs_chget',
'taucs_chinfo',
'taucs_chsolve',
'tempname',
'testmatrix',
'timer',
'tlist',
'tohome',
'tokens',
'toolbar',
'toprint',
'tr_zer',
'tril',
'triu',
'type',
'typename',
'uiDisplayTree',
'uicontextmenu',
'uicontrol',
'uigetcolor',
'uigetdir',
'uigetfile',
'uigetfont',
'uimenu',
'uint16',
'uint32',
'uint8',
'uipopup',
'uiputfile',
'uiwait',
'ulink',
'umf_ludel',
'umf_lufact',
'umf_luget',
'umf_luinfo',
'umf_lusolve',
'umfpack',
'unglue',
'unix',
'unsetmenu',
'unzoom',
'updatebrowsevar',
'usecanvas',
'useeditor',
'user',
'var2vec',
'varn',
'vec2var',
'waitbar',
'warnBlockByUID',
'warning',
'what',
'where',
'whereis',
'who',
'winsid',
'with_module',
'writb',
'write',
'write4b',
'write_csv',
'x_choose',
'x_choose_modeless',
'x_dialog',
'x_mdialog',
'xarc',
'xarcs',
'xarrows',
'xchange',
'xchoicesi',
'xclick',
'xcos',
'xcosAddToolsMenu',
'xcosConfigureXmlFile',
'xcosDiagramToScilab',
'xcosPalCategoryAdd',
'xcosPalDelete',
'xcosPalDisable',
'xcosPalEnable',
'xcosPalGenerateIcon',
'xcosPalGet',
'xcosPalLoad',
'xcosPalMove',
'xcosSimulationStarted',
'xcosUpdateBlock',
'xdel',
'xend',
'xfarc',
'xfarcs',
'xfpoly',
'xfpolys',
'xfrect',
'xget',
'xgetmouse',
'xgraduate',
'xgrid',
'xinit',
'xlfont',
'xls_open',
'xls_read',
'xmlAddNs',
'xmlAppend',
'xmlAsNumber',
'xmlAsText',
'xmlDTD',
'xmlDelete',
'xmlDocument',
'xmlDump',
'xmlElement',
'xmlFormat',
'xmlGetNsByHref',
'xmlGetNsByPrefix',
'xmlGetOpenDocs',
'xmlIsValidObject',
'xmlName',
'xmlNs',
'xmlRead',
'xmlReadStr',
'xmlRelaxNG',
'xmlRemove',
'xmlSchema',
'xmlSetAttributes',
'xmlValidate',
'xmlWrite',
'xmlXPath',
'xname',
'xpause',
'xpoly',
'xpolys',
'xrect',
'xrects',
'xs2bmp',
'xs2emf',
'xs2eps',
'xs2gif',
'xs2jpg',
'xs2pdf',
'xs2png',
'xs2ppm',
'xs2ps',
'xs2svg',
'xsegs',
'xset',
'xstring',
'xstringb',
'xtitle',
'zeros',
'znaupd',
'zneupd',
'zoom_rect',
)
macros_kw = (
'!_deff_wrapper',
'%0_i_st',
'%3d_i_h',
'%Block_xcosUpdateBlock',
'%TNELDER_p',
'%TNELDER_string',
'%TNMPLOT_p',
'%TNMPLOT_string',
'%TOPTIM_p',
'%TOPTIM_string',
'%TSIMPLEX_p',
'%TSIMPLEX_string',
'%_EVoid_p',
'%_gsort',
'%_listvarinfile',
'%_rlist',
'%_save',
'%_sodload',
'%_strsplit',
'%_unwrap',
'%ar_p',
'%asn',
'%b_a_b',
'%b_a_s',
'%b_c_s',
'%b_c_spb',
'%b_cumprod',
'%b_cumsum',
'%b_d_s',
'%b_diag',
'%b_e',
'%b_f_s',
'%b_f_spb',
'%b_g_s',
'%b_g_spb',
'%b_grand',
'%b_h_s',
'%b_h_spb',
'%b_i_b',
'%b_i_ce',
'%b_i_h',
'%b_i_hm',
'%b_i_s',
'%b_i_sp',
'%b_i_spb',
'%b_i_st',
'%b_iconvert',
'%b_l_b',
'%b_l_s',
'%b_m_b',
'%b_m_s',
'%b_matrix',
'%b_n_hm',
'%b_o_hm',
'%b_p_s',
'%b_prod',
'%b_r_b',
'%b_r_s',
'%b_s_b',
'%b_s_s',
'%b_string',
'%b_sum',
'%b_tril',
'%b_triu',
'%b_x_b',
'%b_x_s',
'%bicg',
'%bicgstab',
'%c_a_c',
'%c_b_c',
'%c_b_s',
'%c_diag',
'%c_dsearch',
'%c_e',
'%c_eye',
'%c_f_s',
'%c_grand',
'%c_i_c',
'%c_i_ce',
'%c_i_h',
'%c_i_hm',
'%c_i_lss',
'%c_i_r',
'%c_i_s',
'%c_i_st',
'%c_matrix',
'%c_n_l',
'%c_n_st',
'%c_o_l',
'%c_o_st',
'%c_ones',
'%c_rand',
'%c_tril',
'%c_triu',
'%cblock_c_cblock',
'%cblock_c_s',
'%cblock_e',
'%cblock_f_cblock',
'%cblock_p',
'%cblock_size',
'%ce_6',
'%ce_c_ce',
'%ce_e',
'%ce_f_ce',
'%ce_i_ce',
'%ce_i_s',
'%ce_i_st',
'%ce_matrix',
'%ce_p',
'%ce_size',
'%ce_string',
'%ce_t',
'%cgs',
'%champdat_i_h',
'%choose',
'%diagram_xcos',
'%dir_p',
'%fptr_i_st',
'%grand_perm',
'%grayplot_i_h',
'%h_i_st',
'%hmS_k_hmS_generic',
'%hm_1_hm',
'%hm_1_s',
'%hm_2_hm',
'%hm_2_s',
'%hm_3_hm',
'%hm_3_s',
'%hm_4_hm',
'%hm_4_s',
'%hm_5',
'%hm_a_hm',
'%hm_a_r',
'%hm_a_s',
'%hm_abs',
'%hm_and',
'%hm_bool2s',
'%hm_c_hm',
'%hm_ceil',
'%hm_conj',
'%hm_cos',
'%hm_cumprod',
'%hm_cumsum',
'%hm_d_hm',
'%hm_d_s',
'%hm_degree',
'%hm_dsearch',
'%hm_e',
'%hm_exp',
'%hm_eye',
'%hm_f_hm',
'%hm_find',
'%hm_floor',
'%hm_g_hm',
'%hm_grand',
'%hm_gsort',
'%hm_h_hm',
'%hm_i_b',
'%hm_i_ce',
'%hm_i_h',
'%hm_i_hm',
'%hm_i_i',
'%hm_i_p',
'%hm_i_r',
'%hm_i_s',
'%hm_i_st',
'%hm_iconvert',
'%hm_imag',
'%hm_int',
'%hm_isnan',
'%hm_isreal',
'%hm_j_hm',
'%hm_j_s',
'%hm_k_hm',
'%hm_k_s',
'%hm_log',
'%hm_m_p',
'%hm_m_r',
'%hm_m_s',
'%hm_matrix',
'%hm_max',
'%hm_mean',
'%hm_median',
'%hm_min',
'%hm_n_b',
'%hm_n_c',
'%hm_n_hm',
'%hm_n_i',
'%hm_n_p',
'%hm_n_s',
'%hm_o_b',
'%hm_o_c',
'%hm_o_hm',
'%hm_o_i',
'%hm_o_p',
'%hm_o_s',
'%hm_ones',
'%hm_or',
'%hm_p',
'%hm_prod',
'%hm_q_hm',
'%hm_r_s',
'%hm_rand',
'%hm_real',
'%hm_round',
'%hm_s',
'%hm_s_hm',
'%hm_s_r',
'%hm_s_s',
'%hm_sign',
'%hm_sin',
'%hm_size',
'%hm_sqrt',
'%hm_stdev',
'%hm_string',
'%hm_sum',
'%hm_x_hm',
'%hm_x_p',
'%hm_x_s',
'%hm_zeros',
'%i_1_s',
'%i_2_s',
'%i_3_s',
'%i_4_s',
'%i_Matplot',
'%i_a_i',
'%i_a_s',
'%i_and',
'%i_ascii',
'%i_b_s',
'%i_bezout',
'%i_champ',
'%i_champ1',
'%i_contour',
'%i_contour2d',
'%i_d_i',
'%i_d_s',
'%i_dsearch',
'%i_e',
'%i_fft',
'%i_g_i',
'%i_gcd',
'%i_grand',
'%i_h_i',
'%i_i_ce',
'%i_i_h',
'%i_i_hm',
'%i_i_i',
'%i_i_s',
'%i_i_st',
'%i_j_i',
'%i_j_s',
'%i_l_s',
'%i_lcm',
'%i_length',
'%i_m_i',
'%i_m_s',
'%i_mfprintf',
'%i_mprintf',
'%i_msprintf',
'%i_n_s',
'%i_o_s',
'%i_or',
'%i_p_i',
'%i_p_s',
'%i_plot2d',
'%i_plot2d1',
'%i_plot2d2',
'%i_q_s',
'%i_r_i',
'%i_r_s',
'%i_round',
'%i_s_i',
'%i_s_s',
'%i_sign',
'%i_string',
'%i_x_i',
'%i_x_s',
'%ip_a_s',
'%ip_i_st',
'%ip_m_s',
'%ip_n_ip',
'%ip_o_ip',
'%ip_p',
'%ip_part',
'%ip_s_s',
'%ip_string',
'%k',
'%l_i_h',
'%l_i_s',
'%l_i_st',
'%l_isequal',
'%l_n_c',
'%l_n_l',
'%l_n_m',
'%l_n_p',
'%l_n_s',
'%l_n_st',
'%l_o_c',
'%l_o_l',
'%l_o_m',
'%l_o_p',
'%l_o_s',
'%l_o_st',
'%lss_a_lss',
'%lss_a_p',
'%lss_a_r',
'%lss_a_s',
'%lss_c_lss',
'%lss_c_p',
'%lss_c_r',
'%lss_c_s',
'%lss_e',
'%lss_eye',
'%lss_f_lss',
'%lss_f_p',
'%lss_f_r',
'%lss_f_s',
'%lss_i_ce',
'%lss_i_lss',
'%lss_i_p',
'%lss_i_r',
'%lss_i_s',
'%lss_i_st',
'%lss_inv',
'%lss_l_lss',
'%lss_l_p',
'%lss_l_r',
'%lss_l_s',
'%lss_m_lss',
'%lss_m_p',
'%lss_m_r',
'%lss_m_s',
'%lss_n_lss',
'%lss_n_p',
'%lss_n_r',
'%lss_n_s',
'%lss_norm',
'%lss_o_lss',
'%lss_o_p',
'%lss_o_r',
'%lss_o_s',
'%lss_ones',
'%lss_r_lss',
'%lss_r_p',
'%lss_r_r',
'%lss_r_s',
'%lss_rand',
'%lss_s',
'%lss_s_lss',
'%lss_s_p',
'%lss_s_r',
'%lss_s_s',
'%lss_size',
'%lss_t',
'%lss_v_lss',
'%lss_v_p',
'%lss_v_r',
'%lss_v_s',
'%lt_i_s',
'%m_n_l',
'%m_o_l',
'%mc_i_h',
'%mc_i_s',
'%mc_i_st',
'%mc_n_st',
'%mc_o_st',
'%mc_string',
'%mps_p',
'%mps_string',
'%msp_a_s',
'%msp_abs',
'%msp_e',
'%msp_find',
'%msp_i_s',
'%msp_i_st',
'%msp_length',
'%msp_m_s',
'%msp_maxi',
'%msp_n_msp',
'%msp_nnz',
'%msp_o_msp',
'%msp_p',
'%msp_sparse',
'%msp_spones',
'%msp_t',
'%p_a_lss',
'%p_a_r',
'%p_c_lss',
'%p_c_r',
'%p_cumprod',
'%p_cumsum',
'%p_d_p',
'%p_d_r',
'%p_d_s',
'%p_det',
'%p_e',
'%p_f_lss',
'%p_f_r',
'%p_grand',
'%p_i_ce',
'%p_i_h',
'%p_i_hm',
'%p_i_lss',
'%p_i_p',
'%p_i_r',
'%p_i_s',
'%p_i_st',
'%p_inv',
'%p_j_s',
'%p_k_p',
'%p_k_r',
'%p_k_s',
'%p_l_lss',
'%p_l_p',
'%p_l_r',
'%p_l_s',
'%p_m_hm',
'%p_m_lss',
'%p_m_r',
'%p_matrix',
'%p_n_l',
'%p_n_lss',
'%p_n_r',
'%p_o_l',
'%p_o_lss',
'%p_o_r',
'%p_o_sp',
'%p_p_s',
'%p_part',
'%p_prod',
'%p_q_p',
'%p_q_r',
'%p_q_s',
'%p_r_lss',
'%p_r_p',
'%p_r_r',
'%p_r_s',
'%p_s_lss',
'%p_s_r',
'%p_simp',
'%p_string',
'%p_sum',
'%p_v_lss',
'%p_v_p',
'%p_v_r',
'%p_v_s',
'%p_x_hm',
'%p_x_r',
'%p_y_p',
'%p_y_r',
'%p_y_s',
'%p_z_p',
'%p_z_r',
'%p_z_s',
'%pcg',
'%plist_p',
'%plist_string',
'%r_0',
'%r_a_hm',
'%r_a_lss',
'%r_a_p',
'%r_a_r',
'%r_a_s',
'%r_c_lss',
'%r_c_p',
'%r_c_r',
'%r_c_s',
'%r_clean',
'%r_cumprod',
'%r_cumsum',
'%r_d_p',
'%r_d_r',
'%r_d_s',
'%r_det',
'%r_diag',
'%r_e',
'%r_eye',
'%r_f_lss',
'%r_f_p',
'%r_f_r',
'%r_f_s',
'%r_i_ce',
'%r_i_hm',
'%r_i_lss',
'%r_i_p',
'%r_i_r',
'%r_i_s',
'%r_i_st',
'%r_inv',
'%r_j_s',
'%r_k_p',
'%r_k_r',
'%r_k_s',
'%r_l_lss',
'%r_l_p',
'%r_l_r',
'%r_l_s',
'%r_m_hm',
'%r_m_lss',
'%r_m_p',
'%r_m_r',
'%r_m_s',
'%r_matrix',
'%r_n_lss',
'%r_n_p',
'%r_n_r',
'%r_n_s',
'%r_norm',
'%r_o_lss',
'%r_o_p',
'%r_o_r',
'%r_o_s',
'%r_ones',
'%r_p',
'%r_p_s',
'%r_prod',
'%r_q_p',
'%r_q_r',
'%r_q_s',
'%r_r_lss',
'%r_r_p',
'%r_r_r',
'%r_r_s',
'%r_rand',
'%r_s',
'%r_s_hm',
'%r_s_lss',
'%r_s_p',
'%r_s_r',
'%r_s_s',
'%r_simp',
'%r_size',
'%r_string',
'%r_sum',
'%r_t',
'%r_tril',
'%r_triu',
'%r_v_lss',
'%r_v_p',
'%r_v_r',
'%r_v_s',
'%r_varn',
'%r_x_p',
'%r_x_r',
'%r_x_s',
'%r_y_p',
'%r_y_r',
'%r_y_s',
'%r_z_p',
'%r_z_r',
'%r_z_s',
'%s_1_hm',
'%s_1_i',
'%s_2_hm',
'%s_2_i',
'%s_3_hm',
'%s_3_i',
'%s_4_hm',
'%s_4_i',
'%s_5',
'%s_a_b',
'%s_a_hm',
'%s_a_i',
'%s_a_ip',
'%s_a_lss',
'%s_a_msp',
'%s_a_r',
'%s_a_sp',
'%s_and',
'%s_b_i',
'%s_b_s',
'%s_bezout',
'%s_c_b',
'%s_c_cblock',
'%s_c_lss',
'%s_c_r',
'%s_c_sp',
'%s_d_b',
'%s_d_i',
'%s_d_p',
'%s_d_r',
'%s_d_sp',
'%s_e',
'%s_f_b',
'%s_f_cblock',
'%s_f_lss',
'%s_f_r',
'%s_f_sp',
'%s_g_b',
'%s_g_s',
'%s_gcd',
'%s_grand',
'%s_h_b',
'%s_h_s',
'%s_i_b',
'%s_i_c',
'%s_i_ce',
'%s_i_h',
'%s_i_hm',
'%s_i_i',
'%s_i_lss',
'%s_i_p',
'%s_i_r',
'%s_i_s',
'%s_i_sp',
'%s_i_spb',
'%s_i_st',
'%s_j_i',
'%s_k_hm',
'%s_k_p',
'%s_k_r',
'%s_k_sp',
'%s_l_b',
'%s_l_hm',
'%s_l_i',
'%s_l_lss',
'%s_l_p',
'%s_l_r',
'%s_l_s',
'%s_l_sp',
'%s_lcm',
'%s_m_b',
'%s_m_hm',
'%s_m_i',
'%s_m_ip',
'%s_m_lss',
'%s_m_msp',
'%s_m_r',
'%s_matrix',
'%s_n_hm',
'%s_n_i',
'%s_n_l',
'%s_n_lss',
'%s_n_r',
'%s_n_st',
'%s_o_hm',
'%s_o_i',
'%s_o_l',
'%s_o_lss',
'%s_o_r',
'%s_o_st',
'%s_or',
'%s_p_b',
'%s_p_i',
'%s_pow',
'%s_q_hm',
'%s_q_i',
'%s_q_p',
'%s_q_r',
'%s_q_sp',
'%s_r_b',
'%s_r_i',
'%s_r_lss',
'%s_r_p',
'%s_r_r',
'%s_r_s',
'%s_r_sp',
'%s_s_b',
'%s_s_hm',
'%s_s_i',
'%s_s_ip',
'%s_s_lss',
'%s_s_r',
'%s_s_sp',
'%s_simp',
'%s_v_lss',
'%s_v_p',
'%s_v_r',
'%s_v_s',
'%s_x_b',
'%s_x_hm',
'%s_x_i',
'%s_x_r',
'%s_y_p',
'%s_y_r',
'%s_y_sp',
'%s_z_p',
'%s_z_r',
'%s_z_sp',
'%sn',
'%sp_a_s',
'%sp_a_sp',
'%sp_and',
'%sp_c_s',
'%sp_ceil',
'%sp_conj',
'%sp_cos',
'%sp_cumprod',
'%sp_cumsum',
'%sp_d_s',
'%sp_d_sp',
'%sp_det',
'%sp_diag',
'%sp_e',
'%sp_exp',
'%sp_f_s',
'%sp_floor',
'%sp_grand',
'%sp_gsort',
'%sp_i_ce',
'%sp_i_h',
'%sp_i_s',
'%sp_i_sp',
'%sp_i_st',
'%sp_int',
'%sp_inv',
'%sp_k_s',
'%sp_k_sp',
'%sp_l_s',
'%sp_l_sp',
'%sp_length',
'%sp_max',
'%sp_min',
'%sp_norm',
'%sp_or',
'%sp_p_s',
'%sp_prod',
'%sp_q_s',
'%sp_q_sp',
'%sp_r_s',
'%sp_r_sp',
'%sp_round',
'%sp_s_s',
'%sp_s_sp',
'%sp_sin',
'%sp_sqrt',
'%sp_string',
'%sp_sum',
'%sp_tril',
'%sp_triu',
'%sp_y_s',
'%sp_y_sp',
'%sp_z_s',
'%sp_z_sp',
'%spb_and',
'%spb_c_b',
'%spb_cumprod',
'%spb_cumsum',
'%spb_diag',
'%spb_e',
'%spb_f_b',
'%spb_g_b',
'%spb_g_spb',
'%spb_h_b',
'%spb_h_spb',
'%spb_i_b',
'%spb_i_ce',
'%spb_i_h',
'%spb_i_st',
'%spb_or',
'%spb_prod',
'%spb_sum',
'%spb_tril',
'%spb_triu',
'%st_6',
'%st_c_st',
'%st_e',
'%st_f_st',
'%st_i_b',
'%st_i_c',
'%st_i_fptr',
'%st_i_h',
'%st_i_i',
'%st_i_ip',
'%st_i_lss',
'%st_i_msp',
'%st_i_p',
'%st_i_r',
'%st_i_s',
'%st_i_sp',
'%st_i_spb',
'%st_i_st',
'%st_matrix',
'%st_n_c',
'%st_n_l',
'%st_n_mc',
'%st_n_p',
'%st_n_s',
'%st_o_c',
'%st_o_l',
'%st_o_mc',
'%st_o_p',
'%st_o_s',
'%st_o_tl',
'%st_p',
'%st_size',
'%st_string',
'%st_t',
'%ticks_i_h',
'%xls_e',
'%xls_p',
'%xlssheet_e',
'%xlssheet_p',
'%xlssheet_size',
'%xlssheet_string',
'DominationRank',
'G_make',
'IsAScalar',
'NDcost',
'OS_Version',
'PlotSparse',
'ReadHBSparse',
'TCL_CreateSlave',
'abcd',
'abinv',
'accept_func_default',
'accept_func_vfsa',
'acf',
'acosd',
'acosh',
'acoshm',
'acosm',
'acot',
'acotd',
'acoth',
'acsc',
'acscd',
'acsch',
'add_demo',
'add_help_chapter',
'add_module_help_chapter',
'add_param',
'add_profiling',
'adj2sp',
'aff2ab',
'ana_style',
'analpf',
'analyze',
'aplat',
'arhnk',
'arl2',
'arma2p',
'arma2ss',
'armac',
'armax',
'armax1',
'arobasestring2strings',
'arsimul',
'ascii2string',
'asciimat',
'asec',
'asecd',
'asech',
'asind',
'asinh',
'asinhm',
'asinm',
'assert_checkalmostequal',
'assert_checkequal',
'assert_checkerror',
'assert_checkfalse',
'assert_checkfilesequal',
'assert_checktrue',
'assert_comparecomplex',
'assert_computedigits',
'assert_cond2reltol',
'assert_cond2reqdigits',
'assert_generror',
'atand',
'atanh',
'atanhm',
'atanm',
'atomsAutoload',
'atomsAutoloadAdd',
'atomsAutoloadDel',
'atomsAutoloadList',
'atomsCategoryList',
'atomsCheckModule',
'atomsDepTreeShow',
'atomsGetConfig',
'atomsGetInstalled',
'atomsGetInstalledPath',
'atomsGetLoaded',
'atomsGetLoadedPath',
'atomsInstall',
'atomsIsInstalled',
'atomsIsLoaded',
'atomsList',
'atomsLoad',
'atomsQuit',
'atomsRemove',
'atomsRepositoryAdd',
'atomsRepositoryDel',
'atomsRepositoryList',
'atomsRestoreConfig',
'atomsSaveConfig',
'atomsSearch',
'atomsSetConfig',
'atomsShow',
'atomsSystemInit',
'atomsSystemUpdate',
'atomsTest',
'atomsUpdate',
'atomsVersion',
'augment',
'auread',
'auwrite',
'balreal',
'bench_run',
'bilin',
'bilt',
'bin2dec',
'binomial',
'bitand',
'bitcmp',
'bitget',
'bitor',
'bitset',
'bitxor',
'black',
'blanks',
'bloc2exp',
'bloc2ss',
'block_parameter_error',
'bode',
'bode_asymp',
'bstap',
'buttmag',
'bvodeS',
'bytecode',
'bytecodewalk',
'cainv',
'calendar',
'calerf',
'calfrq',
'canon',
'casc',
'cat',
'cat_code',
'cb_m2sci_gui',
'ccontrg',
'cell',
'cell2mat',
'cellstr',
'center',
'cepstrum',
'cfspec',
'char',
'chart',
'cheb1mag',
'cheb2mag',
'check_gateways',
'check_modules_xml',
'check_versions',
'chepol',
'chfact',
'chsolve',
'classmarkov',
'clean_help',
'clock',
'cls2dls',
'cmb_lin',
'cmndred',
'cmoment',
'coding_ga_binary',
'coding_ga_identity',
'coff',
'coffg',
'colcomp',
'colcompr',
'colinout',
'colregul',
'companion',
'complex',
'compute_initial_temp',
'cond',
'cond2sp',
'condestsp',
'configure_msifort',
'configure_msvc',
'conjgrad',
'cont_frm',
'cont_mat',
'contrss',
'conv',
'convert_to_float',
'convertindex',
'convol',
'convol2d',
'copfac',
'correl',
'cosd',
'cosh',
'coshm',
'cosm',
'cotd',
'cotg',
'coth',
'cothm',
'cov',
'covar',
'createXConfiguration',
'createfun',
'createstruct',
'cross',
'crossover_ga_binary',
'crossover_ga_default',
'csc',
'cscd',
'csch',
'csgn',
'csim',
'cspect',
'ctr_gram',
'czt',
'dae',
'daeoptions',
'damp',
'datafit',
'date',
'datenum',
'datevec',
'dbphi',
'dcf',
'ddp',
'dec2bin',
'dec2hex',
'dec2oct',
'del_help_chapter',
'del_module_help_chapter',
'demo_begin',
'demo_choose',
'demo_compiler',
'demo_end',
'demo_file_choice',
'demo_folder_choice',
'demo_function_choice',
'demo_gui',
'demo_run',
'demo_viewCode',
'denom',
'derivat',
'derivative',
'des2ss',
'des2tf',
'detectmsifort64tools',
'detectmsvc64tools',
'determ',
'detr',
'detrend',
'devtools_run_builder',
'dhnorm',
'diff',
'diophant',
'dir',
'dirname',
'dispfiles',
'dllinfo',
'dscr',
'dsimul',
'dt_ility',
'dtsi',
'edit',
'edit_error',
'editor',
'eigenmarkov',
'eigs',
'ell1mag',
'enlarge_shape',
'entropy',
'eomday',
'epred',
'eqfir',
'eqiir',
'equil',
'equil1',
'erfinv',
'etime',
'eval',
'evans',
'evstr',
'example_run',
'expression2code',
'extract_help_examples',
'factor',
'factorial',
'factors',
'faurre',
'ffilt',
'fft2',
'fftshift',
'fieldnames',
'filt_sinc',
'filter',
'findABCD',
'findAC',
'findBDK',
'findR',
'find_freq',
'find_links',
'find_scicos_version',
'findm',
'findmsifortcompiler',
'findmsvccompiler',
'findx0BD',
'firstnonsingleton',
'fix',
'fixedpointgcd',
'flipdim',
'flts',
'fminsearch',
'formatBlackTip',
'formatBodeMagTip',
'formatBodePhaseTip',
'formatGainplotTip',
'formatHallModuleTip',
'formatHallPhaseTip',
'formatNicholsGainTip',
'formatNicholsPhaseTip',
'formatNyquistTip',
'formatPhaseplotTip',
'formatSgridDampingTip',
'formatSgridFreqTip',
'formatZgridDampingTip',
'formatZgridFreqTip',
'format_txt',
'fourplan',
'frep2tf',
'freson',
'frfit',
'frmag',
'fseek_origin',
'fsfirlin',
'fspec',
'fspecg',
'fstabst',
'ftest',
'ftuneq',
'fullfile',
'fullrf',
'fullrfk',
'fun2string',
'g_margin',
'gainplot',
'gamitg',
'gcare',
'gcd',
'gencompilationflags_unix',
'generateBlockImage',
'generateBlockImages',
'generic_i_ce',
'generic_i_h',
'generic_i_hm',
'generic_i_s',
'generic_i_st',
'genlib',
'genmarkov',
'geomean',
'getDiagramVersion',
'getModelicaPath',
'getPreferencesValue',
'get_file_path',
'get_function_path',
'get_param',
'get_profile',
'get_scicos_version',
'getd',
'getscilabkeywords',
'getshell',
'gettklib',
'gfare',
'gfrancis',
'givens',
'glever',
'gmres',
'group',
'gschur',
'gspec',
'gtild',
'h2norm',
'h_cl',
'h_inf',
'h_inf_st',
'h_norm',
'hallchart',
'halt',
'hank',
'hankelsv',
'harmean',
'haveacompiler',
'head_comments',
'help_from_sci',
'help_skeleton',
'hermit',
'hex2dec',
'hilb',
'hilbert',
'histc',
'horner',
'householder',
'hrmt',
'htrianr',
'hypermat',
'idct',
'idst',
'ifft',
'ifftshift',
'iir',
'iirgroup',
'iirlp',
'iirmod',
'ilib_build',
'ilib_build_jar',
'ilib_compile',
'ilib_for_link',
'ilib_gen_Make',
'ilib_gen_Make_unix',
'ilib_gen_cleaner',
'ilib_gen_gateway',
'ilib_gen_loader',
'ilib_include_flag',
'ilib_mex_build',
'im_inv',
'importScicosDiagram',
'importScicosPal',
'importXcosDiagram',
'imrep2ss',
'ind2sub',
'inistate',
'init_ga_default',
'init_param',
'initial_scicos_tables',
'input',
'instruction2code',
'intc',
'intdec',
'integrate',
'interp1',
'interpln',
'intersect',
'intl',
'intsplin',
'inttrap',
'inv_coeff',
'invr',
'invrs',
'invsyslin',
'iqr',
'isLeapYear',
'is_absolute_path',
'is_param',
'iscell',
'iscellstr',
'iscolumn',
'isempty',
'isfield',
'isinf',
'ismatrix',
'isnan',
'isrow',
'isscalar',
'issparse',
'issquare',
'isstruct',
'isvector',
'jmat',
'justify',
'kalm',
'karmarkar',
'kernel',
'kpure',
'krac2',
'kroneck',
'lattn',
'lattp',
'launchtest',
'lcf',
'lcm',
'lcmdiag',
'leastsq',
'leqe',
'leqr',
'lev',
'levin',
'lex_sort',
'lft',
'lin',
'lin2mu',
'lincos',
'lindquist',
'linf',
'linfn',
'linsolve',
'linspace',
'list2vec',
'list_param',
'listfiles',
'listfunctions',
'listvarinfile',
'lmisolver',
'lmitool',
'loadXcosLibs',
'loadmatfile',
'loadwave',
'log10',
'log2',
'logm',
'logspace',
'lqe',
'lqg',
'lqg2stan',
'lqg_ltr',
'lqr',
'ls',
'lyap',
'm2sci_gui',
'm_circle',
'macglov',
'macrovar',
'mad',
'makecell',
'manedit',
'mapsound',
'markp2ss',
'matfile2sci',
'mdelete',
'mean',
'meanf',
'median',
'members',
'mese',
'meshgrid',
'mfft',
'mfile2sci',
'minreal',
'minss',
'mkdir',
'modulo',
'moment',
'mrfit',
'msd',
'mstr2sci',
'mtlb',
'mtlb_0',
'mtlb_a',
'mtlb_all',
'mtlb_any',
'mtlb_axes',
'mtlb_axis',
'mtlb_beta',
'mtlb_box',
'mtlb_choices',
'mtlb_close',
'mtlb_colordef',
'mtlb_cond',
'mtlb_cov',
'mtlb_cumprod',
'mtlb_cumsum',
'mtlb_dec2hex',
'mtlb_delete',
'mtlb_diag',
'mtlb_diff',
'mtlb_dir',
'mtlb_double',
'mtlb_e',
'mtlb_echo',
'mtlb_error',
'mtlb_eval',
'mtlb_exist',
'mtlb_eye',
'mtlb_false',
'mtlb_fft',
'mtlb_fftshift',
'mtlb_filter',
'mtlb_find',
'mtlb_findstr',
'mtlb_fliplr',
'mtlb_fopen',
'mtlb_format',
'mtlb_fprintf',
'mtlb_fread',
'mtlb_fscanf',
'mtlb_full',
'mtlb_fwrite',
'mtlb_get',
'mtlb_grid',
'mtlb_hold',
'mtlb_i',
'mtlb_ifft',
'mtlb_image',
'mtlb_imp',
'mtlb_int16',
'mtlb_int32',
'mtlb_int8',
'mtlb_is',
'mtlb_isa',
'mtlb_isfield',
'mtlb_isletter',
'mtlb_isspace',
'mtlb_l',
'mtlb_legendre',
'mtlb_linspace',
'mtlb_logic',
'mtlb_logical',
'mtlb_loglog',
'mtlb_lower',
'mtlb_max',
'mtlb_mean',
'mtlb_median',
'mtlb_mesh',
'mtlb_meshdom',
'mtlb_min',
'mtlb_more',
'mtlb_num2str',
'mtlb_ones',
'mtlb_pcolor',
'mtlb_plot',
'mtlb_prod',
'mtlb_qr',
'mtlb_qz',
'mtlb_rand',
'mtlb_randn',
'mtlb_rcond',
'mtlb_realmax',
'mtlb_realmin',
'mtlb_s',
'mtlb_semilogx',
'mtlb_semilogy',
'mtlb_setstr',
'mtlb_size',
'mtlb_sort',
'mtlb_sortrows',
'mtlb_sprintf',
'mtlb_sscanf',
'mtlb_std',
'mtlb_strcmp',
'mtlb_strcmpi',
'mtlb_strfind',
'mtlb_strrep',
'mtlb_subplot',
'mtlb_sum',
'mtlb_t',
'mtlb_toeplitz',
'mtlb_tril',
'mtlb_triu',
'mtlb_true',
'mtlb_type',
'mtlb_uint16',
'mtlb_uint32',
'mtlb_uint8',
'mtlb_upper',
'mtlb_var',
'mtlb_zeros',
'mu2lin',
'mutation_ga_binary',
'mutation_ga_default',
'mvcorrel',
'mvvacov',
'nancumsum',
'nand2mean',
'nanmax',
'nanmean',
'nanmeanf',
'nanmedian',
'nanmin',
'nanreglin',
'nanstdev',
'nansum',
'narsimul',
'ndgrid',
'ndims',
'nehari',
'neigh_func_csa',
'neigh_func_default',
'neigh_func_fsa',
'neigh_func_vfsa',
'neldermead_cget',
'neldermead_configure',
'neldermead_costf',
'neldermead_defaultoutput',
'neldermead_destroy',
'neldermead_function',
'neldermead_get',
'neldermead_log',
'neldermead_new',
'neldermead_restart',
'neldermead_search',
'neldermead_updatesimp',
'nextpow2',
'nfreq',
'nicholschart',
'nlev',
'nmplot_cget',
'nmplot_configure',
'nmplot_contour',
'nmplot_destroy',
'nmplot_function',
'nmplot_get',
'nmplot_historyplot',
'nmplot_log',
'nmplot_new',
'nmplot_outputcmd',
'nmplot_restart',
'nmplot_search',
'nmplot_simplexhistory',
'noisegen',
'nonreg_test_run',
'now',
'nthroot',
'null',
'num2cell',
'numderivative',
'numdiff',
'numer',
'nyquist',
'nyquistfrequencybounds',
'obs_gram',
'obscont',
'observer',
'obsv_mat',
'obsvss',
'oct2dec',
'odeoptions',
'optim_ga',
'optim_moga',
'optim_nsga',
'optim_nsga2',
'optim_sa',
'optimbase_cget',
'optimbase_checkbounds',
'optimbase_checkcostfun',
'optimbase_checkx0',
'optimbase_configure',
'optimbase_destroy',
'optimbase_function',
'optimbase_get',
'optimbase_hasbounds',
'optimbase_hasconstraints',
'optimbase_hasnlcons',
'optimbase_histget',
'optimbase_histset',
'optimbase_incriter',
'optimbase_isfeasible',
'optimbase_isinbounds',
'optimbase_isinnonlincons',
'optimbase_log',
'optimbase_logshutdown',
'optimbase_logstartup',
'optimbase_new',
'optimbase_outputcmd',
'optimbase_outstruct',
'optimbase_proj2bnds',
'optimbase_set',
'optimbase_stoplog',
'optimbase_terminate',
'optimget',
'optimplotfunccount',
'optimplotfval',
'optimplotx',
'optimset',
'optimsimplex_center',
'optimsimplex_check',
'optimsimplex_compsomefv',
'optimsimplex_computefv',
'optimsimplex_deltafv',
'optimsimplex_deltafvmax',
'optimsimplex_destroy',
'optimsimplex_dirmat',
'optimsimplex_fvmean',
'optimsimplex_fvstdev',
'optimsimplex_fvvariance',
'optimsimplex_getall',
'optimsimplex_getallfv',
'optimsimplex_getallx',
'optimsimplex_getfv',
'optimsimplex_getn',
'optimsimplex_getnbve',
'optimsimplex_getve',
'optimsimplex_getx',
'optimsimplex_gradientfv',
'optimsimplex_log',
'optimsimplex_new',
'optimsimplex_reflect',
'optimsimplex_setall',
'optimsimplex_setallfv',
'optimsimplex_setallx',
'optimsimplex_setfv',
'optimsimplex_setn',
'optimsimplex_setnbve',
'optimsimplex_setve',
'optimsimplex_setx',
'optimsimplex_shrink',
'optimsimplex_size',
'optimsimplex_sort',
'optimsimplex_xbar',
'orth',
'output_ga_default',
'output_moga_default',
'output_nsga2_default',
'output_nsga_default',
'p_margin',
'pack',
'pareto_filter',
'parrot',
'pbig',
'pca',
'pcg',
'pdiv',
'pen2ea',
'pencan',
'pencost',
'penlaur',
'perctl',
'perl',
'perms',
'permute',
'pertrans',
'pfactors',
'pfss',
'phasemag',
'phaseplot',
'phc',
'pinv',
'playsnd',
'plotprofile',
'plzr',
'pmodulo',
'pol2des',
'pol2str',
'polar',
'polfact',
'prbs_a',
'prettyprint',
'primes',
'princomp',
'profile',
'proj',
'projsl',
'projspec',
'psmall',
'pspect',
'qmr',
'qpsolve',
'quart',
'quaskro',
'rafiter',
'randpencil',
'range',
'rank',
'readxls',
'recompilefunction',
'recons',
'reglin',
'regress',
'remezb',
'remove_param',
'remove_profiling',
'repfreq',
'replace_Ix_by_Fx',
'repmat',
'reset_profiling',
'resize_matrix',
'returntoscilab',
'rhs2code',
'ric_desc',
'riccati',
'rmdir',
'routh_t',
'rowcomp',
'rowcompr',
'rowinout',
'rowregul',
'rowshuff',
'rref',
'sample',
'samplef',
'samwr',
'savematfile',
'savewave',
'scanf',
'sci2exp',
'sciGUI_init',
'sci_sparse',
'scicos_getvalue',
'scicos_simulate',
'scicos_workspace_init',
'scisptdemo',
'scitest',
'sdiff',
'sec',
'secd',
'sech',
'selection_ga_elitist',
'selection_ga_random',
'sensi',
'setPreferencesValue',
'set_param',
'setdiff',
'sgrid',
'show_margins',
'show_pca',
'showprofile',
'signm',
'sinc',
'sincd',
'sind',
'sinh',
'sinhm',
'sinm',
'sm2des',
'sm2ss',
'smga',
'smooth',
'solve',
'sound',
'soundsec',
'sp2adj',
'spaninter',
'spanplus',
'spantwo',
'specfact',
'speye',
'sprand',
'spzeros',
'sqroot',
'sqrtm',
'squarewave',
'squeeze',
'srfaur',
'srkf',
'ss2des',
'ss2ss',
'ss2tf',
'sskf',
'ssprint',
'ssrand',
'st_deviation',
'st_i_generic',
'st_ility',
'stabil',
'statgain',
'stdev',
'stdevf',
'steadycos',
'strange',
'strcmpi',
'struct',
'sub2ind',
'sva',
'svplot',
'sylm',
'sylv',
'sysconv',
'sysdiag',
'sysfact',
'syslin',
'syssize',
'system',
'systmat',
'tabul',
'tand',
'tanh',
'tanhm',
'tanm',
'tbx_build_blocks',
'tbx_build_cleaner',
'tbx_build_gateway',
'tbx_build_gateway_clean',
'tbx_build_gateway_loader',
'tbx_build_help',
'tbx_build_help_loader',
'tbx_build_loader',
'tbx_build_localization',
'tbx_build_macros',
'tbx_build_pal_loader',
'tbx_build_src',
'tbx_builder',
'tbx_builder_gateway',
'tbx_builder_gateway_lang',
'tbx_builder_help',
'tbx_builder_help_lang',
'tbx_builder_macros',
'tbx_builder_src',
'tbx_builder_src_lang',
'tbx_generate_pofile',
'temp_law_csa',
'temp_law_default',
'temp_law_fsa',
'temp_law_huang',
'temp_law_vfsa',
'test_clean',
'test_on_columns',
'test_run',
'test_run_level',
'testexamples',
'tf2des',
'tf2ss',
'thrownan',
'tic',
'time_id',
'toc',
'toeplitz',
'tokenpos',
'toolboxes',
'trace',
'trans',
'translatepaths',
'tree2code',
'trfmod',
'trianfml',
'trimmean',
'trisolve',
'trzeros',
'typeof',
'ui_observer',
'union',
'unique',
'unit_test_run',
'unix_g',
'unix_s',
'unix_w',
'unix_x',
'unobs',
'unpack',
'unwrap',
'variance',
'variancef',
'vec2list',
'vectorfind',
'ver',
'warnobsolete',
'wavread',
'wavwrite',
'wcenter',
'weekday',
'wfir',
'wfir_gui',
'whereami',
'who_user',
'whos',
'wiener',
'wigner',
'window',
'winlist',
'with_javasci',
'with_macros_source',
'with_modelica_compiler',
'with_tk',
'xcorr',
'xcosBlockEval',
'xcosBlockInterface',
'xcosCodeGeneration',
'xcosConfigureModelica',
'xcosPal',
'xcosPalAdd',
'xcosPalAddBlock',
'xcosPalExport',
'xcosPalGenerateAllIcons',
'xcosShowBlockWarning',
'xcosValidateBlockSet',
'xcosValidateCompareBlock',
'xcos_compile',
'xcos_debug_gui',
'xcos_run',
'xcos_simulate',
'xcov',
'xmltochm',
'xmltoformat',
'xmltohtml',
'xmltojar',
'xmltopdf',
'xmltops',
'xmltoweb',
'yulewalk',
'zeropen',
'zgrid',
'zpbutt',
'zpch1',
'zpch2',
'zpell',
)
variables_kw = (
'$',
'%F',
'%T',
'%e',
'%eps',
'%f',
'%fftw',
'%gui',
'%i',
'%inf',
'%io',
'%modalWarning',
'%nan',
'%pi',
'%s',
'%t',
'%tk',
'%toolboxes',
'%toolboxes_dir',
'%z',
'PWD',
'SCI',
'SCIHOME',
'TMPDIR',
'arnoldilib',
'assertlib',
'atomslib',
'cacsdlib',
'compatibility_functilib',
'corelib',
'data_structureslib',
'demo_toolslib',
'development_toolslib',
'differential_equationlib',
'dynamic_linklib',
'elementary_functionslib',
'enull',
'evoid',
'external_objectslib',
'fd',
'fileiolib',
'functionslib',
'genetic_algorithmslib',
'helptoolslib',
'home',
'integerlib',
'interpolationlib',
'iolib',
'jnull',
'jvoid',
'linear_algebralib',
'm2scilib',
'matiolib',
'modules_managerlib',
'neldermeadlib',
'optimbaselib',
'optimizationlib',
'optimsimplexlib',
'output_streamlib',
'overloadinglib',
'parameterslib',
'polynomialslib',
'preferenceslib',
'randliblib',
'scicos_autolib',
'scicos_utilslib',
'scinoteslib',
'signal_processinglib',
'simulated_annealinglib',
'soundlib',
'sparselib',
'special_functionslib',
'spreadsheetlib',
'statisticslib',
'stringlib',
'tclscilib',
'timelib',
'umfpacklib',
'xcoslib',
)
if __name__ == '__main__': # pragma: no cover
import subprocess
from pygments.util import format_lines, duplicates_removed
mapping = {'variables': 'builtin'}
def extract_completion(var_type):
# Run a headless Scilab session; universal_newlines=True keeps the pipe
# I/O in text mode so the completion output comes back as str on Python 3.
s = subprocess.Popen(['scilab', '-nwni'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
output = s.communicate('''\
fd = mopen("/dev/stderr", "wt");
mputl(strcat(completion("", "%s"), "||"), fd);
mclose(fd)\n''' % var_type)
if '||' not in output[1]:
raise Exception(output[0])
# Invalid DISPLAY causes this to be output:
text = output[1].strip()
if text.startswith('Error: unable to open display \n'):
text = text[len('Error: unable to open display \n'):]
return text.split('||')
new_data = {}
seen = set() # only keep first type for a given word
for t in ('functions', 'commands', 'macros', 'variables'):
new_data[t] = duplicates_removed(extract_completion(t), seen)
seen.update(set(new_data[t]))
with open(__file__, encoding='utf-8') as f:
content = f.read()
header = content[:content.find('# Autogenerated')]
footer = content[content.find("if __name__ == '__main__':"):]
with open(__file__, 'w', encoding='utf-8') as f:
f.write(header)
f.write('# Autogenerated\n\n')
for k, v in sorted(new_data.items()):
f.write(format_lines(k + '_kw', v) + '\n\n')
f.write(footer) | PypiClean |
/GPViz-0.0.4.tar.gz/GPViz-0.0.4/gpviz/gp.py | from typing import List
import gpjax.core as gpx
import jax.numpy as jnp
import matplotlib.pyplot as plt
from gpjax.gps import (
Posterior,
ConjugatePosterior,
SpectralPosterior,
NonConjugatePosterior,
)
from multipledispatch import dispatch
from .styles import get_colours
from .utils import tidy_legend, glow
Array = jnp.DeviceArray
####################
# Prior plotting
####################
@dispatch(Array, gpx.Prior, dict, gpx.Dataset)
def plot(
key: Array,
gp: gpx.Prior,
params: dict,
data: gpx.Dataset,
n_samples: int = 10,
title: str = None,
ax=None,
):
"""
Plot samples from the Gaussian process prior distribution.
:param key: A Jax PRNGKey object to ensure reproducibility when sampling from the prior distribution.
:param gp: A generic Gaussian process prior
:param params: The Gaussian process priors's corresponding parameter set.
:param data: The training dataset
:param n_samples: The number of prior samples to be drawn and plotted. The default argument is 10.
:param title: What title, if any, should be added to the plot.
:param ax: Optional matplotlib axes argument.
:return:
"""
samples = gpx.sample(key, gp, params, data, n_samples=n_samples)
cols = get_colours()
if ax is None:
fig, ax = plt.subplots()
ax.plot(data.X, samples.T, alpha=0.3, color=cols["base"])
ax.set_xlabel("X")
ax.set_ylabel("y")
ax.set_xlim(jnp.min(data.X), jnp.max(data.X))
ax.set_title(title, loc="left")
@dispatch(Array, gpx.Prior, dict, Array)
def plot(
key: Array,
gp: gpx.Prior,
params: dict,
data: Array,
n_samples: int = 10,
title: str = None,
):
"""
Plot samples from the Gaussian process prior distribution.
:param key: A Jax PRNGKey object to ensure reproducibility when sampling from the prior distribution.
:param gp: A generic Gaussian process prior
:param params: The Gaussian process priors's corresponding parameter set.
:param data: The training dataset
:param n_samples: The number of prior samples to be drawn and plotted. The default argument is 10.
:param title: What title, if any, should be added to the plot.
:return:
"""
D = gpx.Dataset(X=data)
return plot(key, gp, params, D, n_samples=n_samples, title=title)
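# Illustrative usage sketch (assumes `import jax` plus a prior/params pair built
# with the matching gpjax version; the constructor calls for those are omitted
# here because they differ between gpjax releases):
#   key = jax.random.PRNGKey(123)
#   xs = jnp.linspace(-3.0, 3.0, 200).reshape(-1, 1)
#   plot(key, prior, params, gpx.Dataset(X=xs), n_samples=20, title="Prior draws")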
######################
# Posterior plotting
######################
@dispatch(
Array,
(Posterior, ConjugatePosterior, SpectralPosterior, NonConjugatePosterior),
dict,
gpx.Dataset,
Array,
)
def plot(
key: Array,
gp: Posterior,
params: dict,
data: gpx.Dataset,
testing: Array,
n_samples: int = 0,
mean: bool = True,
glow_mean: bool = True,
std_devs: List[int] = [1],
title: str = None,
legend: bool = False,
ax=None,
):
"""
Create a plot of the Gaussian process' predictive posterior distribution.
:param key: A Jax PRNGKey object to ensure reproducibility when sampling from the posterior distribution.
:param gp: A generic Gaussian process posterior
:param params: The Gaussian process posterior's corresponding parameter set.
:param data: The training dataset
:param testing: The testing dataset array.
:param n_samples: The number of samples to be drawn from the predictive posterior's distribution. The default argument is 0 which corresponds to no samples being plotted.
:param mean: Boolean as to whether the predictive mean should be plotted.
:param glow_mean: Boolean as to whether the predictive mean line should be drawn with a glow effect.
:param std_devs: The standard-deviation multiples for which posterior bands should be plotted.
:param title: What title, if any, should be added to the plot.
:param legend: Boolean as to whether the legend should be added to the plot.
:param ax: Optional matplotlib axes argument.
:return:
"""
rv = gpx.random_variable(gp, params, data, jitter_amount=1e-6)(testing)
if ax is None:
fig, ax = plt.subplots()
mu = rv.mean()
sigma = rv.variance()
one_stddev = jnp.sqrt(sigma)
cols = get_colours()
ax.plot(data.X, data.y, "o", color=cols["dark_gray"], label="Training data")
if n_samples > 0:
if not mean and not std_devs:
col = cols['base']
alph = 0.6
width = 1.
else:
col = cols['base']
alph = 0.4
width = 0.5
posterior_samples = gpx.sample(key, rv, n_samples=n_samples)
ax.plot(
testing,
posterior_samples.T,
color=col,
alpha=alph,
linewidth=width,
label='Posterior samples'
)
for i in std_devs:
ax.fill_between(
testing.ravel(),
mu.ravel() - i * one_stddev,
mu.ravel() + i * one_stddev,
alpha=0.4 / i,
color=cols["cool_gray"],
label=f"{i} standard deviation",
)
if std_devs == [1]:
ax.plot(testing, mu.ravel() - one_stddev, linestyle="--", color=cols["base"])
ax.plot(testing, mu.ravel() + one_stddev, linestyle="--", color=cols["base"])
if mean:
mu_line = ax.plot(testing, mu, color=cols["base"], label="Predictive mean", linewidth=5)
if glow_mean:
glow(mu_line, ax)
ax.set_xlabel("X")
ax.set_ylabel("y")
xmin = jnp.min(data.X)
xmax = jnp.max(data.X)
ax.set_xlim(xmin - 0.05*jnp.abs(xmin), xmax + 0.05*jnp.abs(xmax))
ax.set_title(title, loc="left")
if legend:
# Remove duplicated labels
ax = tidy_legend(ax)
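# Illustrative usage sketch (not part of the library). The likelihood construction and
# `gpx.initialise` below are assumptions about the pinned gpjax API:
#
#   D = gpx.Dataset(X=xtrain, y=ytrain)
#   likelihood = gpx.Gaussian()                  # assumed likelihood constructor
#   posterior = prior * likelihood
#   params = gpx.initialise(posterior)           # assumed parameter helper
#   xtest = jnp.linspace(-3.0, 3.0, 200).reshape(-1, 1)
#   plot(key, posterior, params, D, xtest, n_samples=5, std_devs=[1, 2], legend=True)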
@dispatch(
Array,
(Posterior, ConjugatePosterior, SpectralPosterior, NonConjugatePosterior),
dict,
gpx.Dataset,
gpx.Dataset
)
def plot(
key: Array,
gp: Posterior,
params: dict,
data: gpx.Dataset,
testing: gpx.Dataset,
n_samples: int = 0,
mean: bool = True,
glow_mean: bool = True,
std_devs: List[int] = [1],
title: str = None,
legend: bool = False,
ax=None,
):
"""
Create a plot of the Gaussian process' predictive posterior distribution.
:param key: A Jax PRNGKey object to ensure reproducibility when sampling from the posterior distribution.
:param gp: A generic Gaussian process posterior
:param params: The Gaussian process posterior's corresponding parameter set.
:param data: The training dataset
:param testing: The testing dataset.
:param n_samples: The number of samples to be drawn from the predictive posterior's distribution. The default argument is 0 which corresponds to no samples being plotted.
:param mean: Boolean as to whether the predictive mean should be plotted.
:param glow_mean: Boolean as to whether the predictive mean line should be drawn with a glow effect.
:param std_devs: The standard-deviation multiples for which posterior bands should be plotted.
:param title: What title, if any, should be added to the plot.
:param legend: Boolean as to whether the legend should be added to the plot.
:param ax: Optional matplotlib axes argument.
:return:
"""
xstar = testing.X
return plot(key, gp, params, data, xstar, n_samples=n_samples, mean=mean, glow_mean=glow_mean, std_devs=std_devs, title=title, legend=legend, ax=ax) | PypiClean |
/baserow_open_api_client-0.0.6.tar.gz/baserow_open_api_client-0.0.6/baserow_open_api_client/models/job.py | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="Job")
@attr.s(auto_attribs=True)
class Job:
"""
Attributes:
id (int):
type (str): The type of the job.
progress_percentage (int): A percentage indicating how far along the job is. 100 means that it's finished.
state (str): Indicates the state of the import job.
human_readable_error (Union[Unset, str]): A human readable error message indicating what went wrong.
"""
id: int
type: str
progress_percentage: int
state: str
human_readable_error: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
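# Illustrative round-trip (hypothetical field values; sketch only):
#   job = Job.from_dict({"id": 1, "type": "duplicate_table",
#                        "progress_percentage": 50, "state": "pending"})
#   payload = job.to_dict()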
def to_dict(self) -> Dict[str, Any]:
id = self.id
type = self.type
progress_percentage = self.progress_percentage
state = self.state
human_readable_error = self.human_readable_error
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"id": id,
"type": type,
"progress_percentage": progress_percentage,
"state": state,
}
)
if human_readable_error is not UNSET:
field_dict["human_readable_error"] = human_readable_error
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
id = d.pop("id")
type = d.pop("type")
progress_percentage = d.pop("progress_percentage")
state = d.pop("state")
human_readable_error = d.pop("human_readable_error", UNSET)
job = cls(
id=id,
type=type,
progress_percentage=progress_percentage,
state=state,
human_readable_error=human_readable_error,
)
job.additional_properties = d
return job
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties | PypiClean |
/safegate_pro-2021.7.6-py3-none-any.whl/homeassistant/components/dynalite/convert_config.py | from __future__ import annotations
from typing import Any
from dynalite_devices_lib import const as dyn_const
from homeassistant.const import (
CONF_DEFAULT,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_ROOM,
CONF_TYPE,
)
from .const import (
ACTIVE_INIT,
ACTIVE_OFF,
ACTIVE_ON,
CONF_ACTIVE,
CONF_AREA,
CONF_AUTO_DISCOVER,
CONF_CHANNEL,
CONF_CHANNEL_COVER,
CONF_CLOSE_PRESET,
CONF_DEVICE_CLASS,
CONF_DURATION,
CONF_FADE,
CONF_LEVEL,
CONF_NO_DEFAULT,
CONF_OPEN_PRESET,
CONF_POLL_TIMER,
CONF_PRESET,
CONF_ROOM_OFF,
CONF_ROOM_ON,
CONF_STOP_PRESET,
CONF_TEMPLATE,
CONF_TILT_TIME,
CONF_TIME_COVER,
)
ACTIVE_MAP = {
ACTIVE_INIT: dyn_const.ACTIVE_INIT,
False: dyn_const.ACTIVE_OFF,
ACTIVE_OFF: dyn_const.ACTIVE_OFF,
ACTIVE_ON: dyn_const.ACTIVE_ON,
True: dyn_const.ACTIVE_ON,
}
TEMPLATE_MAP = {
CONF_ROOM: dyn_const.CONF_ROOM,
CONF_TIME_COVER: dyn_const.CONF_TIME_COVER,
}
def convert_with_map(config, conf_map):
"""Create the initial converted map with just the basic key:value pairs updated."""
result = {}
for conf in conf_map:
if conf in config:
result[conf_map[conf]] = config[conf]
return result
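# Illustrative example (hypothetical values):
#   convert_with_map(
#       {CONF_NAME: "Office", CONF_FADE: 2.0},
#       {CONF_NAME: dyn_const.CONF_NAME, CONF_FADE: dyn_const.CONF_FADE})
# returns {dyn_const.CONF_NAME: "Office", dyn_const.CONF_FADE: 2.0} -- the same values
# re-keyed onto the dynalite-devices library constants.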
def convert_channel(config: dict[str, Any]) -> dict[str, Any]:
"""Convert the config for a channel."""
my_map = {
CONF_NAME: dyn_const.CONF_NAME,
CONF_FADE: dyn_const.CONF_FADE,
CONF_TYPE: dyn_const.CONF_CHANNEL_TYPE,
}
return convert_with_map(config, my_map)
def convert_preset(config: dict[str, Any]) -> dict[str, Any]:
"""Convert the config for a preset."""
my_map = {
CONF_NAME: dyn_const.CONF_NAME,
CONF_FADE: dyn_const.CONF_FADE,
CONF_LEVEL: dyn_const.CONF_LEVEL,
}
return convert_with_map(config, my_map)
def convert_area(config: dict[str, Any]) -> dict[str, Any]:
"""Convert the config for an area."""
my_map = {
CONF_NAME: dyn_const.CONF_NAME,
CONF_FADE: dyn_const.CONF_FADE,
CONF_NO_DEFAULT: dyn_const.CONF_NO_DEFAULT,
CONF_ROOM_ON: dyn_const.CONF_ROOM_ON,
CONF_ROOM_OFF: dyn_const.CONF_ROOM_OFF,
CONF_CHANNEL_COVER: dyn_const.CONF_CHANNEL_COVER,
CONF_DEVICE_CLASS: dyn_const.CONF_DEVICE_CLASS,
CONF_OPEN_PRESET: dyn_const.CONF_OPEN_PRESET,
CONF_CLOSE_PRESET: dyn_const.CONF_CLOSE_PRESET,
CONF_STOP_PRESET: dyn_const.CONF_STOP_PRESET,
CONF_DURATION: dyn_const.CONF_DURATION,
CONF_TILT_TIME: dyn_const.CONF_TILT_TIME,
}
result = convert_with_map(config, my_map)
if CONF_CHANNEL in config:
result[dyn_const.CONF_CHANNEL] = {
channel: convert_channel(channel_conf)
for (channel, channel_conf) in config[CONF_CHANNEL].items()
}
if CONF_PRESET in config:
result[dyn_const.CONF_PRESET] = {
preset: convert_preset(preset_conf)
for (preset, preset_conf) in config[CONF_PRESET].items()
}
if CONF_TEMPLATE in config:
result[dyn_const.CONF_TEMPLATE] = TEMPLATE_MAP[config[CONF_TEMPLATE]]
return result
def convert_default(config: dict[str, Any]) -> dict[str, Any]:
"""Convert the config for the platform defaults."""
return convert_with_map(config, {CONF_FADE: dyn_const.CONF_FADE})
def convert_template(config: dict[str, Any]) -> dict[str, Any]:
"""Convert the config for a template."""
my_map = {
CONF_ROOM_ON: dyn_const.CONF_ROOM_ON,
CONF_ROOM_OFF: dyn_const.CONF_ROOM_OFF,
CONF_CHANNEL_COVER: dyn_const.CONF_CHANNEL_COVER,
CONF_DEVICE_CLASS: dyn_const.CONF_DEVICE_CLASS,
CONF_OPEN_PRESET: dyn_const.CONF_OPEN_PRESET,
CONF_CLOSE_PRESET: dyn_const.CONF_CLOSE_PRESET,
CONF_STOP_PRESET: dyn_const.CONF_STOP_PRESET,
CONF_DURATION: dyn_const.CONF_DURATION,
CONF_TILT_TIME: dyn_const.CONF_TILT_TIME,
}
return convert_with_map(config, my_map)
def convert_config(config: dict[str, Any]) -> dict[str, Any]:
"""Convert a config dict by replacing component consts with library consts."""
my_map = {
CONF_NAME: dyn_const.CONF_NAME,
CONF_HOST: dyn_const.CONF_HOST,
CONF_PORT: dyn_const.CONF_PORT,
CONF_AUTO_DISCOVER: dyn_const.CONF_AUTO_DISCOVER,
CONF_POLL_TIMER: dyn_const.CONF_POLL_TIMER,
}
result = convert_with_map(config, my_map)
if CONF_AREA in config:
result[dyn_const.CONF_AREA] = {
area: convert_area(area_conf)
for (area, area_conf) in config[CONF_AREA].items()
}
if CONF_DEFAULT in config:
result[dyn_const.CONF_DEFAULT] = convert_default(config[CONF_DEFAULT])
if CONF_ACTIVE in config:
result[dyn_const.CONF_ACTIVE] = ACTIVE_MAP[config[CONF_ACTIVE]]
if CONF_PRESET in config:
result[dyn_const.CONF_PRESET] = {
preset: convert_preset(preset_conf)
for (preset, preset_conf) in config[CONF_PRESET].items()
}
if CONF_TEMPLATE in config:
result[dyn_const.CONF_TEMPLATE] = {
TEMPLATE_MAP[template]: convert_template(template_conf)
for (template, template_conf) in config[CONF_TEMPLATE].items()
}
return result | PypiClean |
/matplotlib_arm64-3.3.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/matplotlib/backends/backend_ps.py | import datetime
from enum import Enum
import glob
from io import StringIO, TextIOWrapper
import logging
import math
import os
import pathlib
import re
import shutil
from tempfile import TemporaryDirectory
import time
import numpy as np
import matplotlib as mpl
from matplotlib import cbook, _path
from matplotlib import _text_layout
from matplotlib.backend_bases import (
_Backend, _check_savefig_extra_args, FigureCanvasBase, FigureManagerBase,
GraphicsContextBase, RendererBase)
from matplotlib.cbook import is_writable_file_like, file_requires_unicode
from matplotlib.font_manager import is_opentype_cff_font, get_font
from matplotlib.ft2font import LOAD_NO_HINTING
from matplotlib._ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.path import Path
from matplotlib.texmanager import TexManager
from matplotlib.transforms import Affine2D
from matplotlib.backends.backend_mixed import MixedModeRenderer
from . import _backend_pdf_ps
_log = logging.getLogger(__name__)
backend_version = 'Level II'
debugPS = 0
class PsBackendHelper:
def __init__(self):
self._cached = {}
ps_backend_helper = PsBackendHelper()
papersize = {'letter': (8.5, 11),
'legal': (8.5, 14),
'ledger': (11, 17),
'a0': (33.11, 46.81),
'a1': (23.39, 33.11),
'a2': (16.54, 23.39),
'a3': (11.69, 16.54),
'a4': (8.27, 11.69),
'a5': (5.83, 8.27),
'a6': (4.13, 5.83),
'a7': (2.91, 4.13),
'a8': (2.07, 2.91),
'a9': (1.457, 2.05),
'a10': (1.02, 1.457),
'b0': (40.55, 57.32),
'b1': (28.66, 40.55),
'b2': (20.27, 28.66),
'b3': (14.33, 20.27),
'b4': (10.11, 14.33),
'b5': (7.16, 10.11),
'b6': (5.04, 7.16),
'b7': (3.58, 5.04),
'b8': (2.51, 3.58),
'b9': (1.76, 2.51),
'b10': (1.26, 1.76)}
def _get_papertype(w, h):
for key, (pw, ph) in sorted(papersize.items(), reverse=True):
if key.startswith('l'):
continue
if w < pw and h < ph:
return key
return 'a0'
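# Illustrative example: the keys are scanned in reverse lexicographic order (so 'b*'
# sizes are tried before 'a*' sizes, and 'letter'/'legal'/'ledger' are skipped), hence
# _get_papertype(8.3, 11.7) returns 'b4' (10.11in x 14.33in), the first size in that
# order which strictly exceeds the requested 8.3in x 11.7in figure.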
def _num_to_str(val):
if isinstance(val, str):
return val
ival = int(val)
if val == ival:
return str(ival)
s = "%1.3f" % val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str, args))
def quote_ps_string(s):
"""
Quote dangerous characters of S for use in a PostScript string constant.
"""
s = s.replace(b"\\", b"\\\\")
s = s.replace(b"(", b"\\(")
s = s.replace(b")", b"\\)")
s = s.replace(b"'", b"\\251")
s = s.replace(b"`", b"\\301")
s = re.sub(br"[^ -~\n]", lambda x: br"\%03o" % ord(x.group()), s)
return s.decode('ascii')
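# Illustrative example: quote_ps_string(b"a(b)c") == r"a\(b\)c" -- the parentheses are
# escaped so the result can be embedded inside a PostScript string literal.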
def _move_path_to_path_or_stream(src, dst):
"""
Move the contents of file at *src* to path-or-filelike *dst*.
If *dst* is a path, the metadata of *src* are *not* copied.
"""
if is_writable_file_like(dst):
fh = (open(src, 'r', encoding='latin-1')
if file_requires_unicode(dst)
else open(src, 'rb'))
with fh:
shutil.copyfileobj(fh, dst)
else:
shutil.move(src, dst, copy_function=shutil.copyfile)
class RendererPS(_backend_pdf_ps.RendererPDFPSBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
_afm_font_dir = cbook._get_data_path("fonts/afm")
_use_afm_rc_name = "ps.useafm"
def __init__(self, width, height, pswriter, imagedpi=72):
# Although postscript itself is dpi independent, we need to inform the
# image code about a requested dpi to generate high resolution images
# and then scale them before embedding them.
super().__init__(width, height)
self._pswriter = pswriter
if mpl.rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self._hatches = {}
self.image_magnification = imagedpi / 72
self._clip_paths = {}
self._path_collection_id = 0
self._character_tracker = _backend_pdf_ps.CharacterTracker()
self.mathtext_parser = MathTextParser("PS")
@cbook.deprecated("3.3")
@property
def used_characters(self):
return self._character_tracker.used_characters
@cbook.deprecated("3.3")
def track_characters(self, *args, **kwargs):
"""Keep track of which characters are required from each font."""
self._character_tracker.track(*args, **kwargs)
@cbook.deprecated("3.3")
def merge_used_characters(self, *args, **kwargs):
self._character_tracker.merge(*args, **kwargs)
def set_color(self, r, g, b, store=True):
if (r, g, b) != self.color:
if r == g and r == b:
self._pswriter.write("%1.3f setgray\n" % r)
else:
self._pswriter.write(
"%1.3f %1.3f %1.3f setrgbcolor\n" % (r, g, b))
if store:
self.color = (r, g, b)
def set_linewidth(self, linewidth, store=True):
linewidth = float(linewidth)
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n" % linewidth)
if store:
self.linewidth = linewidth
def set_linejoin(self, linejoin, store=True):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n" % linejoin)
if store:
self.linejoin = linejoin
def set_linecap(self, linecap, store=True):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n" % linecap)
if store:
self.linecap = linecap
def set_linedash(self, offset, seq, store=True):
if self.linedash is not None:
oldo, oldseq = self.linedash
if np.array_equal(seq, oldseq) and oldo == offset:
return
if seq is not None and len(seq):
s = "[%s] %d setdash\n" % (_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store:
self.linedash = (offset, seq)
def set_font(self, fontname, fontsize, store=True):
if mpl.rcParams['ps.useafm']:
return
if (fontname, fontsize) != (self.fontname, self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname, fontsize))
self._pswriter.write(out)
if store:
self.fontname = fontname
self.fontsize = fontsize
def create_hatch(self, hatch):
sidelen = 72
if hatch in self._hatches:
return self._hatches[hatch]
name = 'H%d' % len(self._hatches)
linewidth = mpl.rcParams['hatch.linewidth']
pageheight = self.height * 72
self._pswriter.write(f"""\
<< /PatternType 1
/PaintType 2
/TilingType 2
/BBox[0 0 {sidelen:d} {sidelen:d}]
/XStep {sidelen:d}
/YStep {sidelen:d}
/PaintProc {{
pop
{linewidth:f} setlinewidth
{self._convert_path(
Path.hatch(hatch), Affine2D().scale(sidelen), simplify=False)}
gsave
fill
grestore
stroke
}} bind
>>
matrix
0.0 {pageheight:f} translate
makepattern
/{name} exch def
""")
self._hatches[hatch] = name
return name
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def draw_image(self, gc, x, y, im, transform=None):
# docstring inherited
h, w = im.shape[:2]
imagecmd = "false 3 colorimage"
data = im[::-1, :, :3] # Vertically flipped rgb values.
# data.tobytes().hex() has no spaces, so can be linewrapped by simply
# splitting data every nchars. It's equivalent to textwrap.fill only
# much faster.
nchars = 128
data = data.tobytes().hex()
hexlines = "\n".join(
[
data[n * nchars:(n + 1) * nchars]
for n in range(math.ceil(len(data) / nchars))
]
)
if transform is None:
matrix = "1 0 0 1 0 0"
xscale = w / self.image_magnification
yscale = h / self.image_magnification
else:
matrix = " ".join(map(str, transform.frozen().to_values()))
xscale = 1.0
yscale = 1.0
bbox = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
clip = []
if bbox is not None:
clip.append('%s clipbox' % _nums_to_str(*bbox.size, *bbox.p0))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
self._pswriter.write(f"""\
gsave
{clip}
{x:f} {y:f} translate
[{matrix}] concat
{xscale:f} {yscale:f} scale
/DataString {w:d} string def
{w:d} {h:d} 8 [ {w:d} 0 0 -{h:d} 0 {h:d} ]
{{
currentfile DataString readhexstring pop
}} bind {imagecmd}
{hexlines}
grestore
""")
def _convert_path(self, path, transform, clip=False, simplify=None):
if clip:
clip = (0.0, 0.0, self.width * 72.0, self.height * 72.0)
else:
clip = None
return _path.convert_to_string(
path, transform, clip, simplify, None,
6, [b'm', b'l', b'', b'c', b'cl'], True).decode('ascii')
def _get_clip_path(self, clippath, clippath_transform):
key = (clippath, id(clippath_transform))
pid = self._clip_paths.get(key)
if pid is None:
pid = 'c%x' % len(self._clip_paths)
clippath_bytes = self._convert_path(
clippath, clippath_transform, simplify=False)
self._pswriter.write(f"""\
/{pid} {{
{clippath_bytes}
clip
newpath
}} bind def
""")
self._clip_paths[key] = pid
return pid
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
clip = rgbFace is None and gc.get_hatch_path() is None
simplify = path.should_simplify and clip
ps = self._convert_path(path, transform, clip=clip, simplify=simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(
self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
# docstring inherited
if debugPS:
self._pswriter.write('% draw_markers \n')
ps_color = (
None
if _is_transparent(rgbFace)
else '%1.3f setgray' % rgbFace[0]
if rgbFace[0] == rgbFace[1] == rgbFace[2]
else '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace[:3])
# construct the generic marker command:
# don't want the translate to be global
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate']
lw = gc.get_linewidth()
alpha = (gc.get_alpha()
if gc.get_forced_alpha() or len(gc.get_rgb()) == 3
else gc.get_rgb()[3])
stroke = lw > 0 and alpha > 0
if stroke:
ps_cmd.append('%.1f setlinewidth' % lw)
jint = gc.get_joinstyle()
ps_cmd.append('%d setlinejoin' % jint)
cint = gc.get_capstyle()
ps_cmd.append('%d setlinecap' % cint)
ps_cmd.append(self._convert_path(marker_path, marker_trans,
simplify=False))
if rgbFace:
if stroke:
ps_cmd.append('gsave')
if ps_color:
ps_cmd.extend([ps_color, 'fill'])
if stroke:
ps_cmd.append('grestore')
if stroke:
ps_cmd.append('stroke')
ps_cmd.extend(['grestore', '} bind def'])
for vertices, code in path.iter_segments(
trans,
clip=(0, 0, self.width*72, self.height*72),
simplify=False):
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is
# (len_path + 2) * uses_per_path
# cost of definition+use is
# (len_path + 3) + 3 * uses_per_path
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + 3 * uses_per_path + 3 < (len_path + 2) * uses_per_path
if not should_do_optimization:
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
path_bytes = self._convert_path(path, transform, simplify=False)
self._pswriter.write(f"""\
/{name} {{
newpath
translate
{path_bytes}
}} bind def
""")
path_codes.append(name)
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc0, rgbFace)
self._path_collection_id += 1
@cbook._delete_parameter("3.3", "ismath")
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
# docstring inherited
if not hasattr(self, "psfrag"):
_log.warning(
"The PS backend determines usetex status solely based on "
"rcParams['text.usetex'] and does not support having "
"usetex=True only for some elements; this element will thus "
"be rendered as if usetex=False.")
self.draw_text(gc, x, y, s, prop, angle, False, mtext)
return
w, h, bl = self.get_text_width_height_descent(s, prop, ismath="TeX")
fontsize = prop.get_size_in_points()
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f' % gc.get_rgb()[:3]
fontcmd = {'sans-serif': r'{\sffamily %s}',
'monospace': r'{\ttfamily %s}'}.get(
mpl.rcParams['font.family'][0], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
corr = 0 # w/2*(fontsize-10)/10
if dict.__getitem__(mpl.rcParams, 'text.latex.preview'):
# use baseline alignment!
pos = _nums_to_str(x-corr, y)
self.psfrag.append(
r'\psfrag{%s}[Bl][Bl][1][%f]{\fontsize{%f}{%f}%s}' % (
thetext, angle, fontsize, fontsize*1.25, tex))
else:
# Stick to the bottom alignment.
pos = _nums_to_str(x-corr, y-bl)
self.psfrag.append(
r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}' % (
thetext, angle, fontsize, fontsize*1.25, tex))
self._pswriter.write(f"""\
gsave
{pos} moveto
({thetext})
show
grestore
""")
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# docstring inherited
if debugPS:
self._pswriter.write("% text\n")
if _is_transparent(gc.get_rgb()):
return # Special handling for fully transparent.
if ismath == 'TeX':
return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif mpl.rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001 * fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), f"uni{ord(c):04X}")
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow' % (thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
self._pswriter.write(f"""\
gsave
/{fontname} findfont
{fontsize} scalefont
setfont
{x:f} {y:f} translate
{angle:f} rotate
{thetext}
grestore
""")
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self._character_tracker.track(font, s)
self.set_color(*gc.get_rgb())
ps_name = (font.postscript_name
.encode('ascii', 'replace').decode('ascii'))
self.set_font(ps_name, prop.get_size_in_points())
thetext = '\n'.join(
'%f 0 m /%s glyphshow' % (x, font.get_glyph_name(glyph_idx))
for glyph_idx, x in _text_layout.layout(s, font))
self._pswriter.write(f"""\
gsave
{x:f} {y:f} translate
{angle:f} rotate
{thetext}
grestore
""")
def new_gc(self):
# docstring inherited
return GraphicsContextPS()
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""Draw the math text using matplotlib.mathtext."""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self._character_tracker.merge(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
self._pswriter.write(f"""\
gsave
{x:f} {y:f} translate
{angle:f} rotate
{thetext}
grestore
""")
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_points = trans.transform(flat_points)
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 12)
points_max = np.max(flat_points, axis=0) + (1 << 12)
factor = np.ceil((2 ** 32 - 1) / (points_max - points_min))
xmin, ymin = points_min
xmax, ymax = points_max
streamarr = np.empty(
shape[0] * shape[1],
dtype=[('flags', 'u1'), ('points', '2>u4'), ('colors', '3u1')])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
stream = quote_ps_string(streamarr.tobytes())
self._pswriter.write(f"""\
gsave
<< /ShadingType 4
/ColorSpace [/DeviceRGB]
/BitsPerCoordinate 32
/BitsPerComponent 8
/BitsPerFlag 8
/AntiAlias true
/Decode [ {xmin:f} {xmax:f} {ymin:f} {ymax:f} 0 1 0 1 0 1 ]
/DataSource ({stream})
>>
shfill
grestore
""")
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
Emit the PostScript snippet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = (gc.get_linewidth() > 0
and not _is_transparent(gc.get_rgb()))
if not mightstroke:
stroke = False
if _is_transparent(rgbFace):
fill = False
hatch = gc.get_hatch()
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
write('%1.4g %1.4g %1.4g %1.4g clipbox\n'
% (*cliprect.size, *cliprect.p0))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke or hatch:
write("gsave\n")
self.set_color(*rgbFace[:3], store=False)
write("fill\n")
if stroke or hatch:
write("grestore\n")
if hatch:
hatch_name = self.create_hatch(hatch)
write("gsave\n")
write("%f %f %f " % gc.get_hatch_color()[:3])
write("%s setpattern fill grestore\n" % hatch_name)
if stroke:
write("stroke\n")
write("grestore\n")
def _is_transparent(rgb_or_rgba):
if rgb_or_rgba is None:
return True # Consistent with rgbFace semantics.
elif len(rgb_or_rgba) == 4:
if rgb_or_rgba[3] == 0:
return True
if rgb_or_rgba[3] != 1:
_log.warning(
"The PostScript backend does not support transparency; "
"partially transparent artists will be rendered opaque.")
return False
else: # len() == 3.
return False
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt': 0, 'round': 1, 'projecting': 2}[
GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter': 0, 'round': 1, 'bevel': 2}[
GraphicsContextBase.get_joinstyle(self)]
class _Orientation(Enum):
portrait, landscape = range(2)
def swap_if_landscape(self, shape):
return shape[::-1] if self.name == "landscape" else shape
class FigureCanvasPS(FigureCanvasBase):
fixed_dpi = 72
filetypes = {'ps': 'Postscript',
'eps': 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(
self, outfile, format, *args,
dpi=72, metadata=None, papertype=None, orientation='portrait',
**kwargs):
self.figure.set_dpi(72) # Override the dpi kwarg
dsc_comments = {}
if isinstance(outfile, (str, os.PathLike)):
dsc_comments["Title"] = \
os.fspath(outfile).encode("ascii", "replace").decode("ascii")
dsc_comments["Creator"] = (metadata or {}).get(
"Creator",
f"matplotlib version {mpl.__version__}, http://matplotlib.org/")
# See https://reproducible-builds.org/specs/source-date-epoch/
source_date_epoch = os.getenv("SOURCE_DATE_EPOCH")
dsc_comments["CreationDate"] = (
datetime.datetime.utcfromtimestamp(
int(source_date_epoch)).strftime("%a %b %d %H:%M:%S %Y")
if source_date_epoch
else time.ctime())
dsc_comments = "\n".join(
f"%%{k}: {v}" for k, v in dsc_comments.items())
if papertype is None:
papertype = mpl.rcParams['ps.papersize']
papertype = papertype.lower()
cbook._check_in_list(['auto', *papersize], papertype=papertype)
orientation = cbook._check_getitem(
_Orientation, orientation=orientation.lower())
printer = (self._print_figure_tex
if mpl.rcParams['text.usetex'] else
self._print_figure)
printer(outfile, format, dpi=dpi, dsc_comments=dsc_comments,
orientation=orientation, papertype=papertype, **kwargs)
@_check_savefig_extra_args
@cbook._delete_parameter("3.2", "dryrun")
def _print_figure(
self, outfile, format, *,
dpi, dsc_comments, orientation, papertype,
dryrun=False, bbox_inches_restore=None):
"""
Render the figure to a filesystem path or a file-like object.
Parameters are as for `.print_figure`, except that *dsc_comments* is a
all string containing Document Structuring Convention comments,
generated from the *metadata* parameter to `.print_figure`.
"""
is_eps = format == 'eps'
if isinstance(outfile, (str, os.PathLike)):
outfile = os.fspath(outfile)
passed_in_file_object = False
elif is_writable_file_like(outfile):
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
papertype = _get_papertype(
*orientation.swap_if_landscape((width, height)))
paper_width, paper_height = orientation.swap_if_landscape(
papersize[papertype])
if mpl.rcParams['ps.usedistiller']:
# distillers improperly clip eps files if pagesize is too small
if width > paper_width or height > paper_height:
papertype = _get_papertype(
*orientation.swap_if_landscape((width, height)))
paper_width, paper_height = orientation.swap_if_landscape(
papersize[papertype])
# center the figure on the paper
xo = 72 * 0.5 * (paper_width - width)
yo = 72 * 0.5 * (paper_height - height)
llx = xo
lly = yo
urx = llx + self.figure.bbox.width
ury = lly + self.figure.bbox.height
rotation = 0
if orientation is _Orientation.landscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72 * paper_height - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
if dryrun:
class NullWriter:
def write(self, *args, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = StringIO()
# mixed mode rendering
ps_renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(
self.figure, width, height, dpi, ps_renderer,
bbox_inches_restore=bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
def print_figure_impl(fh):
# write the PostScript headers
if is_eps:
print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
else:
print(f"%!PS-Adobe-3.0\n"
f"%%DocumentPaperSizes: {papertype}\n"
f"%%Pages: 1\n",
end="", file=fh)
print(f"{dsc_comments}\n"
f"%%Orientation: {orientation.name}\n"
f"%%BoundingBox: {bbox[0]} {bbox[1]} {bbox[2]} {bbox[3]}\n"
f"%%EndComments\n",
end="", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
if not mpl.rcParams['ps.useafm']:
Ndict += len(ps_renderer._character_tracker.used)
print("/mpldict %d dict def" % Ndict, file=fh)
print("mpldict begin", file=fh)
print("\n".join(psDefs), file=fh)
if not mpl.rcParams['ps.useafm']:
for font_path, chars \
in ps_renderer._character_tracker.used.items():
if not chars:
continue
font = get_font(font_path)
glyph_ids = [font.get_char_index(c) for c in chars]
fonttype = mpl.rcParams['ps.fonttype']
# Can't use more than 255 chars from a single Type 3 font.
if len(glyph_ids) > 255:
fonttype = 42
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the STIX
# fonts). This will simply turn that off to avoid errors.
if is_opentype_cff_font(font_path):
raise RuntimeError(
"OpenType CFF fonts can not be saved using "
"the internal Postscript backend at this "
"time; consider using the Cairo backend")
fh.flush()
try:
convert_ttf_to_ps(os.fsencode(font_path),
fh, fonttype, glyph_ids)
except RuntimeError:
_log.warning("The PostScript backend does not "
"currently support the selected font.")
raise
print("end", file=fh)
print("%%EndProlog", file=fh)
if not is_eps:
print("%%Page: 1 1", file=fh)
print("mpldict begin", file=fh)
print("%s translate" % _nums_to_str(xo, yo), file=fh)
if rotation:
print("%d rotate" % rotation, file=fh)
print("%s clipbox" % _nums_to_str(width*72, height*72, 0, 0),
file=fh)
# write the figure
print(self._pswriter.getvalue(), file=fh)
# write the trailer
print("end", file=fh)
print("showpage", file=fh)
if not is_eps:
print("%%EOF", file=fh)
fh.flush()
if mpl.rcParams['ps.usedistiller']:
# We are going to use an external program to process the output.
# Write to a temporary file.
with TemporaryDirectory() as tmpdir:
tmpfile = os.path.join(tmpdir, "tmp.ps")
with open(tmpfile, 'w', encoding='latin-1') as fh:
print_figure_impl(fh)
if mpl.rcParams['ps.usedistiller'] == 'ghostscript':
_try_distill(gs_distill,
tmpfile, is_eps, ptype=papertype, bbox=bbox)
elif mpl.rcParams['ps.usedistiller'] == 'xpdf':
_try_distill(xpdf_distill,
tmpfile, is_eps, ptype=papertype, bbox=bbox)
_move_path_to_path_or_stream(tmpfile, outfile)
else:
# Write directly to outfile.
if passed_in_file_object:
requires_unicode = file_requires_unicode(outfile)
if not requires_unicode:
fh = TextIOWrapper(outfile, encoding="latin-1")
# Prevent the TextIOWrapper from closing the underlying
# file.
fh.close = lambda: None
else:
fh = outfile
print_figure_impl(fh)
else:
with open(outfile, 'w', encoding='latin-1') as fh:
print_figure_impl(fh)
@_check_savefig_extra_args
@cbook._delete_parameter("3.2", "dryrun")
def _print_figure_tex(
self, outfile, format, *,
dpi, dsc_comments, orientation, papertype,
dryrun=False, bbox_inches_restore=None):
"""
If :rc:`text.usetex` is True, a temporary pair of tex/eps files
are created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
The rest of the behavior is as for `._print_figure`.
"""
is_eps = format == 'eps'
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
llx = xo
lly = yo
urx = llx + self.figure.bbox.width
ury = lly + self.figure.bbox.height
bbox = (llx, lly, urx, ury)
if dryrun:
class NullWriter:
def write(self, *args, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = StringIO()
# mixed mode rendering
ps_renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
# write to a temp file, we'll move it to outfile when done
with TemporaryDirectory() as tmpdir:
tmpfile = os.path.join(tmpdir, "tmp.ps")
pathlib.Path(tmpfile).write_text(
f"""\
%!PS-Adobe-3.0 EPSF-3.0
{dsc_comments}
%%BoundingBox: {bbox[0]} {bbox[1]} {bbox[2]} {bbox[3]}
%%EndComments
%%BeginProlog
/mpldict {len(psDefs)} dict def
mpldict begin
{"".join(psDefs)}
end
%%EndProlog
mpldict begin
{_nums_to_str(xo, yo)} translate
{_nums_to_str(width*72, height*72)} 0 0 clipbox
{self._pswriter.getvalue()}
end
showpage
""",
encoding="latin-1")
if orientation is _Orientation.landscape: # now, ready to rotate
width, height = height, width
bbox = (lly, llx, ury, urx)
# set the paper size to the figure size if is_eps. The
# resulting ps file has the given size with correct bounding
# box so that there is no need to call 'pstoeps'
if is_eps:
paper_width, paper_height = orientation.swap_if_landscape(
self.figure.get_size_inches())
else:
if papertype == 'auto':
papertype = _get_papertype(width, height)
paper_width, paper_height = papersize[papertype]
texmanager = ps_renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag,
font_preamble,
custom_preamble, paper_width,
paper_height,
orientation.name)
if (mpl.rcParams['ps.usedistiller'] == 'ghostscript'
or mpl.rcParams['text.usetex']):
_try_distill(gs_distill,
tmpfile, is_eps, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif mpl.rcParams['ps.usedistiller'] == 'xpdf':
_try_distill(xpdf_distill,
tmpfile, is_eps, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
_move_path_to_path_or_stream(tmpfile, outfile)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paper_width, paper_height, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
with mpl.rc_context({
"text.latex.preamble":
mpl.rcParams["text.latex.preamble"] +
r"\usepackage{psfrag,color}""\n"
r"\usepackage[dvips]{graphicx}""\n"
r"\geometry{papersize={%(width)sin,%(height)sin},"
r"body={%(width)sin,%(height)sin},margin=0in}"
% {"width": paper_width, "height": paper_height}
}):
dvifile = TexManager().make_dvi(
"\n"
r"\begin{figure}""\n"
r" \centering\leavevmode""\n"
r" %(psfrags)s""\n"
r" \includegraphics*[angle=%(angle)s]{%(epsfile)s}""\n"
r"\end{figure}"
% {
"psfrags": "\n".join(psfrags),
"angle": 90 if orientation == 'landscape' else 0,
"epsfile": pathlib.Path(tmpfile).resolve().as_posix(),
},
fontsize=10) # tex's default fontsize.
with TemporaryDirectory() as tmpdir:
psfile = os.path.join(tmpdir, "tmp.ps")
cbook._check_and_log_subprocess(
['dvips', '-q', '-R0', '-o', psfile, dvifile], _log)
shutil.move(psfile, tmpfile)
# check if the dvips created a ps in landscape paper. Somehow,
# above latex+dvips results in a ps file in a landscape mode for a
# certain figure sizes (e.g., 8.3in, 5.8in which is a5). And the
# bounding box of the final output got messed up. We check to see if
# the generated ps file is in landscape and return this
# information. The return value is used in pstoeps step to recover
# the correct bounding box. 2010-06-05 JJL
with open(tmpfile) as fh:
psfrag_rotated = "Landscape" in fh.read(1000)
return psfrag_rotated
def _try_distill(func, *args, **kwargs):
try:
func(*args, **kwargs)
except mpl.ExecutableNotFoundError as exc:
_log.warning("%s. Distillation step skipped.", exc)
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
if eps:
paper_option = "-dEPSCrop"
else:
paper_option = "-sPAPERSIZE=%s" % ptype
psfile = tmpfile + '.ps'
dpi = mpl.rcParams['ps.distiller.res']
cbook._check_and_log_subprocess(
[mpl._get_executable_info("gs").executable,
"-dBATCH", "-dNOPAUSE", "-r%d" % dpi, "-sDEVICE=ps2write",
paper_option, "-sOutputFile=%s" % psfile, tmpfile],
_log)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
# While it is best if above steps preserve the original bounding
# box, there seem to be cases when it is not. For those cases,
# the original bbox can be restored during the pstoeps step.
if eps:
# For some versions of gs, above steps result in an ps file where the
# original bbox is no more correct. Do not adjust bbox for now.
pstoeps(tmpfile, bbox, rotated=rotated)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
mpl._get_executable_info("gs") # Effectively checks for ps2pdf.
mpl._get_executable_info("pdftops")
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
# Pass options as `-foo#bar` instead of `-foo=bar` to keep Windows happy
# (https://www.ghostscript.com/doc/9.22/Use.htm#MS_Windows).
cbook._check_and_log_subprocess(
["ps2pdf",
"-dAutoFilterColorImages#false",
"-dAutoFilterGrayImages#false",
"-sAutoRotatePages#None",
"-sGrayImageFilter#FlateEncode",
"-sColorImageFilter#FlateEncode",
"-dEPSCrop" if eps else "-sPAPERSIZE#%s" % ptype,
tmpfile, pdffile], _log)
cbook._check_and_log_subprocess(
["pdftops", "-paper", "match", "-level2", pdffile, psfile], _log)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox_header(lbrt, rotated=False):
"""
Return a postscript header string for the given bbox lbrt=(l, b, r, t).
Optionally, return rotate command.
"""
l, b, r, t = lbrt
if rotated:
rotate = "%.2f %.2f translate\n90 rotate" % (l+r, 0)
else:
rotate = ""
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (
l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info]), rotate
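# Illustrative example: get_bbox_header((0, 0, 612.5, 792)) returns
# ('%%BoundingBox: 0 0 613 792\n%%HiResBoundingBox: 0.000000 0.000000 612.500000 792.000000', '')
# i.e. the two DSC comment lines plus an empty rotate command.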
def pstoeps(tmpfile, bbox=None, rotated=False):
"""
Convert the postscript to encapsulated postscript. The bbox of
the eps file will be replaced with the given *bbox* argument. If
None, original bbox will be used.
"""
# if rotated==True, the output eps file need to be rotated
if bbox:
bbox_info, rotate = get_bbox_header(bbox, rotated=rotated)
else:
bbox_info, rotate = None, None
epsfile = tmpfile + '.eps'
with open(epsfile, 'wb') as epsh, open(tmpfile, 'rb') as tmph:
write = epsh.write
# Modify the header:
for line in tmph:
if line.startswith(b'%!PS'):
write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
if bbox:
write(bbox_info.encode('ascii') + b'\n')
elif line.startswith(b'%%EndComments'):
write(line)
write(b'%%BeginProlog\n'
b'save\n'
b'countdictstack\n'
b'mark\n'
b'newpath\n'
b'/showpage {} def\n'
b'/setpagedevice {pop} def\n'
b'%%EndProlog\n'
b'%%Page 1 1\n')
if rotate:
write(rotate.encode('ascii') + b'\n')
break
elif bbox and line.startswith((b'%%Bound', b'%%HiResBound',
b'%%DocumentMedia', b'%%Pages')):
pass
else:
write(line)
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
for line in tmph:
if line.startswith(b'%%EOF'):
write(b'cleartomark\n'
b'countdictstack\n'
b'exch sub { end } repeat\n'
b'restore\n'
b'showpage\n'
b'%%EOF\n')
elif line.startswith(b'%%PageBoundingBox'):
pass
else:
write(line)
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
FigureManagerPS = FigureManagerBase
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
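# Illustrative example of how the abbreviations above appear in the emitted PostScript
# (hypothetical numbers; a 10x20 point clip box at the origin, then a small stroked path):
#
#   10 20 0 0 clipbox
#   0 0 m
#   10 0 l
#   10 20 l
#   cl
#   stroke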
@_Backend.export
class _BackendPS(_Backend):
FigureCanvas = FigureCanvasPS | PypiClean |
/dsin100daysv32-6.0.1.tar.gz/dsin100daysv32-6.0.1/notebook/static/components/MathJax/jax/output/HTML-CSS/fonts/STIX-Web/Marks/Regular/Main.js | MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS.STIXMathJax_Marks={directory:"Marks/Regular",family:"STIXMathJax_Marks",testString:"\u00A0\u02B0\u02B1\u02B2\u02B3\u02B4\u02B5\u02B6\u02B7\u02B8\u02B9\u02BA\u02BB\u02BC\u02BD",32:[0,0,250,0,0],160:[0,0,250,0,0],688:[848,-336,378,7,365],689:[848,-336,378,7,365],690:[852,-169,300,44,244],691:[681,-336,252,5,252],692:[680,-335,277,10,257],693:[680,-168,325,10,338],694:[680,-335,390,6,379],695:[680,-331,520,6,512],696:[680,-176,370,14,361],697:[684,-421,208,90,257],698:[684,-421,305,19,324],699:[686,-443,333,79,218],700:[686,-443,333,79,218],701:[686,-443,333,79,218],702:[680,-485,198,35,163],703:[680,-485,198,35,163],704:[690,-295,326,23,303],705:[690,-295,326,23,303],706:[755,-419,317,33,285],707:[755,-419,317,33,285],708:[713,-461,317,-9,327],709:[713,-461,317,-9,327],712:[713,-448,278,119,159],716:[70,195,278,119,159],717:[-104,159,334,11,323],718:[-21,192,333,25,249],719:[-21,192,333,84,308],720:[460,-19,333,89,244],721:[460,-299,333,89,244],722:[365,-75,333,72,262],723:[365,-75,333,71,261],724:[205,-18,333,51,281],725:[205,-18,333,51,281],726:[218,-26,333,71,263],727:[144,-100,333,71,263],731:[0,165,333,64,249],733:[678,-507,333,-3,376],734:[443,-186,298,0,263],735:[662,-425,333,48,284],736:[684,-219,378,24,335],737:[848,-336,215,19,197],738:[681,-331,291,36,261],739:[680,-336,380,5,372],740:[850,-336,341,45,319],741:[662,0,413,48,373],742:[662,0,405,40,365],743:[662,0,405,40,365],744:[662,0,405,40,365],745:[662,0,405,40,365],748:[70,147,333,21,311],749:[665,-507,405,10,395],759:[-113,219,333,1,331],773:[820,-770,0,-480,20],777:[751,-492,0,-307,-118],781:[700,-500,0,-250,-195],782:[700,-500,0,-326,-133],783:[678,-507,0,-401,-22],784:[767,-507,0,-373,-92],785:[664,-507,0,-373,-92],786:[745,-502,0,-299,-160],787:[745,-502,0,-299,-160],788:[745,-502,0,-299,-160],789:[745,-502,0,-85,54],790:[-53,224,0,-351,-127],791:[-53,224,0,-371,-147],792:[-53,283,0,-397,-210],793:[-53,283,0,-267,-80],794:[735,-531,0,-380,-80],795:[474,-345,0,-44,51],796:[-71,266,0,-360,-232],797:[-53,240,0,-345,-115],798:[-53,240,0,-345,-115],799:[-53,250,0,-326,-134],800:[-124,168,0,-326,-134],801:[75,287,0,-235,1],802:[75,287,0,-54,182],803:[-118,217,0,-280,-181],804:[-119,218,0,-379,-81],805:[-69,268,0,-329,-130],806:[-110,353,0,-299,-160],807:[0,215,0,-334,-125],808:[0,165,0,-322,-137],809:[-102,234,0,-250,-210],810:[-98,235,0,-385,-73],811:[-110,227,0,-380,-75],812:[-73,240,0,-385,-74],813:[-73,240,0,-385,-74],814:[-68,225,0,-370,-89],815:[-59,216,0,-370,-89],816:[-113,219,0,-395,-65],817:[-141,195,0,-385,-74],818:[-141,191,0,-480,20],819:[-141,300,0,-480,20],820:[320,-214,0,-401,-71],821:[274,-230,0,-384,-78],822:[274,-230,0,-480,20],823:[580,74,0,-380,-41],825:[-71,266,0,-280,-152],826:[-53,190,0,-385,-73],827:[-53,227,0,-313,-147],828:[-65,189,0,-380,-79],829:[715,-525,0,-326,-135],830:[829,-499,0,-283,-177],831:[928,-770,0,-480,20],838:[681,-538,0,-350,-68],839:[-140,292,1,11,323],844:[777,-532,0,-386,-56],857:[-65,367,0,-357,-87],860:[-76,233,0,-373,295],864:[633,-517,0,-395,365],865:[664,-507,0,-373,295],866:[-65,270,0,-395,355],8208:[259,-193,333,39,285],8209:[257,-194,333,39,285],8210:[259,-193,500,0,500],8213:[250,-201,2000,0,2000],8215:[-141,300,500,0,500],8218:[102,141,333,79,218],8219:[676,-433,333,79,218],8222:[102,141,444,45,416],8223:[676,-433,444,30,401],8226:[444,-59,523,70,455],8229:[100,11,667,111,555],8240:[706,
19,1109,61,1048],8241:[706,19,1471,61,1410],8246:[678,-401,426,75,351],8247:[678,-401,563,75,488],8248:[102,156,511,59,454],8249:[416,-33,333,63,285],8250:[416,-33,333,48,270],8251:[547,41,685,48,635],8252:[676,9,549,130,452],8256:[709,-512,798,72,726],8259:[332,-172,333,39,285],8263:[676,8,839,68,809],8270:[240,171,500,68,433],8271:[459,141,278,60,199],8272:[691,40,790,55,735],8273:[676,171,501,68,433],8274:[706,200,471,54,417],8287:[0,0,1000,0,0],8400:[760,-627,0,-453,-17],8401:[760,-627,0,-453,-17],8402:[662,156,0,-242,-192],8406:[760,-548,0,-453,-17],8411:[622,-523,0,-462,35],8412:[622,-523,0,-600,96],8413:[725,221,0,-723,223],8414:[780,180,0,-730,230],8415:[843,341,0,-840,344],8417:[760,-548,0,-453,25],8420:[1023,155,0,-970,490],8421:[662,156,0,-430,-40],8422:[662,156,0,-335,-102],8423:[725,178,0,-650,166],8424:[-119,218,0,-462,35],8425:[681,-538,0,-480,53],8426:[419,-87,0,-658,118],8427:[756,217,0,-448,193],8428:[-119,252,0,-453,-17],8429:[-119,252,0,-453,-17],8430:[-40,252,0,-453,-17],8431:[-40,252,0,-453,-17],8432:[819,-517,0,-357,-87],12306:[662,0,685,10,672],12336:[417,-93,1412,45,1367],57438:[698,-547,0,95,406],57441:[-141,390,0,11,322],57442:[-141,486,0,11,322],57443:[734,-508,0,94,485],57444:[777,-547,0,95,425],57445:[-141,371,0,1,331],57446:[770,-547,0,101,412],57447:[-141,371,0,1,331],57560:[584,0,400,57,343],57561:[665,0,255,56,199],57562:[665,0,388,56,332],57996:[474,-227,0,53,397],57997:[734,-484,0,94,460]};MathJax.Callback.Queue(["initFont",MathJax.OutputJax["HTML-CSS"],"STIXMathJax_Marks"],["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Marks/Regular/Main.js"]); | PypiClean |
/salt-3006.2.tar.gz/salt-3006.2/CODE_OF_CONDUCT.md | # Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
[email protected].
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| PypiClean |
/fhirclientr4-4.0.0.tar.gz/fhirclientr4-4.0.0/fhirclient/models/library.py |
from . import domainresource
class Library(domainresource.DomainResource):
""" Represents a library of quality improvement components.
The Library resource is a general-purpose container for knowledge asset
definitions. It can be used to describe and expose existing knowledge
assets such as logic libraries and information model descriptions, as well
as to describe a collection of knowledge assets.
"""
resource_type = "Library"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.approvalDate = None
""" When the library was approved by publisher.
Type `FHIRDate` (represented as `str` in JSON). """
self.author = None
""" Who authored the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.content = None
""" Contents of the library, either embedded or referenced.
List of `Attachment` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.dataRequirement = None
""" What data is referenced by this library.
List of `DataRequirement` items (represented as `dict` in JSON). """
self.date = None
""" Date last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Natural language description of the library.
Type `str`. """
self.editor = None
""" Who edited the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.effectivePeriod = None
""" When the library is expected to be used.
Type `Period` (represented as `dict` in JSON). """
self.endorser = None
""" Who endorsed the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.experimental = None
""" For testing purposes, not real usage.
Type `bool`. """
self.identifier = None
""" Additional identifier for the library.
List of `Identifier` items (represented as `dict` in JSON). """
self.jurisdiction = None
""" Intended jurisdiction for library (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.lastReviewDate = None
""" When the library was last reviewed.
Type `FHIRDate` (represented as `str` in JSON). """
self.name = None
""" Name for this library (computer friendly).
Type `str`. """
self.parameter = None
""" Parameters defined by the library.
List of `ParameterDefinition` items (represented as `dict` in JSON). """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.purpose = None
""" Why this library is defined.
Type `str`. """
self.relatedArtifact = None
""" Additional documentation, citations, etc..
List of `RelatedArtifact` items (represented as `dict` in JSON). """
self.reviewer = None
""" Who reviewed the content.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.subjectCodeableConcept = None
""" Type of individual the library content is focused on.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.subjectReference = None
""" Type of individual the library content is focused on.
Type `FHIRReference` (represented as `dict` in JSON). """
self.subtitle = None
""" Subordinate title of the library.
Type `str`. """
self.title = None
""" Name for this library (human friendly).
Type `str`. """
self.topic = None
""" E.g. Education, Treatment, Assessment, etc..
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.type = None
""" logic-library | model-definition | asset-collection | module-
definition.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.url = None
""" Canonical identifier for this library, represented as a URI
(globally unique).
Type `str`. """
self.usage = None
""" Describes the clinical usage of the library.
Type `str`. """
self.useContext = None
""" The context that the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the library.
Type `str`. """
super(Library, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Library, self).elementProperties()
js.extend([
("approvalDate", "approvalDate", fhirdate.FHIRDate, False, None, False),
("author", "author", contactdetail.ContactDetail, True, None, False),
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("content", "content", attachment.Attachment, True, None, False),
("copyright", "copyright", str, False, None, False),
("dataRequirement", "dataRequirement", datarequirement.DataRequirement, True, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("editor", "editor", contactdetail.ContactDetail, True, None, False),
("effectivePeriod", "effectivePeriod", period.Period, False, None, False),
("endorser", "endorser", contactdetail.ContactDetail, True, None, False),
("experimental", "experimental", bool, False, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("lastReviewDate", "lastReviewDate", fhirdate.FHIRDate, False, None, False),
("name", "name", str, False, None, False),
("parameter", "parameter", parameterdefinition.ParameterDefinition, True, None, False),
("publisher", "publisher", str, False, None, False),
("purpose", "purpose", str, False, None, False),
("relatedArtifact", "relatedArtifact", relatedartifact.RelatedArtifact, True, None, False),
("reviewer", "reviewer", contactdetail.ContactDetail, True, None, False),
("status", "status", str, False, None, True),
("subjectCodeableConcept", "subjectCodeableConcept", codeableconcept.CodeableConcept, False, "subject", False),
("subjectReference", "subjectReference", fhirreference.FHIRReference, False, "subject", False),
("subtitle", "subtitle", str, False, None, False),
("title", "title", str, False, None, False),
("topic", "topic", codeableconcept.CodeableConcept, True, None, False),
("type", "type", codeableconcept.CodeableConcept, False, None, True),
("url", "url", str, False, None, False),
("usage", "usage", str, False, None, False),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("version", "version", str, False, None, False),
])
return js
import sys
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import datarequirement
except ImportError:
datarequirement = sys.modules[__package__ + '.datarequirement']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import parameterdefinition
except ImportError:
parameterdefinition = sys.modules[__package__ + '.parameterdefinition']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import relatedartifact
except ImportError:
relatedartifact = sys.modules[__package__ + '.relatedartifact']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext'] | PypiClean |
/azure-mgmt-resource-23.1.0b1.zip/azure-mgmt-resource-23.1.0b1/azure/mgmt/resource/subscriptions/v2019_06_01/aio/_configuration.py |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class SubscriptionClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for SubscriptionClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:keyword api_version: Api Version. Default value is "2019-06-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", **kwargs: Any) -> None:
super(SubscriptionClientConfiguration, self).__init__(**kwargs)
api_version: str = kwargs.pop("api_version", "2019-06-01")
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
self.credential = credential
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-resource/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
) | PypiClean |
/python_search-0.26.0.tar.gz/python_search-0.26.0/python_search/container.py | import os
from typing import Optional
from python_search.config import MLFlowConfig
def build():
result = os.system("docker build . -t ps:latest ")
if result == 0:
print("Build successful")
else:
raise Exception("Build failed")
def build_and_run():
build()
run()
def run(
cmd="",
entrypoint="",
port="",
restart=False,
extra_env_vars=None,
name: Optional[str] = None,
):
"""
Runs inside the docker container
:param cmd:
:param entrypoint:
:param port:
:param restart:
:param extra_env_vars:
:param name:
:return:
"""
if entrypoint:
entrypoint = f" --entrypoint '{entrypoint}'"
restart_exp = ""
if restart:
print("Adding restart flag")
restart_exp = " --restart always "
if port:
port = f" -p {port}"
volumes = " ".join(
[
" -v $HOME/projects/PythonSearch:/src ",
" -v $HOME/.ssh:/root/.ssh ",
" -v $HOME/projects/PySearchEntries/:/entries ",
" -v $HOME/.PythonSearch:/root/.PythonSearch ",
" -v $HOME/.ddataflow:/root/.ddataflow",
" -v $HOME/.PythonSearch/container_cache/:/root/.cache ",
" -v $HOME/.data:/root/.data ",
" -v $HOME/.gitconfig:/root/.gitconfig",
]
)
env_vars = [
" -e 'PS_ENTRIES_HOME=/entries' ",
" -e ARIZE_API_KEY=$ARIZE_API_KEY ",
" -e ARIZE_SPACE_KEY=$ARIZE_SPACE_KEY ",
" -e PS_WEB_PASSWORD=$PS_WEB_PASSWORD",
]
env_vars = env_vars + extra_env_vars if extra_env_vars else env_vars
environment_variables = " ".join(env_vars)
name_expr = ""
if name:
name_expr = f" --name {name} "
LIMIT_CPU = os.cpu_count()
print(f"Found {LIMIT_CPU} CPUs in the machine")
LIMIT_CPU = int(os.environ["LIMIT_CPU"]) if "LIMIT_CPU" in os.environ else LIMIT_CPU  # env vars are strings; cast so the comparison below works
# more than 5 cpus is hardly useful
if LIMIT_CPU > 5:
LIMIT_CPU = 5
cmd = f"docker run {name_expr} {port} {restart_exp} --expose=8000 --expose 4040 --expose 6380 --cpus={LIMIT_CPU} {environment_variables} -it {volumes} {entrypoint} ps {cmd}"
print("Cmd: " + cmd)
os.system(cmd)
def run_webserver():
name = "python_search_webserver"
_stop_and_remove_by_name(name)
run(
cmd="python_search_webapi",
port="8000:8000",
name=name,
)
def sh():
shell()
def shell():
run(entrypoint="/bin/bash")
def run_jupyter(with_token=False, restart=False):
token_expression = " --NotebookApp.token=''"
if with_token:
token_expression = ""
run(
cmd=f"jupyter lab --allow-root --ip '*' --notebook-dir / {token_expression} --NotebookApp.password=''",
port="8888:8888",
restart=restart,
)
def run_mlflow(restart=False):
run(
cmd=f"mlflow ui --backend-store-uri file:/entries/mlflow --port {MLFlowConfig.port} --host '0.0.0.0' ",
port=f"{MLFlowConfig.port}:{MLFlowConfig.port}",
restart=restart,
)
def _restart_by_port(port):
print("Stopping previously running container")
os.system(f"docker stop $(docker ps | grep -i {port} | cut -d ' ' -f1) ; sleep 3")
def _stop_and_remove_by_name(name):
print("Stopping previously running container")
os.system(f"docker stop {name} ; docker rm {name}")
def run_streamlit(
*, custom_entry_point: Optional[str] = None, restart=False, disable_password=False
):
if restart:
_restart_by_port(8501)
entry_point = "python_search/data_ui/main.py"
if custom_entry_point:
entry_point = custom_entry_point
run(
cmd=f"streamlit run {entry_point} --server.address=0.0.0.0 --server.port=8501",
port="8501:8501",
restart=restart,
extra_env_vars=[" -e 'PS_DISABLE_PASSWORD=1' "] if disable_password else None,
)
def start():
import fire
fire.Fire()
if __name__ == "__main__":
start | PypiClean |
/fusionsc-2.0.0a2.tar.gz/fusionsc-2.0.0a2/vendor/capnproto/security-advisories/2015-03-02-0-c++-integer-overflow.md | Problem
=======
Integer overflow in pointer validation.
Discovered by
=============
Ben Laurie <[email protected]> using [American Fuzzy Lop](http://lcamtuf.coredump.cx/afl/)
Announced
=========
2015-03-02
CVE
===
CVE-2015-2310
Impact
======
- Remotely segfault a peer by sending it a malicious message.
- Possible exfiltration of memory, depending on application behavior.
Fixed in
========
- git commit [f343f0dbd0a2e87f17cd74f14186ed73e3fbdbfa][0]
- release 0.5.1.1:
- Unix: https://capnproto.org/capnproto-c++-0.5.1.1.tar.gz
- Windows: https://capnproto.org/capnproto-c++-win32-0.5.1.1.zip
- release 0.4.1.1:
- Unix: https://capnproto.org/capnproto-c++-0.4.1.1.tar.gz
- release 0.6 (future)
[0]: https://github.com/capnproto/capnproto/commit/f343f0dbd0a2e87f17cd74f14186ed73e3fbdbfa
Details
=======
*The following text contains speculation about the exploitability of this
bug. This is provided for informational purposes, but as such speculation is
often shown to be wrong, you should not rely on the accuracy of this
section for the safety of your service. Please update your library.*
A specially-crafted pointer could escape bounds checking by triggering an
integer overflow in the check. This causes the message to appear as if it
contains an extremely long list (over 2^32 bytes), stretching far beyond the
memory actually allocated to the message. If the application reads that list,
it will likely segfault, but if it manages to avoid a segfault (e.g. because
it has mapped a very large contiguous block of memory following the message,
or because it only reads some parts of the list and not others), it could end
up treating arbitrary parts of memory as input. If the application happens to
pass that data back to the user in some way, this problem could lead to
exfiltration of secrets.
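To make the failure mode concrete, here is a small illustration of the general
bug pattern. This is not Cap'n Proto's actual validation code, and Python
integers do not overflow, so 32-bit unsigned wrap-around is modeled explicitly
with a mask:

```python
MASK32 = 0xFFFFFFFF  # model 32-bit unsigned arithmetic

def unsafe_bounds_check(offset, claimed_size, segment_size):
    """Buggy pattern: the sum wraps around before the comparison."""
    end = (offset + claimed_size) & MASK32  # wraps for a huge claimed_size
    return end <= segment_size              # accepted despite being out of bounds

def safe_bounds_check(offset, claimed_size, segment_size):
    """Overflow-proof pattern: compare against the space that is left."""
    return offset <= segment_size and claimed_size <= segment_size - offset

segment_size = 4096
offset = 8
claimed_size = (1 << 32) - 4  # pointer claims a list of almost 4 GiB

print(unsafe_bounds_check(offset, claimed_size, segment_size))  # True  -> accepted!
print(safe_bounds_check(offset, claimed_size, segment_size))    # False -> rejected
```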
The pointer is transitively read-only, therefore it is believed that this
vulnerability on its own CANNOT lead to memory corruption nor code execution.
This vulnerability is NOT a Sandstorm sandbox breakout. A Sandstorm app's
Cap'n Proto communications pass through a supervisor process which performs a
deep copy of the structure. As the supervisor has a very small heap, this
will always lead to a segfault, which has the effect of killing the app, but
does not affect any other app or the system at large. If somehow the copy
succeeds, the copied message will no longer contain an invalid pointer and
so will not harm its eventual destination, and the supervisor itself has no
secrets to steal. These mitigations are by design.
Preventative measures
=====================
In order to gain confidence that this is a one-off bug rather than endemic,
and to help prevent new bugs from being added, we have taken / will take the
following preventative measures going forward:
1. A fuzz test of each pointer type has been added to the standard unit test
suite. This test was confirmed to find the vulnerability in question.
2. We will additionally add fuzz testing with American Fuzzy Lop to our
extended test suite. AFL was used to find the original vulnerability. Our
current tests with AFL show only one other (less-critical) vulnerability
which will be reported separately ([2015-03-02-2][1]).
3. In parallel, we will extend our use of template metaprogramming for
compile-time unit analysis (kj::Quantity in kj/units.h) to also cover
overflow detection (by tracking the maximum size of an integer value across
arithmetic expressions and raising an error when it overflows). Preliminary
work with this approach successfully detected the vulnerability reported
here as well as one other vulnerability ([2015-03-02-1][2]).
[See the blog post][3] for more details. A simplified sketch of the
bounded-integer idea appears after this list.
4. We will continue to require that all tests (including the new fuzz test) run
cleanly under Valgrind before each release.
5. We will commission a professional security review before any 1.0 release.
Until that time, we continue to recommend against using Cap'n Proto to
interpret data from potentially-malicious sources.
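As promised in item 3, here is a loose sketch of the bounded-integer idea. The
real implementation relies on C++ template metaprogramming, so the check happens
at compile time; this Python analogue performs the same worst-case tracking at
run time and is illustrative only:

```python
class Bounded:
    """An integer paired with the largest value it could possibly hold."""
    WORD_MAX = 0xFFFFFFFF  # assume 32-bit arithmetic for the illustration

    def __init__(self, value, max_value):
        assert value <= max_value
        self.value = value
        self.max_value = max_value

    def __add__(self, other):
        worst_case = self.max_value + other.max_value
        if worst_case > self.WORD_MAX:
            raise OverflowError("this expression could overflow a 32-bit word")
        return Bounded(self.value + other.value, worst_case)

# Fine: the worst case (3000) fits comfortably in 32 bits.
total = Bounded(7, 1000) + Bounded(9, 2000)

# Rejected before any wrap-around can happen, whatever the runtime values are.
Bounded(1, 0xFFFFFFF0) + Bounded(1, 0x100)  # raises OverflowError
```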
I am pleased that measures 1, 2, and 3 all detected this bug, suggesting that
they have a high probability of catching any similar bugs.
[1]: https://github.com/capnproto/capnproto/tree/master/security-advisories/2015-03-02-2-all-cpu-amplification.md
[2]: https://github.com/capnproto/capnproto/tree/master/security-advisories/2015-03-02-1-c++-integer-underflow.md
[3]: https://capnproto.org/news/2015-03-02-security-advisory-and-integer-overflow-protection.html
| PypiClean |
/dmp_dash_components-1.3.2-py3-none-any.whl/dmp_dash_components/AntdEmpty.py |
from dash.development.base_component import Component, _explicitize_args
class AntdEmpty(Component):
"""An AntdEmpty component.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional)
- id (string; optional)
- className (string; optional)
- description (a list of or a singular dash component, string or number; optional)
- image (string | a value equal to: 'default', 'simple'; optional)
- imageStyle (dict; optional)
- key (string; optional)
- loading_state (dict; optional)
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- locale (a value equal to: 'zh-cn', 'en-us'; default 'zh-cn')
- style (dict; optional)"""
_children_props = ['description']
_base_nodes = ['description', 'children']
_namespace = 'dmp_dash_components'
_type = 'AntdEmpty'
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, className=Component.UNDEFINED, style=Component.UNDEFINED, key=Component.UNDEFINED, locale=Component.UNDEFINED, description=Component.UNDEFINED, image=Component.UNDEFINED, imageStyle=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'className', 'description', 'image', 'imageStyle', 'key', 'loading_state', 'locale', 'style']
self._valid_wildcard_attributes = []
self.available_properties = ['children', 'id', 'className', 'description', 'image', 'imageStyle', 'key', 'loading_state', 'locale', 'style']
self.available_wildcard_properties = []
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs and excess named props
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
super(AntdEmpty, self).__init__(children=children, **args) | PypiClean |
/kubeflow_kale-0.7.0-py3-none-any.whl/kale/pipeline.py |
import os
import copy
import logging
import networkx as nx
from typing import Iterable, Dict
from kubernetes.config import ConfigException
from kubernetes.client.rest import ApiException
from kale import Step, PipelineParam
from kale.config import Config, Field, validators
from kale.common import graphutils, utils, podutils
log = logging.getLogger(__name__)
VOLUME_ACCESS_MODE_MAP = {"rom": ["ReadOnlyMany"], "rwo": ["ReadWriteOnce"],
"rwm": ["ReadWriteMany"]}
DEFAULT_VOLUME_ACCESS_MODE = VOLUME_ACCESS_MODE_MAP["rwm"]
class VolumeConfig(Config):
"""Used for validating the `volumes` field of NotebookConfig."""
name = Field(type=str, required=True,
validators=[validators.K8sNameValidator])
mount_point = Field(type=str, required=True)
snapshot = Field(type=bool, default=False)
snapshot_name = Field(type=str)
size = Field(type=int) # fixme: validation for this field?
size_type = Field(type=str) # fixme: validation for this field?
type = Field(type=str, required=True,
validators=[validators.VolumeTypeValidator])
annotations = Field(type=list, default=list())
storage_class_name = Field(type=str,
validators=[validators.K8sNameValidator])
volume_access_mode = Field(
type=str, validators=[validators.IsLowerValidator,
validators.VolumeAccessModeValidator])
def _parse_annotations(self):
# Convert annotations to a {k: v} dictionary
try:
# TODO: Make JupyterLab annotate with {k: v} instead of
# {'key': k, 'value': v}
self.annotations = {a['key']: a['value']
for a in self.annotations
if a['key'] != '' and a['value'] != ''}
except KeyError as e:
if str(e) in ["'key'", "'value'"]:
raise ValueError("Volume spec: volume annotations must be a"
" list of {'key': k, 'value': v} dicts")
else:
raise e
def _parse_access_mode(self):
if self.volume_access_mode:
self.volume_access_mode = (
VOLUME_ACCESS_MODE_MAP[self.volume_access_mode])
def _postprocess(self):
self._parse_annotations()
self._parse_access_mode()
class KatibConfig(Config):
"""Used to validate the `katib_metadata` field of NotebookConfig."""
# fixme: improve validation of single fields
parameters = Field(type=list, default=[])
objective = Field(type=dict, default={})
algorithm = Field(type=dict, default={})
# fixme: Change these names to be Pythonic (need to change how the
# labextension passes them)
maxTrialCount = Field(type=int, default=12)
maxFailedTrialCount = Field(type=int, default=3)
parallelTrialCount = Field(type=int, default=3)
class PipelineConfig(Config):
"""Main config class to validate the pipeline metadata."""
pipeline_name = Field(type=str, required=True,
validators=[validators.PipelineNameValidator])
experiment_name = Field(type=str, required=True)
pipeline_description = Field(type=str, default="")
docker_image = Field(type=str, default="")
volumes = Field(type=list, items_config_type=VolumeConfig, default=[])
katib_run = Field(type=bool, default=False)
katib_metadata = Field(type=KatibConfig)
abs_working_dir = Field(type=str, default="")
marshal_volume = Field(type=bool, default=True)
marshal_path = Field(type=str, default="/marshal")
autosnapshot = Field(type=bool, default=True)
steps_defaults = Field(type=dict, default=dict())
kfp_host = Field(type=str)
storage_class_name = Field(type=str,
validators=[validators.K8sNameValidator])
volume_access_mode = Field(
type=str, validators=[validators.IsLowerValidator,
validators.VolumeAccessModeValidator])
timeout = Field(type=int, validators=[validators.PositiveIntegerValidator])
@property
def source_path(self):
"""Get the path to the main entry point script."""
return utils.get_main_source_path()
def _postprocess(self):
# self._randomize_pipeline_name()
self._set_docker_image()
self._set_volume_storage_class()
self._set_volume_access_mode()
self._sort_volumes()
self._set_abs_working_dir()
self._set_marshal_path()
def _randomize_pipeline_name(self):
self.pipeline_name = "%s-%s" % (self.pipeline_name,
utils.random_string())
def _set_docker_image(self):
if not self.docker_image:
try:
self.docker_image = podutils.get_docker_base_image()
except (ConfigException, RuntimeError, FileNotFoundError,
ApiException):
# * ConfigException: no K8s config found
# * RuntimeError, FileNotFoundError: this is not running in a
# pod
# * ApiException: K8s call to read pod raised exception;
# Use kfp default image
self.docker_image = ""
def _set_volume_storage_class(self):
if not self.storage_class_name:
return
for v in self.volumes:
if not v.storage_class_name:
v.storage_class_name = self.storage_class_name
def _set_volume_access_mode(self):
if not self.volume_access_mode:
self.volume_access_mode = DEFAULT_VOLUME_ACCESS_MODE
else:
self.volume_access_mode = VOLUME_ACCESS_MODE_MAP[
self.volume_access_mode]
for v in self.volumes:
if not v.volume_access_mode:
v.volume_access_mode = self.volume_access_mode
def _sort_volumes(self):
# The Jupyter Web App assumes the first volume of the notebook is the
# working directory, so we make sure to make it appear first in the
# spec.
self.volumes = sorted(self.volumes,
reverse=True,
key=lambda _v: podutils.is_workspace_dir(
_v.mount_point))
def _set_abs_working_dir(self):
if not self.abs_working_dir:
self.abs_working_dir = utils.abs_working_dir(self.source_path)
def _set_marshal_path(self):
# Check if the workspace directory is under a mounted volume.
# If so, marshal data into a folder in that volume,
# otherwise create a new volume and mount it at /marshal
wd = os.path.realpath(self.abs_working_dir)
# get the volumes for which the working directory is a sub-path of
# the mount point
vols = list(
filter(lambda x: wd.startswith(x.mount_point), self.volumes))
# if we found any, then set marshal directory inside working directory
if len(vols) > 0:
basename = os.path.basename(self.source_path)
marshal_dir = ".{}.kale.marshal.dir".format(basename)
self.marshal_volume = False
self.marshal_path = os.path.join(wd, marshal_dir)
class Pipeline(nx.DiGraph):
"""A Pipeline that can be converted into a KFP pipeline.
This class is used to define a pipeline, its steps and all its
configurations. It extends nx.DiGraph to exploit some graph-related
algorithms but provides helper functions to work with Step objects
instead of standard networkx "nodes". This makes it simpler to access
the steps of the pipeline and their attributes.
"""
def __init__(self, config: PipelineConfig, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = config
self.pipeline_parameters: Dict[str, PipelineParam] = dict()
self.processor = None
self._pps_names = None
def run(self):
"""Runs the steps locally in topological sort."""
for step in self.steps:
step.run(self.pipeline_parameters)
def add_step(self, step: Step):
"""Add a new Step to the pipeline."""
if not isinstance(step, Step):
raise RuntimeError("Not of type Step.")
if step.name in self.steps_names:
raise RuntimeError("Step with name '%s' already exists"
% step.name)
self.add_node(step.name, step=step)
def add_dependency(self, parent: Step, child: Step):
"""Link two Steps in the pipeline."""
self.add_edge(parent.name, child.name)
def get_step(self, name: str) -> Step:
"""Get the Step with the provided name."""
return self.nodes()[name]["step"]
@property
def steps(self) -> Iterable[Step]:
"""Get the Steps objects sorted topologically."""
return map(lambda x: self.nodes()[x]["step"], self.steps_names)
@property
def steps_names(self):
"""Get all Steps' names, sorted topologically."""
return [step.name for step in self._topological_sort()]
@property
def all_steps_parameters(self):
"""Create a dict with step names and their parameters."""
return {step: sorted(self.get_step(step).parameters.keys())
for step in self.steps_names}
@property
def pipeline_dependencies_tasks(self):
"""Generate a dictionary of Pipeline dependencies."""
return {step_name: list(self.predecessors(step_name))
for step_name in self.steps_names}
@property
def pps_names(self):
"""Get the names of the pipeline parameters sorted."""
if self._pps_names is None:
self._pps_names = sorted(self.pipeline_parameters.keys())
return self._pps_names
@property
def pps_types(self):
"""Get the types of the pipeline parameters, sorted by name."""
return [self.pipeline_parameters[n].param_type for n in self.pps_names]
@property
def pps_values(self):
"""Get the values of the pipeline parameters, sorted by name."""
return [self.pipeline_parameters[n].param_value
for n in self.pps_names]
def _topological_sort(self) -> Iterable[Step]:
return self._steps_iterable(nx.topological_sort(self))
def get_ordered_ancestors(self, step_name: str) -> Iterable[Step]:
"""Return the ancestors of a step in an ordered manner.
Wrapper of graphutils.get_ordered_ancestors.
Returns:
Iterable[Step]: A Steps iterable.
"""
return self._steps_iterable(
graphutils.get_ordered_ancestors(self, step_name))
def _steps_iterable(self, step_names: Iterable[str]) -> Iterable[Step]:
for name in step_names:
yield self.get_step(name)
def get_leaf_steps(self):
"""Get the list of leaf steps of the pipeline.
A step is considered a leaf when its in-degree is > 0 and its
out-degree is 0.
Returns (list): A list of leaf Steps.
"""
return [x for x in self.steps if self.out_degree(x.name) == 0]
def override_pipeline_parameters_from_kwargs(self, **kwargs):
"""Overwrite the current pipeline parameters with provided inputs."""
_pipeline_parameters = copy.deepcopy(self.pipeline_parameters)
for k, v in kwargs.items():
if k not in self.pipeline_parameters:
raise RuntimeError("Running pipeline '%s' with"
" an input argument that is not in its"
" parameters: %s"
% (self.config.pipeline_name, k))
# replace default value with the provided one
_type = _pipeline_parameters[k].param_type
_pipeline_parameters[k] = PipelineParam(_type, v)
self.pipeline_parameters = _pipeline_parameters
def show(self):
"""Print the pipeline nodes and dependencies in a table."""
from tabulate import tabulate
data = []
for step in self.steps:
data.append([step.name, [x for x in self.predecessors(step.name)]])
log.info("Pipeline layout:")
log.info("\n" + tabulate(data, headers=["Step", "Depends On"]) + "\n") | PypiClean |
/applause-tool-0.5.0.tar.gz/applause-tool-0.5.0/applause/auth.py | from applause import __program_name__, __version__
import logging
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
import requests
from applause.errors import InvalidLogin
from . import settings
class ApplauseAuth(object):
"""
Handles Applause's 3 legged OAuth API.
"""
def __init__(self, client_id, client_secret, oauth_base_url=None):
self.client_id = client_id
self.client_secret = client_secret
self.session = requests.Session()
self.session.auth = (client_id, client_secret)
self.access_token = None
self.oauth_base_url = oauth_base_url or settings.OAUTH_BASE_URL
@staticmethod
def get_oauth_token_url(oauth_base_url):
return urljoin(oauth_base_url, "/auth/token")
@staticmethod
def generate_requests_session(access_token):
"""
Generates a new requests `Session` object objects with all the
necessary auth headers and version header for debug purposes set.
"""
session = requests.Session()
session.headers = {
"Authorization": "Bearer " + access_token,
"Accept": "application/json",
"User-Agent": "%s v.%s" % (__program_name__, __version__)
}
return session
def _get_access_token(self, username, password):
"""
Gets an access token from the auth-service using the
Resource Owner Client Credentials Grant Flow (OAuth2).
"""
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
data = {
'grant_type': 'password',
'username': username,
'password': password,
'rememberMe': 'on'
}
url = self.get_oauth_token_url(self.oauth_base_url)
response = self.session.post(url, data=data, headers=headers)
if not response.ok:
logging.debug("Response: {data}".format(data=response.content))
raise InvalidLogin("Could not get a new access token. Please check your credentials.")
data = response.json().get('data', None)
return data.get('access_token', None) if data else None
def login(self, username=None, password=None, access_token=None):
"""
Initiates a user session with one of the following arguments:
* username & password (a new access token is fetched)
* access_token (an existing token is reused)
"""
if username and password:
logging.debug("Logging in with username & password")
self.access_token = self._get_access_token(username, password)
else:
logging.debug("Logging in with access token")
self.access_token = access_token
if not self.access_token:
raise InvalidLogin("Could not use an existing or fetch a new access token. Please try again.")
return self.generate_requests_session(self.access_token)
def logged_in(self):
"""
Returns true if a user auth session has been initiated.
"""
return self.access_token is not None | PypiClean |
/NlpToolkit-MorphologicalDisambiguation-1.0.16.tar.gz/NlpToolkit-MorphologicalDisambiguation-1.0.16/MorphologicalDisambiguation/HmmDisambiguation.py | import math
from Dictionary.Word import Word
from MorphologicalAnalysis.FsmParse import FsmParse
from NGram.LaplaceSmoothing import LaplaceSmoothing
from NGram.NGram import NGram
from DisambiguationCorpus.DisambiguationCorpus import DisambiguationCorpus
from MorphologicalDisambiguation.NaiveDisambiguation import NaiveDisambiguation
class HmmDisambiguation(NaiveDisambiguation):
word_bi_gram_model: NGram
ig_bi_gram_model: NGram
def train(self, corpus: DisambiguationCorpus):
"""
The train method iterates over the sentences of the given DisambiguationCorpus, taking each word together with
the next word. It adds these words, with their part of speech tags, to the word unigram and bigram models, and
adds the last inflectional group of each word to the ig unigram and bigram models. Finally, it calculates the
NGram probabilities of the word and ig unigram and bigram models using LaplaceSmoothing.
PARAMETERS
----------
corpus : DisambiguationCorpus
DisambiguationCorpus to train.
"""
words1 = [None]
igs1 = [None]
words2 = [None, None]
igs2 = [None, None]
self.word_uni_gram_model = NGram(1)
self.ig_uni_gram_model = NGram(1)
self.word_bi_gram_model = NGram(2)
self.ig_bi_gram_model = NGram(2)
for sentence in corpus.sentences:
for j in range(sentence.wordCount() - 1):
word = sentence.getWord(j)
next_word = sentence.getWord(j + 1)
words2[0] = word.getParse().getWordWithPos()
words1[0] = words2[0]
words2[1] = next_word.getParse().getWordWithPos()
self.word_uni_gram_model.addNGram(words1)
self.word_bi_gram_model.addNGram(words2)
for k in range(next_word.getParse().size()):
igs2[0] = Word(word.getParse().getLastInflectionalGroup().__str__())
igs2[1] = Word(next_word.getParse().getInflectionalGroup(k).__str__())
self.ig_bi_gram_model.addNGram(igs2)
igs1[0] = igs2[1]
self.ig_uni_gram_model.addNGram(igs1)
self.word_uni_gram_model.calculateNGramProbabilitiesSimple(LaplaceSmoothing())
self.ig_uni_gram_model.calculateNGramProbabilitiesSimple(LaplaceSmoothing())
self.word_bi_gram_model.calculateNGramProbabilitiesSimple(LaplaceSmoothing())
self.ig_bi_gram_model.calculateNGramProbabilitiesSimple(LaplaceSmoothing())
def disambiguate(self, fsmParses: list) -> list:
"""
The disambiguate method takes a list of FsmParseList objects as input. For the first word it computes, for each
parse, the unigram probability of the word (with its part of speech tag) multiplied by the unigram probabilities
of its inflectional groups, and stores the logarithm of this product. For the remaining words it runs a
Viterbi-style dynamic program using the word and ig bigram models, then backtracks to return the most probable
sequence of parses.
PARAMETERS
----------
fsmParses : list
FsmParseList to disambiguate.
RETURNS
-------
list
List of FsmParses.
"""
if len(fsmParses) == 0:
return None
for i in range(len(fsmParses)):
if fsmParses[i].size() == 0:
return None
correct_fsm_parses = []
probabilities = [[0.0 for _ in range(fsmParses[i].size())] for i in range(len(fsmParses))]
best = [[0 for _ in range(fsmParses[i].size())] for i in range(len(fsmParses))]
for i in range(fsmParses[0].size()):
current_parse = fsmParses[0].getFsmParse(i)
if isinstance(current_parse, FsmParse):
w1 = current_parse.getWordWithPos()
probability = self.word_uni_gram_model.getProbability(w1)
for j in range(current_parse.size()):
ig1 = Word(current_parse.getInflectionalGroup(j).__str__())
probability *= self.ig_uni_gram_model.getProbability(ig1)
probabilities[0][i] = math.log(probability)
for i in range(1, len(fsmParses)):
for j in range(fsmParses[i].size()):
best_probability = -10000
best_index = -1
current_parse = fsmParses[i].getFsmParse(j)
if isinstance(current_parse, FsmParse):
for k in range(fsmParses[i - 1].size()):
previous_parse = fsmParses[i - 1].getFsmParse(k)
w1 = previous_parse.getWordWithPos()
w2 = current_parse.getWordWithPos()
probability = probabilities[i - 1][k] + math.log(self.word_bi_gram_model.getProbability(w1, w2))
for t in range(fsmParses[i].getFsmParse(j).size()):
ig1 = Word(previous_parse.lastInflectionalGroup().__str__())
ig2 = Word(current_parse.getInflectionalGroup(t).__str__())
probability += math.log(self.ig_bi_gram_model.getProbability(ig1, ig2))
if probability > best_probability:
best_index = k
best_probability = probability
probabilities[i][j] = best_probability
best[i][j] = best_index
best_probability = -10000
best_index = -1
for i in range(fsmParses[len(fsmParses) - 1].size()):
if probabilities[len(fsmParses) - 1][i] > best_probability:
best_probability = probabilities[len(fsmParses) - 1][i]
best_index = i
if best_index == -1:
return None
correct_fsm_parses.append(fsmParses[len(fsmParses) - 1].getFsmParse(best_index))
for i in range(len(fsmParses) - 2, -1, -1):
best_index = best[i + 1][best_index]
if best_index == -1:
return None
correct_fsm_parses.insert(0, fsmParses[i].getFsmParse(best_index))
return correct_fsm_parses
def saveModel(self):
"""
Method to save unigrams and bigrams.
"""
super().saveModel()
self.word_bi_gram_model.saveAsText("words2.txt")
self.ig_bi_gram_model.saveAsText("igs2.txt")
def loadModel(self):
"""
Method to load unigrams and bigrams.
"""
super().loadModel()
self.word_bi_gram_model = NGram("words2.txt")
self.ig_bi_gram_model = NGram("igs2.txt") | PypiClean |
/cirq_google-1.2.0-py3-none-any.whl/cirq_google/workflow/qubit_placement.py | import abc
import dataclasses
from functools import lru_cache
from typing import Dict, Any, Tuple, List, Callable, TYPE_CHECKING, Hashable
import numpy as np
import cirq
from cirq import _compat
from cirq.devices.named_topologies import get_placements, NamedTopology
from cirq.protocols import obj_to_dict_helper
from cirq_google.workflow._device_shim import _Device_dot_get_nx_graph
if TYPE_CHECKING:
import cirq_google as cg
class CouldNotPlaceError(RuntimeError):
"""Raised if a problem topology could not be placed on a device graph."""
class QubitPlacer(metaclass=abc.ABCMeta):
@abc.abstractmethod
def place_circuit(
self,
circuit: cirq.AbstractCircuit,
problem_topology: 'cirq.NamedTopology',
shared_rt_info: 'cg.SharedRuntimeInfo',
rs: np.random.RandomState,
) -> Tuple['cirq.FrozenCircuit', Dict[Any, 'cirq.Qid']]:
"""Place a circuit with a given topology.
Args:
circuit: The circuit.
problem_topology: The topologies (i.e. connectivity) of the circuit.
shared_rt_info: A `cg.SharedRuntimeInfo` object that may contain additional info
to inform placement.
rs: A `RandomState` to enable pseudo-random placement strategies.
Returns:
A tuple of a new frozen circuit with the qubits placed and a mapping from input
qubits or nodes to output qubits.
"""
@dataclasses.dataclass(frozen=True)
class NaiveQubitPlacer(QubitPlacer):
"""Don't do any qubit placement, use circuit qubits."""
def place_circuit(
self,
circuit: 'cirq.AbstractCircuit',
problem_topology: 'cirq.NamedTopology',
shared_rt_info: 'cg.SharedRuntimeInfo',
rs: np.random.RandomState,
) -> Tuple['cirq.FrozenCircuit', Dict[Any, 'cirq.Qid']]:
return circuit.freeze(), {q: q for q in circuit.all_qubits()}
@classmethod
def _json_namespace_(cls) -> str:
return 'cirq.google'
def _json_dict_(self) -> Dict[str, Any]:
return cirq.dataclass_json_dict(self)
def __repr__(self) -> str:
return _compat.dataclass_repr(self, namespace='cirq_google')
def default_topo_node_to_qubit(node: Any) -> cirq.Qid:
"""The default mapping from `cirq.NamedTopology` nodes and `cirq.Qid`.
There is a correspondence between nodes and the "abstract" Qids
used to construct un-placed circuit. `cirq.get_placements` returns a dictionary
mapping from node to Qid. We use this function to transform it into a mapping
from "abstract" Qid to device Qid. This function encodes the default behavior used by
`RandomDevicePlacer`.
If nodes are tuples of integers, map to `cirq.GridQubit`. Otherwise, try
to map to `cirq.LineQubit` and rely on its validation.
Args:
node: A node from a `cirq.NamedTopology` graph.
Returns:
A `cirq.Qid` appropriate for the node type.
"""
try:
return cirq.GridQubit(*node)
except TypeError:
return cirq.LineQubit(node)
class HardcodedQubitPlacer(QubitPlacer):
def __init__(
self,
mapping: Dict[cirq.NamedTopology, Dict[Any, cirq.Qid]],
topo_node_to_qubit_func: Callable[[Hashable], cirq.Qid] = default_topo_node_to_qubit,
):
"""A placement strategy that uses the explicitly provided `mapping`.
Args:
mapping: The hardcoded placements. This provides a placement for each supported
`cirq.NamedTopology`. The topology serves as the key for the mapping dictionary.
Each placement is a dictionary mapping topology node to final `cirq.Qid` device
qubit.
topo_node_to_qubit_func: A function that maps from `cirq.NamedTopology` nodes
to `cirq.Qid`. There is a correspondence between nodes and the "abstract" Qids
used to construct the un-placed circuit. We use this function to interpret
the provided mappings. By default: nodes which are tuples correspond
to `cirq.GridQubit`s; otherwise `cirq.LineQubit`.
Note:
The attribute `topo_node_to_qubit_func` is not preserved in JSON serialization. This
bit of plumbing does not affect the placement behavior.
"""
self._mapping = mapping
self.topo_node_to_qubit_func = topo_node_to_qubit_func
def place_circuit(
self,
circuit: cirq.AbstractCircuit,
problem_topology: NamedTopology,
shared_rt_info: 'cg.SharedRuntimeInfo',
rs: np.random.RandomState,
) -> Tuple[cirq.FrozenCircuit, Dict[Any, cirq.Qid]]:
"""Place a circuit according to the hardcoded placements.
Args:
circuit: The circuit.
problem_topology: The topologies (i.e. connectivity) of the circuit, use to look
up the placement in `self.mapping`.
shared_rt_info: A `cg.SharedRuntimeInfo` object; ignored for hardcoded placement.
rs: A `RandomState`; ignored for hardcoded placement.
Returns:
A tuple of a new frozen circuit with the qubits placed and a mapping from input
qubits or nodes to output qubits.
Raises:
CouldNotPlaceError: if the given problem_topology is not present in the hardcoded
mapping.
"""
try:
nt_mapping = self._mapping[problem_topology]
except KeyError as e:
raise CouldNotPlaceError(str(e))
circuit_mapping = {
self.topo_node_to_qubit_func(nt_node): gridq for nt_node, gridq in nt_mapping.items()
}
circuit = circuit.unfreeze().transform_qubits(circuit_mapping).freeze()
return circuit, circuit_mapping
def __repr__(self) -> str:
return f'cirq_google.HardcodedQubitPlacer(mapping={_compat.proper_repr(self._mapping)})'
@classmethod
def _json_namespace_(cls) -> str:
return 'cirq.google'
def _json_dict_(self):
d = obj_to_dict_helper(self, attribute_names=[])
# Nested dict: turn both levels to list(key_value_pair)
mapping = {topo: list(placement.items()) for topo, placement in self._mapping.items()}
mapping = list(mapping.items())
d['mapping'] = mapping
return d
@classmethod
def _from_json_dict_(cls, **kwargs) -> 'HardcodedQubitPlacer':
# From nested list(key_value_pair) to dictionary
mapping: Dict[cirq.NamedTopology, Dict[Any, 'cirq.Qid']] = {}
for topo, placement_kvs in kwargs['mapping']:
placement: Dict[Hashable, 'cirq.Qid'] = {}
for k, v in placement_kvs:
if isinstance(k, list):
k = tuple(k)
placement[k] = v
mapping[topo] = placement
return cls(mapping=mapping)
def __eq__(self, other):
if not isinstance(other, HardcodedQubitPlacer):
# coverage: ignore
return False
return self._mapping == other._mapping
@lru_cache()
def _cached_get_placements(
problem_topo: 'cirq.NamedTopology', device: 'cirq.Device'
) -> List[Dict[Any, 'cirq.Qid']]:
"""Cache `cirq.get_placements` onto the specific device."""
return get_placements(
big_graph=_Device_dot_get_nx_graph(device), small_graph=problem_topo.graph
)
def _get_random_placement(
problem_topology: 'cirq.NamedTopology',
device: 'cirq.Device',
rs: np.random.RandomState,
topo_node_to_qubit_func: Callable[[Any], 'cirq.Qid'] = default_topo_node_to_qubit,
) -> Dict['cirq.Qid', 'cirq.Qid']:
"""Place `problem_topology` randomly onto a device.
This is a helper function used by `RandomDevicePlacer.place_circuit`.
"""
placements = _cached_get_placements(problem_topology, device)
if len(placements) == 0:
raise CouldNotPlaceError
random_i = rs.randint(len(placements))
placement = placements[random_i]
placement_gq = {topo_node_to_qubit_func(k): v for k, v in placement.items()}
return placement_gq
class RandomDevicePlacer(QubitPlacer):
def __init__(
self, topo_node_to_qubit_func: Callable[[Any], cirq.Qid] = default_topo_node_to_qubit
):
"""A placement strategy that randomly places circuits onto devices.
Args:
topo_node_to_qubit_func: A function that maps from `cirq.NamedTopology` nodes
to `cirq.Qid`. There is a correspondence between nodes and the "abstract" Qids
used to construct the un-placed circuit. `cirq.get_placements` returns a dictionary
mapping from node to Qid. We use this function to transform it into a mapping
from "abstract" Qid to device Qid. By default: nodes which are tuples correspond
to `cirq.GridQubit`s; otherwise `cirq.LineQubit`.
Note:
The attribute `topo_node_to_qubit_func` is not preserved in JSON serialization. This
bit of plumbing does not affect the placement behavior.
"""
self.topo_node_to_qubit_func = topo_node_to_qubit_func
def place_circuit(
self,
circuit: 'cirq.AbstractCircuit',
problem_topology: 'cirq.NamedTopology',
shared_rt_info: 'cg.SharedRuntimeInfo',
rs: np.random.RandomState,
) -> Tuple['cirq.FrozenCircuit', Dict[Any, 'cirq.Qid']]:
"""Place a circuit with a given topology onto a device via `cirq.get_placements` with
randomized selection of the placement each time.
This requires device information to be present in `shared_rt_info`.
Args:
circuit: The circuit.
problem_topology: The topologies (i.e. connectivity) of the circuit.
shared_rt_info: A `cg.SharedRuntimeInfo` object that contains a `device` attribute
of type `cirq.Device` to enable placement.
rs: A `RandomState` as a source of randomness for random placements.
Returns:
A tuple of a new frozen circuit with the qubits placed and a mapping from input
qubits or nodes to output qubits.
Raises:
ValueError: If `shared_rt_info` does not have a device field.
"""
device = shared_rt_info.device
if device is None:
raise ValueError(
"RandomDevicePlacer requires shared_rt_info.device to be a `cirq.Device`. "
"This should have been set during the initialization phase of `cg.execute`."
)
placement = _get_random_placement(
problem_topology, device, rs=rs, topo_node_to_qubit_func=self.topo_node_to_qubit_func
)
return circuit.unfreeze().transform_qubits(placement).freeze(), placement
@classmethod
def _json_namespace_(cls) -> str:
return 'cirq.google'
def _json_dict_(self) -> Dict[str, Any]:
return cirq.obj_to_dict_helper(self, [])
def __repr__(self) -> str:
return "cirq_google.RandomDevicePlacer()"
def __eq__(self, other):
if isinstance(other, RandomDevicePlacer):
return True | PypiClean |
/sysnet-persons-1.0.2.tar.gz/sysnet-persons-1.0.2/persons/configuration.py | from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class TypeWithDefault(type):
def __init__(cls, name, bases, dct):
super(TypeWithDefault, cls).__init__(name, bases, dct)
cls._default = None
def __call__(cls):
if cls._default is None:
cls._default = type.__call__(cls)
return copy.copy(cls._default)
def set_default(cls, default):
cls._default = copy.copy(default)
class Configuration(six.with_metaclass(TypeWithDefault, object)):
"""NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
def __init__(self):
"""Constructor"""
# Default Base url
self.host = "https://service.sysnet.cz/persons/1.0.2"
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# function to refresh API key if expired
self.refresh_api_key_hook = None
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("persons")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API
# from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
# Set this to True/False to enable/disable SSL hostname verification.
self.assert_hostname = None
# urllib3 connection pool's maximum number of connections saved
# per pool. urllib3 uses 1 connection as default value, but this is
# not the best value when you are making a lot of possibly parallel
# requests to the same host, which is often the case here.
# cpu_count * 5 is used as default value to increase performance.
self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
# Proxy URL
self.proxy = None
# Safe chars for path_param
self.safe_chars_for_path_param = ''
@property
def logger_file(self):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""The logger file.
If the logger_file is None, then add stream handler and remove file
handler. Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in six.iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
return self.__debug
@debug.setter
def debug(self, value):
"""Debug status
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in six.iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""The logger format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.refresh_api_key_hook:
self.refresh_api_key_hook(self)
key = self.api_key.get(identifier)
if key:
prefix = self.api_key_prefix.get(identifier)
if prefix:
return "%s %s" % (prefix, key)
else:
return key
def get_basic_auth_token(self):
"""Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(
basic_auth=self.username + ':' + self.password
).get('authorization')
def auth_settings(self):
"""Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
'apiKey':
{
'type': 'api_key',
'in': 'header',
'key': 'X-API-KEY',
'value': self.get_api_key_with_prefix('X-API-KEY')
},
}
def to_debug_report(self):
"""Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 1.0.2\n"\
"SDK Package Version: 1.0.2".\
format(env=sys.platform, pyversion=sys.version) | PypiClean |
/dashboard_clients-3.0.3-py3-none-any.whl/dashboard_clients/dashboard_service_client.py | from typing import Any, Union
from clients_core.service_clients import E360ServiceClient
from .models import TabbedDashboardModel
class DashboardsClient(E360ServiceClient):
"""
Subclasses dataclass `clients_core.service_clients.E360ServiceClient`.
Args:
client (clients_core.rest_client.RestClient): an instance of a rest client
user_id (str): the user_id guid
"""
service_endpoint = ""
extra_headers = {
"accept": "application/json",
"Content-Type": "application/json-patch+json",
}
def create(
self, payload: TabbedDashboardModel, **kwargs: Any
) -> TabbedDashboardModel:
"""
Creates a dashboard, returns a deserialised model instance.
Args:
payload: a pydantic model for tabbed dashboard
"""
data: dict = payload.dump()
response = self.client.post(
"", json=data, headers=self.service_headers, raises=True, **kwargs
)
return TabbedDashboardModel.parse_obj(response.json())
def get_dashboard_by_id(
self, dashboard_id: Union[int, str], **kwargs: Any
) -> TabbedDashboardModel:
"""
Gets a dashboard by id
"""
response = self.client.get(
str(dashboard_id), headers=self.service_headers, raises=True, **kwargs
)
return TabbedDashboardModel.parse_obj(response.json())
def update_dashboard(
self, payload: TabbedDashboardModel, **kwargs: Any
) -> TabbedDashboardModel:
"""
Updates a dashboard with given payload which includes the dashboard id
Args:
payload: a pydantic model for tabbed dashboard
"""
data: dict = payload.dump()
response = self.client.put(
str(payload.id),
json=data,
headers=self.service_headers,
raises=True,
**kwargs,
)
return TabbedDashboardModel.parse_obj(response.json())
def delete_by_id(self, dashboard_id: Union[int, str], **kwargs: Any) -> bool:
"""Deletes a dashboard by id"""
response = self.client.delete(
str(dashboard_id),
headers=self.service_headers,
raises=True,
**kwargs,
)
return response.ok | PypiClean |
/torch_tb_profiler-0.4.1-py3-none-any.whl/torch_tb_profiler/io/base.py | import os
from abc import ABC, abstractmethod
from collections import namedtuple
# Data returned from the Stat call.
StatData = namedtuple('StatData', ['length'])
class BaseFileSystem(ABC):
def support_append(self):
return False
def append(self, filename, file_content, binary_mode=False):
pass
def download_file(self, file_to_download, file_to_save):
pass
@abstractmethod
def exists(self, filename):
raise NotImplementedError
@abstractmethod
def read(self, file, binary_mode=False, size=None, continue_from=None):
raise NotImplementedError
@abstractmethod
def write(self, filename, file_content, binary_mode=False):
raise NotImplementedError
@abstractmethod
def glob(self, filename):
raise NotImplementedError
@abstractmethod
def isdir(self, dirname):
raise NotImplementedError
@abstractmethod
def listdir(self, dirname):
raise NotImplementedError
@abstractmethod
def makedirs(self, path):
raise NotImplementedError
@abstractmethod
def stat(self, filename):
raise NotImplementedError
class BasePath(ABC):
@abstractmethod
def join(self, path, *paths):
pass
@abstractmethod
def abspath(self, path):
pass
@abstractmethod
def basename(self, path):
pass
@abstractmethod
def relpath(self, path, start):
pass
class LocalPath(BasePath):
def abspath(self, path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
def basename(self, path):
return os.path.basename(path)
def relpath(self, path, start):
return os.path.relpath(path, start)
def join(self, path, *paths):
return os.path.join(path, *paths)
class RemotePath(BasePath):
def split(self, path):
"""Split a pathname. Returns tuple '(head, tail)' where 'tail' is
everything after the final slash. Either part may be empty."""
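        # Example (illustrative): split("s3://bucket/dir/obj") -> ("s3://bucket/dir", "obj")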
sep = '/'
i = path.rfind(sep) + 1
head, tail = path[:i], path[i:]
head = head.rstrip(sep)
return (head, tail)
def join(self, path, *paths):
"""Join paths with a slash."""
return '/'.join((path,) + paths)
def abspath(self, path):
return path
def basename(self, path):
return path.split('/')[-1]
def relpath(self, path, start):
if not path.startswith(start):
return path
start = start.rstrip('/')
begin = len(start) + 1 # include the ending slash '/'
return path[begin:] | PypiClean |
/smartninja-redis-0.3.tar.gz/smartninja-redis-0.3/README.md | # SmartNinja Redis
A wrapper that simulates Redis on localhost (using TinyDB) and uses a real Redis db in production.
**Important:** This package is meant to be used at SmartNinja courses for learning purposes. It is not advised to use this package for serious projects. Use the default `redis` package instead. You only need to change the import statement.
## Installation
Install via pip:
```bash
pip install smartninja-redis
```
## Dependencies
The package has two dependencies: `tinydb` and `redis`. It installs them automatically.
## Usage
Access Redis via the `from_url()` function:
```python
import smartninja_redis as redis
r_url = redis.from_url(url="localhost")
r_url.set(name="Ninja", value="Smart")
print(r_url.get("Ninja"))
```
or via Redis class directly:
```python
from smartninja_redis import Redis
r_class = Redis(host="localhost")
r_class.set(name="smart", value="ninja")
print(r_class.get("smart"))
```
For now, only `set()` and `get()` methods work on localhost.
> The following set() parameters do not work: ex, px, nx, xx
### TinyDB
TinyDB is used to simulate Redis on localhost (if you don't have Redis installed and `REDIS_URL` env var set). TinyDB does not store any data on disk (in this case). It uses memory storage only.
### Using a real Redis service
If you'd like to use SmartNinja with a real Redis service (instead of TinyDB), make sure you have `REDIS_URL` environment variable set.
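A minimal sketch of pointing the client at a real Redis service (assumptions: the connection URL shown is only an example value, and passing it explicitly to `from_url()` is assumed to work — the package may also pick up `REDIS_URL` on its own):
```python
import os
import smartninja_redis as redis
# Assumes REDIS_URL was exported beforehand, e.g.
# export REDIS_URL="redis://:password@my-redis-host:6379/0"
r = redis.from_url(url=os.environ["REDIS_URL"])
r.set(name="greeting", value="hello")
print(r.get("greeting"))
```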
## Contributions
Contributions via pull requests are warmly welcome!
## TODO
- tests
- CI
| PypiClean |
/python-bitcoinaddress-0.2.2.tar.gz/python-bitcoinaddress-0.2.2/distribute_setup.py | import os
import sys
import time
import fnmatch
import tempfile
import tarfile
import optparse
from distutils import log
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
try:
import subprocess
def _python_cmd(*args):
args = (sys.executable,) + args
return subprocess.call(args) == 0
except ImportError:
# will be used for python 2.3
def _python_cmd(*args):
args = (sys.executable,) + args
# quoting arguments if windows
if sys.platform == 'win32':
def quote(arg):
if ' ' in arg:
return '"%s"' % arg
return arg
args = [quote(arg) for arg in args]
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
DEFAULT_VERSION = "0.6.29"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_FAKED_VERSION = "0.6c11"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball, install_args=()):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# installing
log.warn('Installing Distribute')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
finally:
os.chdir(old_wd)
def _build_egg(egg, tarball, to_dir):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
tar = tarfile.open(tarball)
_extractall(tar)
tar.close()
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
# building an egg
log.warn('Building a Distribute egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
finally:
os.chdir(old_wd)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
% (version, sys.version_info[0], sys.version_info[1]))
if not os.path.exists(egg):
tarball = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, tarball, to_dir)
sys.path.insert(0, egg)
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, download_delay=15, no_fake=True):
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
was_imported = 'pkg_resources' in sys.modules or \
'setuptools' in sys.modules
try:
try:
import pkg_resources
if not hasattr(pkg_resources, '_distribute'):
if not no_fake:
_fake_setuptools()
raise ImportError
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
pkg_resources.require("distribute>=" + version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
if was_imported:
sys.stderr.write(
"The required version of distribute (>=%s) is not available,\n"
"and can't be installed while this script is running. Please\n"
"install a more recent version first, using\n"
"'easy_install -U distribute'."
"\n\n(Currently using %r)\n" % (version, e.args[0]))
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return _do_download(version, download_base, to_dir,
download_delay)
except pkg_resources.DistributionNotFound:
return _do_download(version, download_base, to_dir,
download_delay)
finally:
if not no_fake:
_create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
`version` should be a valid distribute version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = "distribute-%s.tar.gz" % version
url = download_base + tgz_name
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
log.warn("Downloading %s", url)
src = urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = src.read()
dst = open(saveto, "wb")
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto)
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
from setuptools.sandbox import DirectorySandbox
if not hasattr(DirectorySandbox, '_old'):
def violation(*args):
pass
DirectorySandbox._old = DirectorySandbox._violation
DirectorySandbox._violation = violation
patched = True
else:
patched = False
except ImportError:
patched = False
try:
return function(*args, **kw)
finally:
if patched:
DirectorySandbox._violation = DirectorySandbox._old
del DirectorySandbox._old
return __no_sandbox
def _patch_file(path, content):
"""Will backup the file then patch it"""
existing_content = open(path).read()
if existing_content == content:
# already patched
log.warn('Already patched.')
return False
log.warn('Patching...')
_rename_path(path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
return True
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
return open(path).read() == content
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
log.warn('Renaming %s to %s', path, new_name)
os.rename(path, new_name)
return new_name
def _remove_flat_installation(placeholder):
if not os.path.isdir(placeholder):
        log.warn('Unknown installation at %s', placeholder)
return False
found = False
for file in os.listdir(placeholder):
if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
found = True
break
if not found:
log.warn('Could not locate setuptools*.egg-info')
return
log.warn('Moving elements out of the way...')
pkg_info = os.path.join(placeholder, file)
if os.path.isdir(pkg_info):
patched = _patch_egg_dir(pkg_info)
else:
patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
if not patched:
log.warn('%s already patched.', pkg_info)
return False
# now let's move the files out of the way
for element in ('setuptools', 'pkg_resources.py', 'site.py'):
element = os.path.join(placeholder, element)
if os.path.exists(element):
_rename_path(element)
else:
log.warn('Could not find the %s element of the '
'Setuptools distribution', element)
return True
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
log.warn('After install bootstrap.')
placeholder = dist.get_command_obj('install').install_purelib
_create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
if not placeholder or not os.path.exists(placeholder):
log.warn('Could not find the install location')
return
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
setuptools_file = 'setuptools-%s-py%s.egg-info' % \
(SETUPTOOLS_FAKED_VERSION, pyver)
pkg_info = os.path.join(placeholder, setuptools_file)
if os.path.exists(pkg_info):
log.warn('%s already exists', pkg_info)
return
log.warn('Creating %s', pkg_info)
try:
f = open(pkg_info, 'w')
except EnvironmentError:
log.warn("Don't have permissions to write %s, skipping", pkg_info)
return
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
pth_file = os.path.join(placeholder, 'setuptools.pth')
log.warn('Creating %s', pth_file)
f = open(pth_file, 'w')
try:
f.write(os.path.join(os.curdir, setuptools_file))
finally:
f.close()
_create_fake_setuptools_pkg_info = _no_sandbox(
_create_fake_setuptools_pkg_info
)
def _patch_egg_dir(path):
# let's check if it's already patched
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
if os.path.exists(pkg_info):
if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
log.warn('%s already patched.', pkg_info)
return False
_rename_path(path)
os.mkdir(path)
os.mkdir(os.path.join(path, 'EGG-INFO'))
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
f = open(pkg_info, 'w')
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
f.close()
return True
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
log.warn('Before install bootstrap.')
_fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install') + 1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
                top_dir = arg.split('=', 1)[-1]  # works for both --root= and --prefix=
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
top_dir = args[index + 1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
log.warn('Scanning installed packages')
try:
import pkg_resources
except ImportError:
# we're cool
log.warn('Setuptools or Distribute does not seem to be installed.')
return
ws = pkg_resources.working_set
try:
setuptools_dist = ws.find(
pkg_resources.Requirement.parse('setuptools', replacement=False)
)
except TypeError:
# old distribute API
setuptools_dist = ws.find(
pkg_resources.Requirement.parse('setuptools')
)
if setuptools_dist is None:
log.warn('No setuptools distribution found')
return
# detecting if it was already faked
setuptools_location = setuptools_dist.location
log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
# setuptools is not located in them, we don't patch it
if not _under_prefix(setuptools_location):
log.warn('Not patching, --root or --prefix is installing Distribute'
' in another location')
return
    # let's see if it's an egg
if not setuptools_location.endswith('.egg'):
log.warn('Non-egg installation')
res = _remove_flat_installation(setuptools_location)
if not res:
return
else:
log.warn('Egg installation')
pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
if (os.path.exists(pkg_info) and
_same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
log.warn('Already patched.')
return
log.warn('Patching...')
# let's create a fake egg replacing setuptools one
res = _patch_egg_dir(setuptools_location)
if not res:
return
log.warn('Patching complete.')
_relaunch()
def _relaunch():
log.warn('Relaunching...')
# we have to relaunch the process
# pip marker to avoid a relaunch bug
    _cmd1 = ['-c', 'install', '--single-version-externally-managed']
_cmd2 = ['-c', 'install', '--record']
if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
sys.argv[0] = 'setup.py'
args = [sys.executable] + sys.argv
sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
import copy
import operator
from tarfile import ExtractError
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
if sys.version_info < (2, 4):
def sorter(dir1, dir2):
return cmp(dir1.name, dir2.name)
directories.sort(sorter)
directories.reverse()
else:
directories.sort(key=operator.attrgetter('name'), reverse=True)
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError:
e = sys.exc_info()[1]
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the distribute package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
"""
Parse the command line for options
"""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package (requires Python 2.6 or later)')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the distribute package')
options, args = parser.parse_args()
# positional arguments are ignored
return options
def main(version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
options = _parse_args()
tarball = download_setuptools(download_base=options.download_base)
return _install(tarball, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main()) | PypiClean |
/streamlit-charticulator-0.0.7.tar.gz/streamlit-charticulator-0.0.7/src/charticulator/frontend/charticulator/dist/scripts/app/stores/defaults.js | "use strict";
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
Object.defineProperty(exports, "__esModule", { value: true });
exports.defaultVersionOfTemplate = exports.defaultFontSizeLegend = exports.defaultFontSize = exports.defaultFont = exports.createDefaultChart = exports.createDefaultTitle = exports.createDefaultPlotSegment = exports.createDefaultGlyph = void 0;
var core_1 = require("../../core");
var base_1 = require("../../core/prototypes/plot_segments/region_2d/base");
var specification_1 = require("../../core/specification");
/** Create a default glyph */
function createDefaultGlyph(tableName) {
return {
_id: core_1.uniqueID(),
classID: "glyph.rectangle",
properties: { name: "Glyph" },
table: tableName,
marks: [
{
_id: core_1.uniqueID(),
classID: "mark.anchor",
properties: { name: "Anchor" },
mappings: {
x: {
type: specification_1.MappingType.parent,
parentAttribute: "icx",
},
y: {
type: specification_1.MappingType.parent,
parentAttribute: "icy",
},
},
},
],
mappings: {},
constraints: [],
};
}
exports.createDefaultGlyph = createDefaultGlyph;
/** Create a default plot segment */
function createDefaultPlotSegment(table, glyph) {
return {
_id: core_1.uniqueID(),
classID: "plot-segment.cartesian",
glyph: glyph._id,
table: table.name,
filter: null,
mappings: {
x1: {
type: specification_1.MappingType.parent,
parentAttribute: "x1",
},
y1: {
type: specification_1.MappingType.parent,
parentAttribute: "y1",
},
x2: {
type: specification_1.MappingType.parent,
parentAttribute: "x2",
},
y2: {
type: specification_1.MappingType.parent,
parentAttribute: "y2",
},
},
properties: {
name: "PlotSegment1",
visible: true,
marginX1: 0,
marginY1: 0,
marginX2: 0,
marginY2: 0,
sublayout: {
type: table.rows.length >= 100 ? "grid" : base_1.Region2DSublayoutType.DodgeX,
order: null,
ratioX: 0.1,
ratioY: 0.1,
align: {
x: base_1.SublayoutAlignment.Start,
y: base_1.SublayoutAlignment.Start,
},
grid: {
direction: base_1.GridDirection.X,
xCount: null,
yCount: null,
gridStartPosition: base_1.GridStartPosition.LeftTop,
},
packing: {
gravityX: 0.1,
gravityY: 0.1,
boxedX: null,
boxedY: null,
},
jitter: {
vertical: true,
horizontal: true,
},
},
},
};
}
exports.createDefaultPlotSegment = createDefaultPlotSegment;
/** Create a default chart title */
function createDefaultTitle(dataset) {
return {
_id: core_1.uniqueID(),
classID: "mark.text",
properties: {
name: "Title",
visible: true,
alignment: { x: "middle", y: "top", xMargin: 0, yMargin: 30 },
rotation: 0,
},
mappings: {
x: {
type: specification_1.MappingType.parent,
parentAttribute: "cx",
},
y: {
type: specification_1.MappingType.parent,
parentAttribute: "oy2",
},
text: {
type: specification_1.MappingType.value,
value: dataset.name,
},
fontSize: {
type: specification_1.MappingType.value,
value: 24,
},
color: {
type: specification_1.MappingType.value,
value: { r: 0, g: 0, b: 0 },
},
},
};
}
exports.createDefaultTitle = createDefaultTitle;
/** Create a default chart */
function createDefaultChart(dataset, createTitle) {
var table = dataset.tables[0];
var glyph = createDefaultGlyph(table.name);
return {
_id: core_1.uniqueID(),
classID: "chart.rectangle",
properties: {
name: "Chart",
backgroundColor: null,
backgroundOpacity: 1,
enableContextMenu: true,
exposed: true,
},
mappings: {
marginTop: {
type: specification_1.MappingType.value,
value: 80,
},
width: {
type: specification_1.MappingType.value,
value: 900,
},
height: {
type: specification_1.MappingType.value,
value: 600,
},
},
glyphs: [glyph],
elements: [
createDefaultPlotSegment(table, glyph),
createTitle ? createDefaultTitle(dataset) : null,
].filter(function (elem) { return elem != null; }),
scales: [],
scaleMappings: [],
constraints: [],
resources: [],
};
}
exports.createDefaultChart = createDefaultChart;
exports.defaultFont = "Segoe UI";
exports.defaultFontSize = 12;
exports.defaultFontSizeLegend = 12;
exports.defaultVersionOfTemplate = "2.0.3";
//# sourceMappingURL=defaults.js.map | PypiClean |
/flytekitplugins_awssagemaker-1.9.1-py3-none-any.whl/flytekitplugins/awssagemaker/models/hpo_job.py | from flyteidl.plugins.sagemaker import hyperparameter_tuning_job_pb2 as _pb2_hpo_job
from flytekit.models import common as _common
from . import training_job as _training_job
class HyperparameterTuningObjectiveType(object):
MINIMIZE = _pb2_hpo_job.HyperparameterTuningObjectiveType.MINIMIZE
MAXIMIZE = _pb2_hpo_job.HyperparameterTuningObjectiveType.MAXIMIZE
class HyperparameterTuningObjective(_common.FlyteIdlEntity):
"""
HyperparameterTuningObjective is a data structure that contains the target metric and the
objective of the hyperparameter tuning.
https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-define-metrics.html
"""
def __init__(
self,
objective_type: int,
metric_name: str,
):
self._objective_type = objective_type
self._metric_name = metric_name
@property
def objective_type(self) -> int:
"""
Enum value of HyperparameterTuningObjectiveType. objective_type determines the direction of the tuning of
the Hyperparameter Tuning Job with respect to the specified metric.
:rtype: int
"""
return self._objective_type
@property
def metric_name(self) -> str:
"""
The target metric name, which is the user-defined name of the metric specified in the
training job's algorithm specification
:rtype: str
"""
return self._metric_name
def to_flyte_idl(self) -> _pb2_hpo_job.HyperparameterTuningObjective:
return _pb2_hpo_job.HyperparameterTuningObjective(
objective_type=self.objective_type,
metric_name=self._metric_name,
)
@classmethod
def from_flyte_idl(cls, pb2_object: _pb2_hpo_job.HyperparameterTuningObjective):
return cls(
objective_type=pb2_object.objective_type,
metric_name=pb2_object.metric_name,
)
class HyperparameterTuningStrategy:
BAYESIAN = _pb2_hpo_job.HyperparameterTuningStrategy.BAYESIAN
RANDOM = _pb2_hpo_job.HyperparameterTuningStrategy.RANDOM
class TrainingJobEarlyStoppingType:
OFF = _pb2_hpo_job.TrainingJobEarlyStoppingType.OFF
AUTO = _pb2_hpo_job.TrainingJobEarlyStoppingType.AUTO
class HyperparameterTuningJobConfig(_common.FlyteIdlEntity):
"""
The specification of the hyperparameter tuning process
https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-ex-tuning-job.html#automatic-model-tuning-ex-low-tuning-config
"""
def __init__(
self,
tuning_strategy: int,
tuning_objective: HyperparameterTuningObjective,
training_job_early_stopping_type: TrainingJobEarlyStoppingType,
):
self._tuning_strategy = tuning_strategy
self._tuning_objective = tuning_objective
self._training_job_early_stopping_type = training_job_early_stopping_type
@property
def tuning_strategy(self) -> int:
"""
Enum value of HyperparameterTuningStrategy. Setting the strategy used when searching in the hyperparameter space
:rtype: int
"""
return self._tuning_strategy
@property
def tuning_objective(self) -> HyperparameterTuningObjective:
"""
The target metric and the objective of the hyperparameter tuning.
:rtype: HyperparameterTuningObjective
"""
return self._tuning_objective
@property
def training_job_early_stopping_type(self) -> int:
"""
Enum value of TrainingJobEarlyStoppingType. When the training jobs launched by the hyperparameter tuning job
        are not improving significantly, the hyperparameter tuning job can be stopped early. This attribute determines
how the early stopping is to be done.
Note that there's only a subset of built-in algorithms that supports early stopping.
see: https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-early-stopping.html
:rtype: int
"""
return self._training_job_early_stopping_type
def to_flyte_idl(self) -> _pb2_hpo_job.HyperparameterTuningJobConfig:
return _pb2_hpo_job.HyperparameterTuningJobConfig(
tuning_strategy=self._tuning_strategy,
tuning_objective=self._tuning_objective.to_flyte_idl(),
training_job_early_stopping_type=self._training_job_early_stopping_type,
)
@classmethod
def from_flyte_idl(cls, pb2_object: _pb2_hpo_job.HyperparameterTuningJobConfig):
return cls(
tuning_strategy=pb2_object.tuning_strategy,
tuning_objective=HyperparameterTuningObjective.from_flyte_idl(pb2_object.tuning_objective),
training_job_early_stopping_type=pb2_object.training_job_early_stopping_type,
)
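# Illustrative sketch (comments only, not executed): wiring the models above
# into a tuning configuration; "validation:accuracy" is a made-up metric name.
#
#   objective = HyperparameterTuningObjective(
#       objective_type=HyperparameterTuningObjectiveType.MAXIMIZE,
#       metric_name="validation:accuracy",
#   )
#   config = HyperparameterTuningJobConfig(
#       tuning_strategy=HyperparameterTuningStrategy.BAYESIAN,
#       tuning_objective=objective,
#       training_job_early_stopping_type=TrainingJobEarlyStoppingType.AUTO,
#   )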
class HyperparameterTuningJob(_common.FlyteIdlEntity):
def __init__(
self,
max_number_of_training_jobs: int,
max_parallel_training_jobs: int,
training_job: _training_job.TrainingJob,
):
self._max_number_of_training_jobs = max_number_of_training_jobs
self._max_parallel_training_jobs = max_parallel_training_jobs
self._training_job = training_job
@property
def max_number_of_training_jobs(self) -> int:
"""
The maximum number of training jobs that a hyperparameter tuning job can launch.
https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ResourceLimits.html
:rtype: int
"""
return self._max_number_of_training_jobs
@property
def max_parallel_training_jobs(self) -> int:
"""
        The maximum number of concurrent training jobs that an HPO job can launch
https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ResourceLimits.html
:rtype: int
"""
return self._max_parallel_training_jobs
@property
def training_job(self) -> _training_job.TrainingJob:
"""
The reference to the underlying training job that the hyperparameter tuning job will launch during the process
:rtype: _training_job.TrainingJob
"""
return self._training_job
def to_flyte_idl(self) -> _pb2_hpo_job.HyperparameterTuningJob:
return _pb2_hpo_job.HyperparameterTuningJob(
max_number_of_training_jobs=self._max_number_of_training_jobs,
max_parallel_training_jobs=self._max_parallel_training_jobs,
training_job=self._training_job.to_flyte_idl(), # SDK task has already serialized it
)
@classmethod
def from_flyte_idl(cls, pb2_object: _pb2_hpo_job.HyperparameterTuningJob):
return cls(
max_number_of_training_jobs=pb2_object.max_number_of_training_jobs,
max_parallel_training_jobs=pb2_object.max_parallel_training_jobs,
training_job=_training_job.TrainingJob.from_flyte_idl(pb2_object.training_job),
) | PypiClean |
/yellowbrickhotfix-1.2.17-py3-none-any.whl/yellowbrick/classifier/classification_report.py | ##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_fscore_support
from yellowbrick.style import find_text_color
from yellowbrick.style.palettes import color_sequence
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.classifier.base import ClassificationScoreVisualizer
##########################################################################
## Classification Report
##########################################################################
PERCENT = "percent"
CMAP_UNDERCOLOR = "w"
CMAP_OVERCOLOR = "#2a7d4f"
SCORES_KEYS = ("precision", "recall", "f1", "support")
class ClassificationReport(ClassificationScoreVisualizer):
"""
Classification report that shows the precision, recall, F1, and support scores
for the model. Integrates numerical scores as well as a color-coded heatmap.
Parameters
----------
model : estimator
A scikit-learn estimator that should be a classifier. If the model is
not a classifier, an exception is raised. If the internal model is not
fitted, it is fit when the visualizer is fitted, unless otherwise specified
by ``is_fitted``.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If not specified the current axes will be
used (or generated if required).
    classes : list of str, default: None
The class labels to use for the legend ordered by the index of the sorted
classes discovered in the ``fit()`` method. Specifying classes in this
manner is used to change the class names to a more specific format or
to label encoded integer classes. Some visualizers may also use this
field to filter the visualization for specific classes. For more advanced
usage specify an encoder rather than class labels.
cmap : string, default: ``'YlOrRd'``
Specify a colormap to define the heatmap of the predicted class
against the actual class in the classification report.
support: {True, False, None, 'percent', 'count'}, default: None
Specify if support will be displayed. It can be further defined by
whether support should be reported as a raw count or percentage.
encoder : dict or LabelEncoder, default: None
A mapping of classes to human readable labels. Often there is a mismatch
between desired class labels and those contained in the target variable
passed to ``fit()`` or ``score()``. The encoder disambiguates this mismatch
ensuring that classes are labeled correctly in the visualization.
is_fitted : bool or str, default="auto"
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If "auto" (default), a helper method will check if the estimator
is fitted before fitting it again.
force_model : bool, default: False
Do not check to ensure that the underlying estimator is a classifier. This
will prevent an exception when the visualizer is initialized but may result
in unexpected or unintended behavior.
kwargs : dict
Keyword arguments passed to the visualizer base classes.
Examples
--------
>>> from yellowbrick.classifier import ClassificationReport
>>> from sklearn.linear_model import LogisticRegression
>>> viz = ClassificationReport(LogisticRegression())
>>> viz.fit(X_train, y_train)
>>> viz.score(X_test, y_test)
>>> viz.show()
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The class labels observed while fitting.
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting.
score_ : float
An evaluation metric of the classifier on test data produced when
``score()`` is called. This metric is between 0 and 1 -- higher scores are
generally better. For classifiers, this score is usually accuracy, but
ensure you check the underlying model for more details about the score.
scores_ : dict of dicts
Outer dictionary composed of precision, recall, f1, and support scores with
        inner dictionaries specifying the values for each class listed.
"""
def __init__(
self,
model,
ax=None,
classes=None,
cmap="YlOrRd",
support=None,
encoder=None,
is_fitted="auto",
force_model=False,
**kwargs
):
super(ClassificationReport, self).__init__(
model,
ax=ax,
classes=classes,
encoder=encoder,
is_fitted=is_fitted,
force_model=force_model,
**kwargs
)
self.support = support
self.cmap = color_sequence(cmap)
self.cmap.set_over(color=CMAP_OVERCOLOR)
self.cmap.set_under(color=CMAP_UNDERCOLOR)
self._displayed_scores = [key for key in SCORES_KEYS]
if support not in {None, True, False, "percent", "count"}:
raise YellowbrickValueError(
"'{}' is an invalid argument for support, use None, True, "
"False, 'percent', or 'count'".format(support)
)
if not support:
self._displayed_scores.remove("support")
def score(self, X, y):
"""
Generates the Scikit-Learn classification report.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
score_ : float
Global accuracy score
"""
# Call super to check if fitted and to compute self.score_
super(ClassificationReport, self).score(X, y)
y_pred = self.predict(X)
scores = precision_recall_fscore_support(y, y_pred)
# Calculate the percentage for the support metric
# and store the percent in place of raw support counts
self.support_score_ = scores[-1]
scores = list(scores)
scores[-1] = scores[-1] / scores[-1].sum()
# Create a mapping composed of precision, recall, F1, and support
# to their respective values
scores = map(lambda s: dict(zip(self.classes_, s)), scores)
self.scores_ = dict(zip(SCORES_KEYS, scores))
# Remove support scores if not required
if not self.support:
self.scores_.pop("support")
self.draw()
return self.score_
def draw(self):
"""
Renders the classification report across each axis.
"""
# Create display grid
cr_display = np.zeros((len(self.classes_), len(self._displayed_scores)))
# For each class row, append columns for precision, recall, f1, and support
for idx, cls in enumerate(self.classes_):
for jdx, metric in enumerate(self._displayed_scores):
cr_display[idx, jdx] = self.scores_[metric][cls]
# Set up the dimensions of the pcolormesh
# NOTE: pcolormesh accepts grids that are (N+1,M+1)
X, Y = (
np.arange(len(self.classes_) + 1),
np.arange(len(self._displayed_scores) + 1),
)
self.ax.set_ylim(bottom=0, top=cr_display.shape[0])
self.ax.set_xlim(left=0, right=cr_display.shape[1])
# Set data labels in the grid, enumerating over class, metric pairs
# NOTE: X and Y are one element longer than the classification report
# so skip the last element to label the grid correctly.
for x in X[:-1]:
for y in Y[:-1]:
# Extract the value and the text label
value = cr_display[x, y]
svalue = "{:0.3f}".format(value)
# change the svalue for support (when y == 3) because we want
# to label it as the actual support value, not the percentage
if y == 3:
if self.support != PERCENT:
svalue = self.support_score_[x]
# Determine the grid and text colors
base_color = self.cmap(value)
text_color = find_text_color(base_color)
# Add the label to the middle of the grid
cx, cy = x + 0.5, y + 0.5
self.ax.text(cy, cx, svalue, va="center", ha="center", color=text_color)
# Draw the heatmap with colors bounded by the min and max of the grid
        # NOTE: pcolormesh expects the column coordinates first, so the mesh is
        # passed as (Y, X): Y spans the metric columns and X spans the class rows
        # of cr_display; swapping the order raises an exception.
g = self.ax.pcolormesh(
Y, X, cr_display, vmin=0, vmax=1, cmap=self.cmap, edgecolor="w"
)
# Add the color bar
plt.colorbar(g, ax=self.ax) # TODO: Could use self.fig now
# Return the axes being drawn on
return self.ax
def finalize(self, **kwargs):
"""
Adds a title and sets the axis labels correctly. Also calls tight layout
to ensure that no parts of the figure are cut off in the final visualization.
Parameters
----------
kwargs: generic keyword arguments.
Notes
-----
Generally this method is called from show and not directly by the user.
"""
        # Set the title of the classification report
self.set_title("{} Classification Report".format(self.name))
# Set the tick marks appropriately
self.ax.set_xticks(np.arange(len(self._displayed_scores)) + 0.5)
self.ax.set_yticks(np.arange(len(self.classes_)) + 0.5)
self.ax.set_xticklabels(self._displayed_scores, rotation=45)
self.ax.set_yticklabels(self.classes_)
self.fig.tight_layout()
def classification_report(
model,
X_train,
y_train,
X_test=None,
y_test=None,
ax=None,
classes=None,
cmap="YlOrRd",
support=None,
encoder=None,
is_fitted="auto",
force_model=False,
show=True,
**kwargs
):
"""Classification Report
Displays precision, recall, F1, and support scores for the model.
Integrates numerical scores as well as color-coded heatmap.
Parameters
----------
model : estimator
A scikit-learn estimator that should be a classifier. If the model is
not a classifier, an exception is raised. If the internal model is not
fitted, it is fit when the visualizer is fitted, unless otherwise specified
by ``is_fitted``.
X_train : ndarray or DataFrame of shape n x m
A feature array of n instances with m features the model is trained on.
Used to fit the visualizer and also to score the visualizer if test splits are
not directly specified.
y_train : ndarray or Series of length n
An array or series of target or class values. Used to fit the visualizer and
also to score the visualizer if test splits are not specified.
X_test : ndarray or DataFrame of shape n x m, default: None
An optional feature array of n instances with m features that the model
is scored on if specified, using X_train as the training data.
y_test : ndarray or Series of length n, default: None
An optional array or series of target or class values that serve as actual
labels for X_test for scoring purposes.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If not specified the current axes will be
used (or generated if required).
    classes : list of str, default: None
The class labels to use for the legend ordered by the index of the sorted
classes discovered in the ``fit()`` method. Specifying classes in this
manner is used to change the class names to a more specific format or
to label encoded integer classes. Some visualizers may also use this
field to filter the visualization for specific classes. For more advanced
usage specify an encoder rather than class labels.
cmap : string, default: ``'YlOrRd'``
Specify a colormap to define the heatmap of the predicted class
against the actual class in the classification report.
support: {True, False, None, 'percent', 'count'}, default: None
Specify if support will be displayed. It can be further defined by
whether support should be reported as a raw count or percentage.
encoder : dict or LabelEncoder, default: None
A mapping of classes to human readable labels. Often there is a mismatch
between desired class labels and those contained in the target variable
passed to ``fit()`` or ``score()``. The encoder disambiguates this mismatch
ensuring that classes are labeled correctly in the visualization.
is_fitted : bool or str, default='auto'
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If 'auto' (default), a helper method will check if the estimator
is fitted before fitting it again.
force_model : bool, default: False
Do not check to ensure that the underlying estimator is a classifier. This
will prevent an exception when the visualizer is initialized but may result
in unexpected or unintended behavior.
show: bool, default: True
        If True, calls ``show()``, which in turn calls ``plt.show()``; however, you
        cannot call ``plt.savefig`` from this signature, nor ``clear_figure``. If False,
        simply calls ``finalize()``.
kwargs : dict
Keyword arguments passed to the visualizer base classes.
Returns
-------
viz : ClassificationReport
Returns the fitted, finalized visualizer
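    Examples
    --------
    A minimal usage sketch, mirroring the class-level example above; ``X_train``,
    ``y_train``, ``X_test`` and ``y_test`` are assumed to be pre-split feature and
    target arrays.
    >>> from sklearn.linear_model import LogisticRegression
    >>> viz = classification_report(
    ...     LogisticRegression(), X_train, y_train, X_test, y_test, support=True
    ... )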
"""
# Instantiate the visualizer
visualizer = ClassificationReport(
model=model,
ax=ax,
classes=classes,
cmap=cmap,
support=support,
encoder=encoder,
is_fitted=is_fitted,
force_model=force_model,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X_train, y_train)
# Score the visualizer
if X_test is not None and y_test is not None:
visualizer.score(X_test, y_test)
elif X_test is not None or y_test is not None:
raise YellowbrickValueError(
"both X_test and y_test are required if one is specified"
)
else:
visualizer.score(X_train, y_train)
# Draw the final visualization
if show:
visualizer.show()
else:
visualizer.finalize()
# Return the visualizer
return visualizer | PypiClean |
/jupyros-0.7.0a0.tar.gz/jupyros-0.7.0a0/js/node_modules/globalthis/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [v1.0.3](https://github.com/es-shims/globalThis/compare/v1.0.2...v1.0.3) - 2022-05-07
### Commits
- [actions] reuse common workflows [`65891e4`](https://github.com/es-shims/globalThis/commit/65891e4d285ae04e216ff01160cff861e0e41a4f)
- [actions] use `node/install` instead of `node/run`; use `codecov` action [`82f8481`](https://github.com/es-shims/globalThis/commit/82f84815027f666f625e1ccb41f723800a05d016)
- [meta] use `npmignore` to autogenerate an npmignore file [`53afc39`](https://github.com/es-shims/globalThis/commit/53afc39bfd3eb262c5e6e9dfd25e4f81f3578c1c)
- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `aud`, `auto-changelog`, `tape` [`03169d4`](https://github.com/es-shims/globalThis/commit/03169d4254c9ef177d6537becca5b0b56df50d91)
- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `safe-publish-latest`, `tape` [`4986e3e`](https://github.com/es-shims/globalThis/commit/4986e3e20c5f664601871a0fac68c1efd0a68472)
- [actions] update codecov uploader [`15c4b06`](https://github.com/es-shims/globalThis/commit/15c4b062b1a9434dbec93604ed31b6893d11d458)
- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `auto-changelog`, `tape` [`8b04a74`](https://github.com/es-shims/globalThis/commit/8b04a749d3cb2f825920beb700899f0c13ad2fb8)
- [Fix] `globalThis` should be writable [`8759985`](https://github.com/es-shims/globalThis/commit/87599852d5f91e2e1f06e424cdefcd443ec98476)
- [readme] add github actions/codecov badges [`0263f0d`](https://github.com/es-shims/globalThis/commit/0263f0debfa982b928fcd301b11fe3e3193bf33d)
- [Dev Deps] update `aud`, `eslint`, `tape` [`e88d296`](https://github.com/es-shims/globalThis/commit/e88d296bb026633bdd1be2e1542903a5d0107cd8)
- [meta] use `prepublishOnly` script for npm 7+ [`c81fde6`](https://github.com/es-shims/globalThis/commit/c81fde6a9e44345e56dada588e16db736809ddd9)
- [Tests] nycignore `dist` [`bde0c0d`](https://github.com/es-shims/globalThis/commit/bde0c0df46f684316ab414da1487a0cd2efe3eeb)
- [meta] gitignore coverage output [`79f73f8`](https://github.com/es-shims/globalThis/commit/79f73f8b0c1180567fba473f92c07d71efd4dd0b)
## [v1.0.2](https://github.com/es-shims/globalThis/compare/v1.0.1...v1.0.2) - 2021-02-22
### Commits
- [Tests] migrate tests to Github Actions [`a3f50f7`](https://github.com/es-shims/globalThis/commit/a3f50f77a392c0ffdaca18fb5881743b874d0a6f)
- [meta] do not publish github action workflow files [`eb5c787`](https://github.com/es-shims/globalThis/commit/eb5c7879317cd7f1fde52228660be8e779c9d4e3)
- [Tests] add `implementation` est; run `es-shim-api` in postlint; use `tape` runner [`c9dd792`](https://github.com/es-shims/globalThis/commit/c9dd792d492ec9744a5e5d5033e919b94d441bac)
- [Tests] fix native tests [`6b76dff`](https://github.com/es-shims/globalThis/commit/6b76dff3af3fe9bcd7b24d48c6ba55116169e840)
- [Tests] run `nyc` on all tests [`0407f79`](https://github.com/es-shims/globalThis/commit/0407f79f64bf9fc30111f3bf4dff7e4205331fb6)
- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `aud`, `auto-changelog`, `tape`, `browserify` [`b8cc020`](https://github.com/es-shims/globalThis/commit/b8cc020e5ecc2d5a5a5b4160aabc60cc42d50c03)
- [actions] add "Allow Edits" workflow [`e2854df`](https://github.com/es-shims/globalThis/commit/e2854df653667b16ff34a7a0a7b677231dfe2b02)
- [readme] remove travis badge [`262eb76`](https://github.com/es-shims/globalThis/commit/262eb76e4e0d3f2df354cc6aff1b18f50c7b147f)
- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `aud`, `auto-changelog`; add `safe-publish-latest` [`3c76883`](https://github.com/es-shims/globalThis/commit/3c7688325f6aa050afe3ed978e423e70974e4d3b)
- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `aud`, `tape` [`7276123`](https://github.com/es-shims/globalThis/commit/727612396262fc22275f44159ec5b39115dc359f)
- [actions] update workflows [`bcb0f42`](https://github.com/es-shims/globalThis/commit/bcb0f42c319cf19746e03a6667cf25d3e835f46e)
- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `tape` [`5485851`](https://github.com/es-shims/globalThis/commit/548585148e874d6eb0b0463526a88e8b64e7c5eb)
- [Dev Deps] update `auto-changelog`, `tape` [`6a01da3`](https://github.com/es-shims/globalThis/commit/6a01da3f321983d1970d793711d31cf8508ef94d)
- [Dev Deps] update `@ljharb/eslint-config`, `tape` [`7a07f4e`](https://github.com/es-shims/globalThis/commit/7a07f4ebc5580933b40bbe67f357632e0f7d5586)
- [meta] only run the build script in publish [`797e492`](https://github.com/es-shims/globalThis/commit/797e492519ed0bf6270537290e69ca0456790575)
- [meta] combine duplicate `prepublish` scripts [`92bbef0`](https://github.com/es-shims/globalThis/commit/92bbef0f91f6e91163186f68b5f5f1ffd26c479d)
- [Dev Deps] update `auto-changelog`; add `aud` [`be6dbec`](https://github.com/es-shims/globalThis/commit/be6dbecefddb40493c5568a2cbe83f74e2e0385f)
- [actions] switch Automatic Rebase workflow to `pull_request_target` event [`bfd54f8`](https://github.com/es-shims/globalThis/commit/bfd54f8388758e7dec618dc34956e7075a7c15f0)
- [Tests] only audit prod deps [`0f64b47`](https://github.com/es-shims/globalThis/commit/0f64b47acfa812affbacbe487fcb0f6c02eccc25)
## [v1.0.1](https://github.com/es-shims/globalThis/compare/v1.0.0...v1.0.1) - 2019-12-15
### Fixed
- [Refactor] only use `global` in node; only check browser globals in browsers [`#2`](https://github.com/es-shims/globalThis/issues/2)
### Commits
- [Tests] use shared travis-ci configs [`edb1cc9`](https://github.com/es-shims/globalThis/commit/edb1cc9d900a40e8c1732264b6e85d4f9760920c)
- [Tests] remove `jscs` [`1847ac2`](https://github.com/es-shims/globalThis/commit/1847ac2487e2c13cf8bf717211c6a93fe60831f9)
- [meta] add `auto-changelog` [`933c381`](https://github.com/es-shims/globalThis/commit/933c381083890965ac848d3da21ed9e910cc09cf)
- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `browserify`, `tape` [`93310bc`](https://github.com/es-shims/globalThis/commit/93310bc01ddacbe23a93b3022daebc9b6f6ae8c3)
- [actions] add automatic rebasing / merge commit blocking [`231dec5`](https://github.com/es-shims/globalThis/commit/231dec511c42e1509035d176e2451c55de20bfe7)
- [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `browserify`, `covert`, `is`, `tape` [`e50c1f6`](https://github.com/es-shims/globalThis/commit/e50c1f6d2d45c66f53ffda471bbf62c08ed15c9b)
- [Tests] use `npx aud` instead of `nsp` or `npm audit` with hoops [`4abd340`](https://github.com/es-shims/globalThis/commit/4abd3400fc8942963e77515d0cf2fbcac3cb7bc8)
- [meta] add `funding` field [`2d1f9eb`](https://github.com/es-shims/globalThis/commit/2d1f9eb00b2dea46f6de7d563b31db17f44f1899)
- [meta] remove unused deps [`5bd6bef`](https://github.com/es-shims/globalThis/commit/5bd6befefbaf0c7e6f70eb3c1919b5c5a271d29d)
- readme: Fix casing + phrasing [`66379cc`](https://github.com/es-shims/globalThis/commit/66379ccf5008f7676aac5f3dec1ea2fe55e3516c)
- [Deps] update `define-properties`, `object-keys` [`4585e5a`](https://github.com/es-shims/globalThis/commit/4585e5ab461093ab6c62ce0b22b959925e8f818c)
- fix issue with Webpack's CaseSensitivePathsPlugin [`842e84e`](https://github.com/es-shims/globalThis/commit/842e84e0096c9eea660c78fd19c9c07799b81537)
## v1.0.0 - 2018-08-10
### Commits
- Dotfiles. [`f01b02d`](https://github.com/es-shims/globalThis/commit/f01b02d315865c812e5b9158f71bb18f3b153def)
- [Tests] up to `node` `v10.7`, `v9.11`, `v8.11`, `v7.10`, `v6.14`, `v4.9`; use `nvm install-latest-npm`; improve matrix [`ed1fa5d`](https://github.com/es-shims/globalThis/commit/ed1fa5d473d933b3270410b658183dc1c556a663)
- Tests [`ab99527`](https://github.com/es-shims/globalThis/commit/ab99527e3c434e89dd40f8cba3b0e2e976156611)
- [breaking] update property name, rename repo [`be42e3d`](https://github.com/es-shims/globalThis/commit/be42e3dce08b62a78260d487f62fa69b410d7918)
- package.json [`ca43a36`](https://github.com/es-shims/globalThis/commit/ca43a363e3ce0dbc2d4623169f8cb3d792f8bc84)
- implementation [`80b5a40`](https://github.com/es-shims/globalThis/commit/80b5a403ef532254b2af46ec3ba5f442a308a57d)
- read me [`f6df9b3`](https://github.com/es-shims/globalThis/commit/f6df9b3b69977f04e080d1720ba1203c13447884)
- Rename `System.global` to `global` [`fa8503c`](https://github.com/es-shims/globalThis/commit/fa8503cf94afe84b3729dd5b0e9f73f481fb1fee)
- Initial commit [`99f1dc3`](https://github.com/es-shims/globalThis/commit/99f1dc328d0b4c52a550037de0139d5452ac01de)
- [Tests] up to `node` `v6.7`, `v5.12`, `v4.6`; improve test matrix [`712ec0e`](https://github.com/es-shims/globalThis/commit/712ec0e545d1603c4e23f4ff1acb066cc4a3c9ee)
- [Dev Deps] update `browserify`, `tape`, `jscs`, `nsp`, `eslint`, `@ljharb/eslint-config` [`73278bd`](https://github.com/es-shims/globalThis/commit/73278bd638d1e762eb7415350a738f5d345896f5)
- [Dev Deps] update `@es-shims/api`, `@ljharb/eslint-config`, `browserify`, `eslint`, `for-each`, `is`, `nsp`, `tape` [`75fa992`](https://github.com/es-shims/globalThis/commit/75fa9929be81afec43895c02e33d0b8a78f11d1f)
- [Dev Deps] update `browserify`, `is`, `tape`, `nsp`, `eslint` [`b223e86`](https://github.com/es-shims/globalThis/commit/b223e86d0868efb1f0c966370ff2f822516d6956)
- [Tests] fix linting; remove parallelshell [`271b329`](https://github.com/es-shims/globalThis/commit/271b329d174b94c08913060752a2e9f9116fe5b8)
- [Deps] update `function-bind`, `object-keys` [`002d0c5`](https://github.com/es-shims/globalThis/commit/002d0c5685a83f97e014a8a07134eb621794c649)
- Only apps should have lockfiles [`960f1d0`](https://github.com/es-shims/globalThis/commit/960f1d00598cbba5427849c863eb10b8de82fb1b)
- [Tests] on `node` `v10.8` [`37fad9d`](https://github.com/es-shims/globalThis/commit/37fad9db9860c654efe0a32ec187f21730d5fed8)
- [Dev Deps] update `eslint`, `@ljharb/eslint-config` [`df28dfe`](https://github.com/es-shims/globalThis/commit/df28dfe7f0daf3db95a536a6ce64062bd706185d)
- [New] add `auto` entry point [`86eb2ab`](https://github.com/es-shims/globalThis/commit/86eb2ab4c4dc2babff20ac436cf7fb7f8da7d2f2)
- [Dev Deps] update `eslint` [`1bdc1aa`](https://github.com/es-shims/globalThis/commit/1bdc1aacfb94dcdc7bb61688c7634c435012e35d)
- [Deps] update `object-keys` [`72cdbf5`](https://github.com/es-shims/globalThis/commit/72cdbf596b16103ee711d52b2b645b42efc08c51)
- Update most common usage to invoke the function upon being required [`5026296`](https://github.com/es-shims/globalThis/commit/502629660da2c21cfb0f8ca233e2b9d427c052fe)
| PypiClean |
/waifu_py-1.0.3-py3-none-any.whl/waifu/client.py | import logging
from typing import Optional, Union, Dict, List
import requests
from waifu.exceptions import APIException, InvalidCategory
from waifu.utils import BASE_URL, ImageCategories, ImageTypes
log = logging.getLogger(__name__)
class WaifuClient:
"""Wrapper client for the waifu.pics API.
This class is used to interact with the API.
Attributes:
session: A requests session.
"""
def __init__(self, session: Optional[requests.Session] = None) -> None:
"""Initializes the WaifuClient.
Args:
session: A requests session.
"""
self.session = session
def _session(self) -> requests.Session:
"""Gets a requests session by creating it if it does not already exist."""
if self.session is None:
self.session = requests.Session()
return self.session
def _request(self, url: str, method: str, *args, **kwargs) -> Dict[str, str]:
"""Performs an HTTP request."""
session = self._session()
response = getattr(session, method)(url, *args, **kwargs)
log.debug(f'{method.upper()} {url} {response.status_code} {response.reason}')
if response.status_code != 200:
raise APIException(response.status_code, response.reason, (response.json()).get('message'))
data = response.json()
return data
def _get(self, url: str, *args, **kwargs) -> _request:
"""Performs an HTTP GET request."""
return self._request(url, 'get', *args, **kwargs)
def _post(self, url: str, *args, **kwargs) -> _request:
"""Performs an HTTP POST request."""
return self._request(url, 'post', *args, **kwargs)
def _fetch(
self,
type_: str,
category: str,
many: bool,
exclude: List[str]
) -> Union[str, List[str]]:
"""Returns a single or 30 unique images of the specific type and category."""
if category not in ImageCategories[type_]:
raise InvalidCategory(category)
if many is True:
data = self._post(f'{BASE_URL}/many/{type_}/{category}', json={'exclude': exclude})
else:
data = self._get(f'{BASE_URL}/{type_}/{category}')
if many is True:
return data.get('files')
return data.get('url')
def sfw(
self,
category: str,
many: Optional[bool] = False,
exclude: Optional[List[str]] = None
) -> Union[str, List[str]]:
"""Gets a single or 30 unique SFW (Safe For Work) images of the specific category.
Args:
category: The category of the image.
many: Get 30 unique images instead of one if true.
            exclude: A list of URLs to not receive from the endpoint if many is true.
        Returns:
            A single image URL or 30 unique image URLs.
Raises:
APIException: If the API response contains an error.
InvalidCategory: If the category is invalid.
"""
data = self._fetch(ImageTypes.sfw, category, many, exclude)
return data
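    # Usage sketch (illustrative; the category string must be one of the
    # categories published by the waifu.pics API — "waifu" is assumed here):
    #
    #   client = WaifuClient()
    #   url = client.sfw("waifu")               # one image URL
    #   urls = client.sfw("waifu", many=True)   # 30 unique image URLs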
def nsfw(
self,
category: str,
many: Optional[bool] = False,
exclude: Optional[List[str]] = None
) -> Union[str, List[str]]:
"""Gets a single or 30 unique NSFW (Not Safe For Work) images of the specific category.
Args:
category: The category of the image.
many: Get 30 unique images instead of one if true.
            exclude: A list of URLs to not receive from the endpoint if many is true.
        Returns:
            A single image URL or 30 unique image URLs.
Raises:
APIException: If the API response contains an error.
InvalidCategory: If the category is invalid.
"""
data = self._fetch(ImageTypes.nsfw, category, many, exclude)
return data | PypiClean |
/odoo14_addon_brand-14.0.1.0.2-py3-none-any.whl/odoo/addons/brand/models/res_brand_mixin.py | from lxml import etree
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
from odoo.addons.base.models import ir_ui_view
from .res_company import BRAND_USE_LEVEL_NO_USE_LEVEL, BRAND_USE_LEVEL_REQUIRED_LEVEL
class ResBrandMixin(models.AbstractModel):
_name = "res.brand.mixin"
_description = "Brand Mixin"
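    # Usage sketch (illustrative): a concrete model opts in by adding the mixin
    # to its ``_inherit`` list, e.g.
    #
    #   class SaleOrder(models.Model):
    #       _inherit = ["sale.order", "res.brand.mixin"]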
brand_id = fields.Many2one(
comodel_name="res.brand",
string="Brand",
help="Brand to use for this sale",
)
brand_use_level = fields.Selection(
string="Brand Use Level",
default=BRAND_USE_LEVEL_NO_USE_LEVEL,
related="company_id.brand_use_level",
)
company_id = fields.Many2one(
comodel_name="res.company",
)
def _is_brand_required(self):
self.ensure_one()
return self.company_id.brand_use_level == BRAND_USE_LEVEL_REQUIRED_LEVEL
@api.constrains("brand_id", "company_id")
def _check_brand_requirement(self):
for rec in self:
if rec._is_brand_required() and not rec.brand_id:
raise ValidationError(_("Brand is required"))
@api.constrains("brand_id", "company_id")
def _check_brand_company_id(self):
for rec in self:
if rec.brand_id.company_id and rec.brand_id.company_id != rec.company_id:
raise ValidationError(
_("Brand company must match document company for %s")
% rec.display_name
)
@api.onchange("brand_id")
def _onchange_brand_id(self):
for rec in self.filtered("brand_id.company_id"):
if rec.brand_id and rec.brand_id.company_id:
rec.company_id = rec.brand_id.company_id
def setup_modifiers(self, node, field=None, context=None, current_node_path=None):
modifiers = {}
if field is not None:
ir_ui_view.transfer_field_to_modifiers(field, modifiers)
ir_ui_view.transfer_node_to_modifiers(
node, modifiers, context=context, current_node_path=current_node_path
)
ir_ui_view.transfer_modifiers_to_node(modifiers, node)
def fields_view_get(
self, view_id=None, view_type="form", toolbar=False, submenu=False
):
"""set visibility and requirement rules"""
result = super(ResBrandMixin, self).fields_view_get(
view_id=view_id,
view_type=view_type,
toolbar=toolbar,
submenu=submenu,
)
if view_type in ["tree", "form"]:
doc = etree.XML(result["arch"])
result["fields"].update(self.fields_get(["brand_use_level"]))
for node in doc.xpath("//field[@name='brand_id']"):
in_tree_view = node.tag == "tree"
elem = etree.Element(
"field", {"name": "brand_use_level", "invisible": "True"}
)
field = result["fields"]["brand_use_level"]
self.setup_modifiers(
elem,
field=field,
context=self._context,
current_node_path=in_tree_view,
)
node.addprevious(elem)
node.set(
"attrs",
'{"invisible": '
'[("brand_use_level", "=", "%s")], '
'"required": '
'[("brand_use_level", "=", "%s")]}'
% (
BRAND_USE_LEVEL_NO_USE_LEVEL,
BRAND_USE_LEVEL_REQUIRED_LEVEL,
),
)
field = result["fields"]["brand_id"]
self.setup_modifiers(
node,
field=field,
context=self._context,
current_node_path=in_tree_view,
)
result["arch"] = etree.tostring(doc, encoding="unicode")
return result | PypiClean |