id
stringlengths 1
8
| text
stringlengths 6
1.05M
| dataset_id
stringclasses 1
value |
---|---|---|
/relations_sql-0.6.7-py3-none-any.whl/relations_sql/clause.py | import relations_sql
class CLAUSE(relations_sql.CRITERIA):
    """
    Base class for clauses
    """

    KWARG = None         # expression class used to wrap plain kwarg values (None = pass value through)
    KWARGS = None        # expression class pairing a kwarg name with its (wrapped) value
    DELIMITTER = ","     # joins expressions in generated SQL (spelling is part of the public API)
    PARENTHESES = False  # whether the generated SQL is wrapped in parentheses
    NAME = None          # SQL keyword prefix (e.g. "WHERE"); None for unnamed clauses

    query = None         # statement this clause is bound to via bind(); returned by add() for chaining

    def __init__(self, *args, **kwargs):
        self.expressions = []
        self(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        """
        Shorthand for add
        """
        return self.add(*args, **kwargs)

    def add(self, *args, **kwargs):
        """
        Add expressions
        """

        # A single dict positional is treated as keyword arguments
        if len(args) == 1 and isinstance(args[0], dict) and not kwargs:
            kwargs = args[0]
            args = []

        super().add(*args)

        # Keys are sorted so the generated SQL is deterministic
        for key in sorted(kwargs.keys()):
            # Values that are already SQL expressions are used as-is;
            # otherwise wrap them in the clause's KWARG expression class
            if self.KWARG is None or isinstance(kwargs[key], relations_sql.SQL):
                expression = kwargs[key]
            else:
                expression = self.KWARG(kwargs[key])
            self.expressions.append(self.KWARGS(key, expression))

        # Chain off the bound statement when present
        return self.query or self

    def bind(self, query):
        """
        Bind this clause to a statement so add() chains back to it
        """
        self.query = query
        return self

    def generate(self, indent=0, count=0, pad=" ", **kwargs):
        """
        Concats the values
        """
        super().generate(indent=indent, count=count, pad=pad, **kwargs)

        if self.sql:
            one = pad * indent                # a single indentation step
            current = pad * (count * indent)  # indentation at the current nesting depth
            next = current + one              # NOTE: shadows builtin next(); kept for compatibility
            line = "\n" if indent else ' '
            # Named clauses render as "NAME<newline><indent>sql"; unnamed ones just get indented
            self.sql = f"{self.NAME}{line}{next}{self.sql}" if self.NAME else f"{one}{self.sql}"
class ARGS(CLAUSE):
    """
    Clauses that never have keyword arguments
    """

    def __call__(self, *args):
        """
        Shorthand for add (positional arguments only)
        """
        return super().add(*args)
class OPTIONS(ARGS):
    """
    Beginning of a SELECT query
    """

    ARGS = relations_sql.SQL  # each option is a raw SQL fragment
    DELIMITTER = ' '          # options are space separated rather than comma separated
class FIELDS(CLAUSE):
    """
    FIELDS part of SELECT query
    """

    ARGS = relations_sql.COLUMN_NAME   # positional args become column names
    KWARG = relations_sql.COLUMN_NAME  # plain kwarg values wrapped as column names
    KWARGS = relations_sql.AS          # each kwarg becomes an AS expression (key paired with value)
class FROM(CLAUSE):
    """
    Clause for FROM
    """

    NAME = "FROM"

    ARGS = relations_sql.TABLE_NAME   # positional args become table names
    KWARG = relations_sql.TABLE_NAME  # plain kwarg values wrapped as table names
    KWARGS = relations_sql.AS         # each kwarg becomes an AS expression (key paired with value)
class WHERE(CLAUSE):
    """
    Clause for WHERE
    """

    NAME = "WHERE"

    ARGS = relations_sql.VALUE  # positional args treated as plain values
    KWARGS = relations_sql.OP   # each kwarg becomes an OP expression (key, value)
    DELIMITTER = " AND "        # conditions are ANDed together
class GROUP_BY(ARGS):
    """
    Clause for GROUP BY
    """

    NAME = "GROUP BY"

    ARGS = relations_sql.COLUMN_NAME  # positional args become column names
class HAVING(CLAUSE):
    """
    Clause for HAVING
    """

    NAME = "HAVING"

    ARGS = relations_sql.VALUE  # positional args treated as plain values
    KWARGS = relations_sql.OP   # each kwarg becomes an OP expression (key, value)
    DELIMITTER = " AND "        # conditions are ANDed together
class ORDER_BY(CLAUSE):
    """
    Clause for ORDER BY
    """

    NAME = "ORDER BY"

    ARGS = relations_sql.ORDER    # positional args become ORDER expressions
    KWARGS = relations_sql.ORDER  # kwargs also become ORDER expressions (key paired with value)
class LIMIT(CLAUSE):
    """
    Clause for LIMIT [OFFSET]
    """

    NAME = "LIMIT"

    ARGS = relations_sql.VALUE
    DELIMITTER = " OFFSET "  # total and offset render as "LIMIT total OFFSET offset"

    def add(self, *args, total=None, offset=None):
        """
        Add total and offset

        Accepts either a single dict with "total"/"offset" keys, up to two
        positional ints (total first, then offset), or the keyword
        arguments. Raises SQLError when more positionals are supplied than
        LIMIT slots remain unset, or when a value is not an int.
        """

        if len(args) == 1 and isinstance(args[0], dict) and total is None and offset is None:
            total = args[0].get("total")
            offset = args[0].get("offset")
        else:
            # Only as many positionals as (total, offset) slots still unset
            if len(args) > 2 - len(self.expressions):
                raise relations_sql.SQLError(self, "cannot add when LIMIT set")

            args = list(args)

            # First positional fills total only when nothing has been set yet
            if args and len(self.expressions) == 0 and total is None:
                total = args.pop(0)

            if args and offset is None:
                offset = args.pop(0)

        if total is not None and not isinstance(total, int):
            raise relations_sql.SQLError(self, "LIMIT total must be int")

        if offset is not None and not isinstance(offset, int):
            raise relations_sql.SQLError(self, "LIMIT offset must be int")

        if total is not None:
            self.expressions.append(self.ARGS(total))

        if offset is not None:
            self.expressions.append(self.ARGS(offset))

        return self.query or self

    def generate(self, indent=0, count=0, pad=" ", **kwargs):
        """
        Concats the values

        NOTE(review): indent/count/pad are accepted but not forwarded to
        super(), so LIMIT always renders flat — presumably intentional;
        confirm against the other clause generate() overrides.
        """
        super().generate(**kwargs)
class SET(CLAUSE):
    """
    relations_sql.CRITERIA for SET
    """

    NAME = "SET"

    KWARGS = relations_sql.ASSIGN  # each kwarg becomes an ASSIGN expression (column, value)
class VALUES(CLAUSE):
"""
relations_sql.CRITERIA for VALUES
"""
NAME = "VALUES"
ARGS = relations_sql.LIST
DELIMITTER = None
columns = None
def column(self, columns):
"""
Field the columns
"""
if self.columns:
return
if self.query:
if not self.query.COLUMNS:
self.query.column(columns)
self.columns = [expresion.name for expresion in self.query.COLUMNS.expressions]
else:
self.columns = columns
def add(self, *args, **kwargs):
"""
Add a row to VALUES
"""
if kwargs.get("COLUMNS"):
self.column(kwargs.pop("COLUMNS"))
if args and kwargs:
raise relations_sql.SQLError(self, "add list or dict but not both")
if len(args) == 1 and isinstance(args[0], dict):
kwargs = args[0]
args = []
if kwargs:
self.column(sorted(kwargs.keys()))
args = []
for column in self.columns:
if column not in kwargs:
raise relations_sql.SQLError(self, f"missing column {column} in {kwargs}")
args.append(kwargs[column])
if args:
if self.columns is not None and len(args) != len(self.columns):
raise relations_sql.SQLError(self, f"wrong values {args} for columns {self.columns}")
self.expressions.append(self.ARGS(args))
return self.query or self
def generate(self, indent=0, count=0, pad=" ", **kwargs):
"""
Concats the values
"""
sql = []
self.args = []
count += 1
current = pad * (count * indent)
next = current + (indent * pad)
line = "\n" if indent else ' '
left, right = (f"(\n{next}", f"\n{current})") if indent else ('(', ')')
delimitter = f"{right},{left}"
self.express(self.expressions, sql, indent=indent, count=count+1, pad=pad, **kwargs)
self.sql = f"{self.NAME}{line}{current}{left}{delimitter.join(sql)}{right}" | PypiClean |
/cacophonyapi-0.0.2.tar.gz/cacophonyapi-0.0.2/README.md | # Cacophony Project API Client for Python
Python client for the [Cacophony REST API](https://github.com/TheCacophonyProject/cacophony-api).
## Installation
This API client requires Python 3.6 or later. At present the library is not yet available on PyPI.
To install, create a virtualenv using your preferred method then:
```
git clone https://github.com/TheCacophonyProject/python-api.git
cd python-api
pip install .
```
If you plan on making changes to python-api, you'll want to install the test dependencies as well:
```
pip install .[test]
```
## Using the package (User API)
### Client setup
Open an instance of the client and use credentials directly
```python
from cacophonyapi.user import UserAPI
client = UserAPI(baseurl=<SERVER-URL>,
username=<USER-NAME>,
password=<USER-PASSWORD>)
```
Alternatively, using credentials stored in a configuration file:
```python
from cacophonyapi.user import UserAPI
from cacophonyapi.config import Config
config=Config().load_config(config_file=os.path.join(
os.getcwd(),'defaultconfig.json'))
cp_client = UserAPI(config.api_url,
username=config.admin_username ,
password=config.admin_password)
```
## API calls
By default the most recent 100 recordings accessible to the user
account are queried but `UserAPI.query()` does support a number of
filtering options. The API server supports arbitrary queries so feel
free to extend `UserAPI.query()` if required.
## Testing
#TODO: expand testing in both `test_client_user_without_server.py` and `test_client_user_with_server.py`
Testing uses the Python unittest framework, whereby both unit and integration testing are done.
`test\test_client_user_without_server.py` contains tests that run without a server: `nose2 --verbosity 2 cacophonyapi.test.test_client_user_without_server`
and `test\test_client_user_with_server` is full integration testing against a server. This is also part of the travis test `nose2 --verbosity 2 CacophonyClient.test.test_client_user_with_server`.
This integration testing does require a local server setup see [travis.yml](travis.yml)
For individual test `nose2 --verbosity 2 cacophonyapi.test.test_client_user_with_server.mockedCacophonyServer.test_query`
#TODO: Docs improve PEP257 compliance for cacophonyapi UserApi etc, don't know why it is not failing `tox -e pep257`
| PypiClean |
/python_pptx_fork-0.6.18-py3-none-any.whl/pptx/parts/image.py | from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import os
try:
from PIL import Image as PIL_Image
except ImportError:
import Image as PIL_Image
from ..compat import BytesIO, is_string
from ..opc.package import Part
from ..opc.spec import image_content_types
from ..util import lazyproperty
class ImagePart(Part):
    """
    An image part, generally having a partname matching the regex
    ``ppt/media/image[1-9][0-9]*.*``.
    """

    def __init__(self, partname, content_type, blob, package, filename=None):
        super(ImagePart, self).__init__(partname, content_type, blob, package)
        # original filename, if known; surfaced via the desc property
        self._filename = filename

    @classmethod
    def load(cls, partname, content_type, blob, package):
        """
        Return an |ImagePart| loaded from serialized package data
        (no original filename is available in this case).
        """
        return cls(partname, content_type, blob, package)

    @classmethod
    def new(cls, package, image):
        """
        Return a new |ImagePart| instance containing *image*, which is an
        |Image| object.
        """
        partname = package.next_image_partname(image.ext)
        return cls(partname, image.content_type, image.blob, package, image.filename)

    @property
    def desc(self):
        """
        The filename associated with this image, either the filename of
        the original image or a generic name of the form ``image.ext``
        where ``ext`` is appropriate to the image file format, e.g.
        ``'jpg'``. An image created using a path will have that filename; one
        created with a file-like object will have a generic name.
        """
        # return generic filename if original filename is unknown
        if self._filename is None:
            return "image.%s" % self.ext
        return self._filename

    @property
    def ext(self):
        """
        Return file extension for this image e.g. ``'png'``.
        """
        return self.partname.ext

    @property
    def image(self):
        """
        An |Image| object containing the image in this image part.
        """
        return Image(self.blob, self.desc)

    def scale(self, scaled_cx, scaled_cy):
        """
        Return scaled image dimensions in EMU based on the combination of
        parameters supplied. If *scaled_cx* and *scaled_cy* are both |None|,
        the native image size is returned. If neither *scaled_cx* nor
        *scaled_cy* is |None|, their values are returned unchanged. If
        a value is provided for either *scaled_cx* or *scaled_cy* and the
        other is |None|, the missing value is calculated such that the
        image's aspect ratio is preserved.
        """
        image_cx, image_cy = self._native_size

        if scaled_cx is None and scaled_cy is None:
            # neither dimension given: use native size
            scaled_cx = image_cx
            scaled_cy = image_cy
        elif scaled_cx is None:
            # derive width from height, preserving aspect ratio
            scaling_factor = float(scaled_cy) / float(image_cy)
            scaled_cx = int(round(image_cx * scaling_factor))
        elif scaled_cy is None:
            # derive height from width, preserving aspect ratio
            scaling_factor = float(scaled_cx) / float(image_cx)
            scaled_cy = int(round(image_cy * scaling_factor))

        return scaled_cx, scaled_cy

    @lazyproperty
    def sha1(self):
        """
        The SHA1 hash digest for the image binary of this image part, like:
        ``'1be010ea47803b00e140b852765cdf84f491da47'``.
        """
        return hashlib.sha1(self._blob).hexdigest()

    @property
    def _dpi(self):
        """
        A (horz_dpi, vert_dpi) 2-tuple (ints) representing the dots-per-inch
        property of this image.
        """
        image = Image.from_blob(self.blob)
        return image.dpi

    @property
    def _native_size(self):
        """
        A (width, height) 2-tuple representing the native dimensions of the
        image in EMU, calculated based on the image DPI value, if present,
        assuming 72 dpi as a default.

        NOTE(review): true division makes these floats, not ints -- confirm
        callers (e.g. scale()) tolerate float EMU values.
        """
        EMU_PER_INCH = 914400
        horz_dpi, vert_dpi = self._dpi
        width_px, height_px = self._px_size

        width = EMU_PER_INCH * width_px / horz_dpi
        height = EMU_PER_INCH * height_px / vert_dpi

        return width, height

    @property
    def _px_size(self):
        """
        A (width, height) 2-tuple representing the dimensions of this image
        in pixels.
        """
        image = Image.from_blob(self.blob)
        return image.size
class Image(object):
    """
    Immutable value object representing an image such as a JPEG, PNG, or GIF.

    Construct via :meth:`from_blob` or :meth:`from_file`; all properties are
    derived lazily from the raw image bytes.
    """

    def __init__(self, blob, filename):
        super(Image, self).__init__()
        self._blob = blob
        self._filename = filename

    @classmethod
    def from_blob(cls, blob, filename=None):
        """
        Return a new |Image| object loaded from the image binary in *blob*.
        """
        return cls(blob, filename)

    @classmethod
    def from_file(cls, image_file):
        """
        Return a new |Image| object loaded from *image_file*, which can be
        either a path (string) or a file-like object.
        """
        if is_string(image_file):
            # treat image_file as a path
            with open(image_file, "rb") as f:
                blob = f.read()
            filename = os.path.basename(image_file)
        else:
            # assume image_file is a file-like object
            # ---reposition file cursor if it has one---
            # FIX: supply a default to getattr() so objects without a `seek`
            # attribute are simply read from their current position instead
            # of raising AttributeError
            if callable(getattr(image_file, "seek", None)):
                image_file.seek(0)
            blob = image_file.read()
            filename = None
        return cls.from_blob(blob, filename)

    @property
    def blob(self):
        """
        The binary image bytestream of this image.
        """
        return self._blob

    @lazyproperty
    def content_type(self):
        """
        MIME-type of this image, e.g. ``'image/jpeg'``.
        """
        return image_content_types[self.ext]

    @lazyproperty
    def dpi(self):
        """
        A (horz_dpi, vert_dpi) 2-tuple specifying the dots-per-inch
        resolution of this image. A default value of (72, 72) is used if the
        dpi is not specified in the image file.
        """

        def int_dpi(dpi):
            """
            Return an integer dots-per-inch value corresponding to *dpi*. If
            *dpi* is |None|, a non-numeric type, less than 1 or greater than
            2048, 72 is returned.
            """
            try:
                int_dpi = int(round(float(dpi)))
                if int_dpi < 1 or int_dpi > 2048:
                    int_dpi = 72
            except (TypeError, ValueError):
                int_dpi = 72
            return int_dpi

        def normalize_pil_dpi(pil_dpi):
            """
            Return a (horz_dpi, vert_dpi) 2-tuple corresponding to *pil_dpi*,
            the value for the 'dpi' key in the ``info`` dict of a PIL image.
            If the 'dpi' key is not present or contains an invalid value,
            ``(72, 72)`` is returned.
            """
            if isinstance(pil_dpi, tuple):
                return (int_dpi(pil_dpi[0]), int_dpi(pil_dpi[1]))
            return (72, 72)

        return normalize_pil_dpi(self._pil_props[2])

    @lazyproperty
    def ext(self):
        """
        Canonical file extension for this image e.g. ``'png'``. The returned
        extension is all lowercase and is the canonical extension for the
        content type of this image, regardless of what extension may have
        been used in its filename, if any.
        """
        ext_map = {
            "BMP": "bmp",
            "GIF": "gif",
            "JPEG": "jpg",
            "PNG": "png",
            "TIFF": "tiff",
            "WMF": "wmf",
        }
        format = self._format
        if format not in ext_map:
            tmpl = "unsupported image format, expected one of: %s, got '%s'"
            raise ValueError(tmpl % (ext_map.keys(), format))
        return ext_map[format]

    @property
    def filename(self):
        """
        The filename from the path from which this image was loaded, if
        loaded from the filesystem. |None| if no filename was used in
        loading, such as when loaded from an in-memory stream.
        """
        return self._filename

    @lazyproperty
    def sha1(self):
        """
        SHA1 hash digest of the image blob.
        """
        return hashlib.sha1(self._blob).hexdigest()

    @lazyproperty
    def size(self):
        """
        A (width, height) 2-tuple specifying the dimensions of this image in
        pixels.
        """
        return self._pil_props[1]

    @property
    def _format(self):
        """
        The PIL Image format of this image, e.g. 'PNG'.
        """
        return self._pil_props[0]

    @lazyproperty
    def _pil_props(self):
        """
        A tuple containing useful image properties extracted from this image
        using Pillow (Python Imaging Library, or 'PIL').

        Returns (format, (width_px, height_px), dpi) where *dpi* is the raw
        value of the PIL ``info`` dict's 'dpi' key (may be None).
        """
        stream = BytesIO(self._blob)
        pil_image = PIL_Image.open(stream)
        format = pil_image.format
        width_px, height_px = pil_image.size
        dpi = pil_image.info.get("dpi")
        stream.close()
        return (format, (width_px, height_px), dpi)
/ais_dom-2023.7.2-py3-none-any.whl/homeassistant/helpers/schema_config_entry_flow.py | from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import Callable, Coroutine, Mapping
import copy
from dataclasses import dataclass
import types
from typing import Any, cast
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.core import HomeAssistant, callback, split_entity_id
from homeassistant.data_entry_flow import FlowResult, UnknownHandler
from . import entity_registry as er, selector
from .typing import UNDEFINED, UndefinedType
class SchemaFlowError(Exception):
    """Validation failed.

    Raised by step validators to signal invalid user input; the error
    string is shown as the form's "base" error.
    """
@dataclass
class SchemaFlowStep:
    """Define a config or options flow step.

    Base class for SchemaFlowFormStep and SchemaFlowMenuStep.
    """
@dataclass(slots=True)
class SchemaFlowFormStep(SchemaFlowStep):
    """Define a config or options flow form step."""

    schema: vol.Schema | Callable[
        [SchemaCommonFlowHandler], Coroutine[Any, Any, vol.Schema | None]
    ] | None = None
    """Optional voluptuous schema, or function which returns a schema or None, for
    requesting and validating user input.

    - If a function is specified, the function will be passed the current
      `SchemaCommonFlowHandler`.
    - If schema validation fails, the step will be retried. If the schema is None, no
      user input is requested.
    """

    validate_user_input: Callable[
        [SchemaCommonFlowHandler, dict[str, Any]], Coroutine[Any, Any, dict[str, Any]]
    ] | None = None
    """Optional function to validate user input.

    - The `validate_user_input` function is called if the schema validates successfully.
    - The first argument is a reference to the current `SchemaCommonFlowHandler`.
    - The second argument is the user input from the current step.
    - The `validate_user_input` should raise `SchemaFlowError` if user input is invalid.
    """

    next_step: Callable[
        [dict[str, Any]], Coroutine[Any, Any, str | None]
    ] | str | None = None
    """Optional property to identify next step.

    - If `next_step` is a function, it is called if the schema validates successfully or
      if no schema is defined. The `next_step` function is passed the union of
      config entry options and user input from previous steps. If the function returns
      None, the flow is ended with `FlowResultType.CREATE_ENTRY`.
    - If `next_step` is None, the flow is ended with `FlowResultType.CREATE_ENTRY`.
    """

    suggested_values: Callable[
        [SchemaCommonFlowHandler], Coroutine[Any, Any, dict[str, Any]]
    ] | None | UndefinedType = UNDEFINED
    """Optional property to populate suggested values.

    - If `suggested_values` is UNDEFINED, each key in the schema will get a suggested
      value from an option with the same key.

    Note: if a step is retried due to a validation failure, then the user input will
    have priority over the suggested values.
    """
@dataclass(slots=True)
class SchemaFlowMenuStep(SchemaFlowStep):
    """Define a config or options flow menu step."""

    # Menu options: list of step ids, or mapping of step id to label
    options: list[str] | dict[str, str]
class SchemaCommonFlowHandler:
    """Handle a schema based config or options flow."""

    def __init__(
        self,
        handler: SchemaConfigFlowHandler | SchemaOptionsFlowHandler,
        flow: Mapping[str, SchemaFlowStep],
        options: dict[str, Any] | None,
    ) -> None:
        """Initialize a common handler.

        *handler* is the parent config or options flow handler, *flow* maps
        step id to step definition, and *options* carries initial options
        (None for a fresh config flow).
        """
        self._flow = flow
        self._handler = handler
        self._options = options if options is not None else {}
        self._flow_state: dict[str, Any] = {}

    @property
    def parent_handler(self) -> SchemaConfigFlowHandler | SchemaOptionsFlowHandler:
        """Return parent handler."""
        return self._handler

    @property
    def options(self) -> dict[str, Any]:
        """Return the options linked to the current flow handler."""
        return self._options

    @property
    def flow_state(self) -> dict[str, Any]:
        """Return the flow state, used to store temporary data.

        It can be used for example to store the key or the index of a sub-item
        that will be edited in the next step.
        """
        return self._flow_state

    async def async_step(
        self, step_id: str, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a step."""
        # Dispatch on step type: form steps collect input, menu steps branch
        if isinstance(self._flow[step_id], SchemaFlowFormStep):
            return await self._async_form_step(step_id, user_input)
        return await self._async_menu_step(step_id, user_input)

    async def _get_schema(self, form_step: SchemaFlowFormStep) -> vol.Schema | None:
        """Resolve the step's schema, awaiting it when defined as a callable."""
        if form_step.schema is None:
            return None
        if isinstance(form_step.schema, vol.Schema):
            return form_step.schema
        return await form_step.schema(self)

    async def _async_form_step(
        self, step_id: str, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a form step."""
        form_step: SchemaFlowFormStep = cast(SchemaFlowFormStep, self._flow[step_id])

        if (
            user_input is not None
            and (data_schema := await self._get_schema(form_step))
            and data_schema.schema
            and not self._handler.show_advanced_options
        ):
            # Add advanced field default if not set
            # (advanced fields are hidden from the form, so their defaults
            # would otherwise be lost for users without advanced mode)
            for key in data_schema.schema:
                if isinstance(key, (vol.Optional, vol.Required)):
                    if (
                        key.description
                        and key.description.get("advanced")
                        and key.default is not vol.UNDEFINED
                        and key not in self._options
                    ):
                        user_input[str(key.schema)] = key.default()

        if user_input is not None and form_step.validate_user_input is not None:
            # Do extra validation of user input
            try:
                user_input = await form_step.validate_user_input(self, user_input)
            except SchemaFlowError as exc:
                # Re-show the same step with the validation error
                return await self._show_next_step(step_id, exc, user_input)

        if user_input is not None:
            # User input was validated successfully, update options
            self._options.update(user_input)

        if user_input is not None or form_step.schema is None:
            return await self._show_next_step_or_create_entry(form_step)

        return await self._show_next_step(step_id)

    async def _show_next_step_or_create_entry(
        self, form_step: SchemaFlowFormStep
    ) -> FlowResult:
        """Show the next step, or finish the flow when there is none."""
        next_step_id_or_end_flow: str | None

        if callable(form_step.next_step):
            next_step_id_or_end_flow = await form_step.next_step(self._options)
        else:
            next_step_id_or_end_flow = form_step.next_step

        if next_step_id_or_end_flow is None:
            # Flow done, create entry or update config entry options
            return self._handler.async_create_entry(data=self._options)

        return await self._show_next_step(next_step_id_or_end_flow)

    async def _show_next_step(
        self,
        next_step_id: str,
        error: SchemaFlowError | None = None,
        user_input: dict[str, Any] | None = None,
    ) -> FlowResult:
        """Show form for next step."""
        if isinstance(self._flow[next_step_id], SchemaFlowMenuStep):
            menu_step = cast(SchemaFlowMenuStep, self._flow[next_step_id])
            return self._handler.async_show_menu(
                step_id=next_step_id,
                menu_options=menu_step.options,
            )

        form_step = cast(SchemaFlowFormStep, self._flow[next_step_id])

        if (data_schema := await self._get_schema(form_step)) is None:
            # No schema means nothing to ask; advance immediately
            return await self._show_next_step_or_create_entry(form_step)

        suggested_values: dict[str, Any] = {}
        if form_step.suggested_values is UNDEFINED:
            # Default behavior: suggest currently stored options by key
            suggested_values = self._options
        elif form_step.suggested_values:
            suggested_values = await form_step.suggested_values(self)

        if user_input:
            # We don't want to mutate the existing options
            suggested_values = copy.deepcopy(suggested_values)
            # On retry, user input takes priority over suggested values
            suggested_values.update(user_input)

        if data_schema.schema:
            # Make a copy of the schema with suggested values set to saved options
            data_schema = self._handler.add_suggested_values_to_schema(
                data_schema, suggested_values
            )

        errors = {"base": str(error)} if error else None

        # Show form for next step
        last_step = None
        if not callable(form_step.next_step):
            # Static next_step lets us tell the frontend whether this is the end
            last_step = form_step.next_step is None
        return self._handler.async_show_form(
            step_id=next_step_id,
            data_schema=data_schema,
            errors=errors,
            last_step=last_step,
        )

    async def _async_menu_step(
        self, step_id: str, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle a menu step."""
        menu_step: SchemaFlowMenuStep = cast(SchemaFlowMenuStep, self._flow[step_id])
        return self._handler.async_show_menu(
            step_id=step_id,
            menu_options=menu_step.options,
        )
class SchemaConfigFlowHandler(config_entries.ConfigFlow, ABC):
    """Handle a schema based config flow."""

    # Step definitions; subclasses must set config_flow and may set options_flow
    config_flow: Mapping[str, SchemaFlowStep]
    options_flow: Mapping[str, SchemaFlowStep] | None = None

    VERSION = 1

    def __init_subclass__(cls, **kwargs: Any) -> None:
        """Initialize a subclass.

        Wires up async_get_options_flow and one async_step_<id> method per
        step defined in the subclass's config_flow mapping.
        """
        super().__init_subclass__(**kwargs)

        @callback
        def _async_get_options_flow(
            config_entry: config_entries.ConfigEntry,
        ) -> config_entries.OptionsFlow:
            """Get the options flow for this handler."""
            if cls.options_flow is None:
                raise UnknownHandler

            return SchemaOptionsFlowHandler(
                config_entry, cls.options_flow, cls.async_options_flow_finished
            )

        # Create an async_get_options_flow method
        cls.async_get_options_flow = _async_get_options_flow  # type: ignore[method-assign]

        # Create flow step methods for each step defined in the flow schema
        for step in cls.config_flow:
            setattr(cls, f"async_step_{step}", cls._async_step(step))

    def __init__(self) -> None:
        """Initialize config flow."""
        self._common_handler = SchemaCommonFlowHandler(self, self.config_flow, None)

    @classmethod
    @callback
    def async_supports_options_flow(
        cls, config_entry: config_entries.ConfigEntry
    ) -> bool:
        """Return options flow support for this handler."""
        return cls.options_flow is not None

    @staticmethod
    def _async_step(step_id: str) -> Callable:
        """Generate a step handler that delegates to the common handler."""

        async def _async_step(
            self: SchemaConfigFlowHandler, user_input: dict[str, Any] | None = None
        ) -> FlowResult:
            """Handle a config flow step."""
            # pylint: disable-next=protected-access
            result = await self._common_handler.async_step(step_id, user_input)
            return result

        return _async_step

    @abstractmethod
    @callback
    def async_config_entry_title(self, options: Mapping[str, Any]) -> str:
        """Return config entry title.

        The options parameter contains config entry options, which is the union of user
        input from the config flow steps.
        """

    @callback
    def async_config_flow_finished(self, options: Mapping[str, Any]) -> None:
        """Take necessary actions after the config flow is finished, if needed.

        The options parameter contains config entry options, which is the union of user
        input from the config flow steps.
        """

    @callback
    @staticmethod
    def async_options_flow_finished(
        hass: HomeAssistant, options: Mapping[str, Any]
    ) -> None:
        """Take necessary actions after the options flow is finished, if needed.

        The options parameter contains config entry options, which is the union of
        stored options and user input from the options flow steps.
        """

    @callback
    def async_create_entry(  # pylint: disable=arguments-differ
        self,
        data: Mapping[str, Any],
        **kwargs: Any,
    ) -> FlowResult:
        """Finish config flow and create a config entry.

        All collected values are stored as entry options; entry data stays
        empty.
        """
        self.async_config_flow_finished(data)
        return super().async_create_entry(
            data={}, options=data, title=self.async_config_entry_title(data), **kwargs
        )
class SchemaOptionsFlowHandler(config_entries.OptionsFlowWithConfigEntry):
    """Handle a schema based options flow."""

    def __init__(
        self,
        config_entry: config_entries.ConfigEntry,
        options_flow: Mapping[str, SchemaFlowStep],
        async_options_flow_finished: Callable[[HomeAssistant, Mapping[str, Any]], None]
        | None = None,
    ) -> None:
        """Initialize options flow.

        If needed, `async_options_flow_finished` can be set to take necessary actions
        after the options flow is finished. The second parameter contains config entry
        options, which is the union of stored options and user input from the options
        flow steps.
        """
        super().__init__(config_entry)
        self._common_handler = SchemaCommonFlowHandler(
            self, options_flow, self._options
        )
        self._async_options_flow_finished = async_options_flow_finished

        # Bind an async_step_<id> method on this instance for each flow step
        for step in options_flow:
            setattr(
                self,
                f"async_step_{step}",
                types.MethodType(self._async_step(step), self),
            )

    @staticmethod
    def _async_step(step_id: str) -> Callable:
        """Generate a step handler that delegates to the common handler."""

        async def _async_step(
            self: SchemaConfigFlowHandler, user_input: dict[str, Any] | None = None
        ) -> FlowResult:
            """Handle an options flow step."""
            # pylint: disable-next=protected-access
            result = await self._common_handler.async_step(step_id, user_input)
            return result

        return _async_step

    @callback
    def async_create_entry(  # pylint: disable=arguments-differ
        self,
        data: Mapping[str, Any],
        **kwargs: Any,
    ) -> FlowResult:
        """Finish config flow and create a config entry."""
        # Invoke the optional completion hook before storing the options
        if self._async_options_flow_finished:
            self._async_options_flow_finished(self.hass, data)
        return super().async_create_entry(data=data, **kwargs)
@callback
def wrapped_entity_config_entry_title(
    hass: HomeAssistant, entity_id_or_uuid: str
) -> str:
    """Generate title for a config entry wrapping a single entity.

    Preference order: registry entry name, registry original name, state
    machine name, then the entity's object ID as a fallback.
    """
    registry = er.async_get(hass)
    entity_id = er.async_validate_entity_id(registry, entity_id_or_uuid)
    _, object_id = split_entity_id(entity_id)

    if (entry := registry.async_get(entity_id)) is not None:
        return entry.name or entry.original_name or object_id
    if (state := hass.states.get(entity_id)) is not None:
        return state.name or object_id
    return object_id
@callback
def entity_selector_without_own_entities(
    handler: SchemaOptionsFlowHandler,
    entity_selector_config: selector.EntitySelectorConfig,
) -> vol.Schema:
    """Return an entity selector which excludes own entities."""
    own_entity_ids = [
        entry.entity_id
        for entry in er.async_entries_for_config_entry(
            er.async_get(handler.hass),
            handler.config_entry.entry_id,
        )
    ]

    # Copy the config so the caller's selector config is left untouched
    final_selector_config = entity_selector_config.copy()
    final_selector_config["exclude_entities"] = own_entity_ids
    return selector.EntitySelector(final_selector_config)
/rabbitstew-0.1.0.tar.gz/rabbitstew-0.1.0/README.rst | rabbitstew
==========
A small command-line tool that adheres to the Unix philosophy for publishing
messages to RabbitMQ.
``rabbitstew`` takes input from ``stdin`` and publishes a message per line
received. You can customize the exchange and routing key used, along with
message properties. Additionally, you can enable publisher confirmations if
desired.
|Version| |Downloads| |Status| |License|
Installation
------------
rabbitstew is available from the `Python Package Index <https://pypi.python.org/pypi/rabbitstew>`_
and can be installed via ``pip`` or ``easy_install``.
Usage Example
-------------
.. code:: bash
cat /var/log/messages | rabbitstew -H rabbit-server -r syslog.messages
CLI Options
-----------
.. code::
usage: rabbitstew [-h] [-H HOST] [-p PORT] [-s] [-v VHOST] [-u USER]
[-P PASSWORD] [-W] [-e EXCHANGE] [-r ROUTING_KEY] [-c]
[--add-user] [--app-id APP_ID] [--auto-id]
[--content-type VALUE] [--type TYPE] [-V] [--version]
RabbitMQ message publisher
optional arguments:
-h, --help show this help message and exit
-H HOST Server hostname (default: localhost)
-p PORT Server port (default: 5672)
-s Use SSL to connect (default: False)
-v VHOST Server virtual host (default: /)
-u USER Server username (default: guest)
-P PASSWORD Server password (default: guest)
-W Prompt for password (default: False)
-f PATH Read password from a file (default: None)
-e EXCHANGE Exchange to publish to (default: None)
-r ROUTING_KEY Routing Key to use (default: None)
-c Confirm delivery of each message, exiting if a message
delivery could not be confirmed (default: False)
--add-user Include the user in the message properties (default: False)
--app-id APP_ID Specify the app-id property of the message (default: rabbitstew)
--auto-id Create a unique message ID for each message (default: False)
--content-type VALUE Specify the content type of the message (default: None)
--type TYPE Specify the message type (default: None)
-V Verbose output (default: False)
--version show program's version number and exit
Version History
---------------
- 0.1.0 - released *2015-02-02*
- Initial Release
.. |Version| image:: https://badge.fury.io/py/rabbitstew.svg?
:target: http://badge.fury.io/py/rabbitstew
.. |Status| image:: https://travis-ci.org/gmr/rabbitstew.svg?branch=master
:target: https://travis-ci.org/gmr/rabbitstew
.. |Downloads| image:: https://pypip.in/d/rabbitstew/badge.svg?
:target: https://pypi.python.org/pypi/rabbitstew
.. |License| image:: https://pypip.in/license/rabbitstew/badge.svg?
:target: https://rabbitstew.readthedocs.org
| PypiClean |
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/purview/v20210701/private_endpoint_connection.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
    """The set of arguments for constructing a PrivateEndpointConnection resource."""

    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 private_endpoint: Optional[pulumi.Input['PrivateEndpointArgs']] = None,
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 private_link_service_connection_state: Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']] = None):
        """
        The set of arguments for constructing a PrivateEndpointConnection resource.
        :param pulumi.Input[str] account_name: The name of the account.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input['PrivateEndpointArgs'] private_endpoint: The private endpoint information.
        :param pulumi.Input[str] private_endpoint_connection_name: Name of the private endpoint connection.
        :param pulumi.Input['PrivateLinkServiceConnectionStateArgs'] private_link_service_connection_state: The private link service connection state.
        """
        # Required properties are registered with the pulumi runtime unconditionally.
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional properties are only registered when explicitly provided.
        if private_endpoint is not None:
            pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_endpoint_connection_name is not None:
            pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
        if private_link_service_connection_state is not None:
            pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)

    # Getters/setters below delegate storage to pulumi.get/pulumi.set so the
    # @pulumi.input_type machinery can track the values.
    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        The name of the account.
        """
        return pulumi.get(self, "account_name")

    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional[pulumi.Input['PrivateEndpointArgs']]:
        """
        The private endpoint information.
        """
        return pulumi.get(self, "private_endpoint")

    @private_endpoint.setter
    def private_endpoint(self, value: Optional[pulumi.Input['PrivateEndpointArgs']]):
        pulumi.set(self, "private_endpoint", value)

    @property
    @pulumi.getter(name="privateEndpointConnectionName")
    def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the private endpoint connection.
        """
        return pulumi.get(self, "private_endpoint_connection_name")

    @private_endpoint_connection_name.setter
    def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_endpoint_connection_name", value)

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']]:
        """
        The private link service connection state.
        """
        return pulumi.get(self, "private_link_service_connection_state")

    @private_link_service_connection_state.setter
    def private_link_service_connection_state(self, value: Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']]):
        pulumi.set(self, "private_link_service_connection_state", value)
class PrivateEndpointConnection(pulumi.CustomResource):
    """A private endpoint connection resource (azure-native purview, API version 2021-07-01)."""

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]] = None,
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A private endpoint connection class.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The name of the account.
        :param pulumi.Input[pulumi.InputType['PrivateEndpointArgs']] private_endpoint: The private endpoint information.
        :param pulumi.Input[str] private_endpoint_connection_name: Name of the private endpoint connection.
        :param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']] private_link_service_connection_state: The private link service connection state.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PrivateEndpointConnectionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A private endpoint connection class.
        :param str resource_name: The name of the resource.
        :param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either an Args object or
        # individual keyword arguments; both funnel into _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]] = None,
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            # Creating a new resource: validate required inputs and build props.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)

            # Required unless looking up by URN.
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            __props__.__dict__["private_endpoint"] = private_endpoint
            __props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
            __props__.__dict__["private_link_service_connection_state"] = private_link_service_connection_state
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            # Output-only properties start unresolved.
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
        # Aliases keep state continuity with the versionless and other API-version types.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:purview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:purview/v20201201preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:purview/v20211201:PrivateEndpointConnection")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(PrivateEndpointConnection, __self__).__init__(
            'azure-native:purview/v20210701:PrivateEndpointConnection',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
        """
        Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties are read back from the provider; seed them as None.
        __props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)

        __props__.__dict__["name"] = None
        __props__.__dict__["private_endpoint"] = None
        __props__.__dict__["private_link_service_connection_state"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["type"] = None
        return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Gets or sets the name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]:
        """
        The private endpoint information.
        """
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> pulumi.Output[Optional['outputs.PrivateLinkServiceConnectionStateResponse']]:
        """
        The private link service connection state.
        """
        return pulumi.get(self, "private_link_service_connection_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Gets or sets the type.
        """
        return pulumi.get(self, "type")
/flaightkit-0.4.0.tar.gz/flaightkit-0.4.0/flytekit/common/nodes.py | import abc as _abc
import logging as _logging
import os as _os
import six as _six
from flyteidl.core import literals_pb2 as _literals_pb2
from sortedcontainers import SortedDict as _SortedDict
from flytekit.clients.helpers import iterate_task_executions as _iterate_task_executions
from flytekit.common import component_nodes as _component_nodes
from flytekit.common import constants as _constants
from flytekit.common import promise as _promise
from flytekit.common import sdk_bases as _sdk_bases
from flytekit.common import utils as _common_utils
from flytekit.common.exceptions import scopes as _exception_scopes
from flytekit.common.exceptions import system as _system_exceptions
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.common.mixins import artifact as _artifact_mixin
from flytekit.common.mixins import hash as _hash_mixin
from flytekit.common.tasks import executions as _task_executions
from flytekit.common.types import helpers as _type_helpers
from flytekit.common.utils import _dnsify
from flytekit.engines.flyte import engine as _flyte_engine
from flytekit.interfaces.data import data_proxy as _data_proxy
from flytekit.models import common as _common_models
from flytekit.models import literals as _literal_models
from flytekit.models import node_execution as _node_execution_models
from flytekit.models.core import execution as _execution_models
from flytekit.models.core import workflow as _workflow_model
class ParameterMapper(_SortedDict, metaclass=_common_models.FlyteABCMeta):
    """
    This abstract class provides functionality to reference specific inputs and outputs for a task instance. This
    allows for syntax such as:

        my_task_instance.inputs.my_input

    And is especially useful for linking tasks together via outputs -> inputs in workflow definitions:

        my_second_task_instance(input=my_task_instances.outputs.my_output)

    Attributes:
        Dynamically discovered. Only the keys for inputs/outputs can be referenced.

    Example:

    .. code-block:: python

        @inputs(a=Types.Integer)
        @outputs(b=Types.String)
        @python_task(version='1')
        def my_task(wf_params, a, b):
            pass

        input_link = my_task.inputs.a # Success!
        output_link = my_tasks.outputs.b # Success!

        input_link = my_task.inputs.c # Attribute not found exception!
        output_link = my_task.outputs.d # Attribute not found exception!
    """

    def __init__(self, type_map, node):
        """
        :param dict[Text, flytekit.models.interface.Variable] type_map:
        :param SdkNode node:
        """
        super(ParameterMapper, self).__init__()
        # Map each interface variable name to a subclass-specific wrapper object.
        for key, var in _six.iteritems(type_map):
            self[key] = self._return_mapping_object(node, _type_helpers.get_sdk_type_from_literal_type(var.type), key)
        # Once this flag exists in __dict__, __setattr__ rejects further attribute
        # assignment, making the mapper effectively immutable.
        self._initialized = True

    def __getattr__(self, key):
        # Only invoked when normal attribute lookup fails; fall back to dict keys.
        # "iteritems" is aliased to items for py2-style callers when available.
        if key == "iteritems" and hasattr(super(ParameterMapper, self), "items"):
            return super(ParameterMapper, self).items
        if hasattr(super(ParameterMapper, self), key):
            return getattr(super(ParameterMapper, self), key)
        if key not in self:
            raise _user_exceptions.FlyteAssertion("{} doesn't exist.".format(key))
        return self[key]

    def __setattr__(self, key, value):
        # Reject mutation after __init__ completed (see _initialized above).
        if "_initialized" in self.__dict__:
            raise _user_exceptions.FlyteAssertion("Parameters are immutable.")
        else:
            super(ParameterMapper, self).__setattr__(key, value)

    @_abc.abstractmethod
    def _return_mapping_object(self, sdk_node, sdk_type, name):
        """
        Build the object stored for one input/output key; implemented by subclasses.

        :param flytekit.common.nodes.Node sdk_node:
        :param flytekit.common.types.FlyteSdkType sdk_type:
        :param Text name:
        """
        pass
class OutputParameterMapper(ParameterMapper):
    """
    This subclass of ParameterMapper is used to represent outputs for a given node.
    Each key resolves to a promise.NodeOutput that can be bound to a downstream input.
    """

    def _return_mapping_object(self, sdk_node, sdk_type, name):
        """
        :param flytekit.common.nodes.Node sdk_node:
        :param flytekit.common.types.FlyteSdkType sdk_type:
        :param Text name:
        :rtype: flytekit.common.promise.NodeOutput
        """
        return _promise.NodeOutput(sdk_node, sdk_type, name)
class SdkNode(_hash_mixin.HashOnReferenceMixin, _workflow_model.Node, metaclass=_sdk_bases.ExtendedSdkType):
    """A workflow graph node wrapping exactly one executable entity (task,
    sub-workflow, launch plan, or branch) plus its input bindings and upstream
    dependencies."""

    def __init__(
        self,
        id,
        upstream_nodes,
        bindings,
        metadata,
        sdk_task=None,
        sdk_workflow=None,
        sdk_launch_plan=None,
        sdk_branch=None,
        parameter_mapping=True,
    ):
        """
        :param Text id: A workflow-level unique identifier that identifies this node in the workflow. "inputs" and
            "outputs" are reserved node ids that cannot be used by other nodes.
        :param flytekit.models.core.workflow.NodeMetadata metadata: Extra metadata about the node.
        :param list[flytekit.models.literals.Binding] bindings: Specifies how to bind the underlying
            interface's inputs. All required inputs specified in the underlying interface must be fulfilled.
        :param list[SdkNode] upstream_nodes: Specifies execution dependencies for this node ensuring it will
            only get scheduled to run after all its upstream nodes have completed. This node will have
            an implicit dependency on any node that appears in inputs field.
        :param flytekit.common.tasks.task.SdkTask sdk_task: The task to execute in this
            node.
        :param flytekit.common.workflow.SdkWorkflow sdk_workflow: The workflow to execute in this node.
        :param flytekit.common.launch_plan.SdkLaunchPlan sdk_launch_plan: The launch plan to execute in this
            node.
        :param TODO sdk_branch: TODO
        """
        # Exactly one of the four executable entities must be supplied.
        non_none_entities = [
            entity for entity in [sdk_workflow, sdk_branch, sdk_launch_plan, sdk_task] if entity is not None
        ]
        if len(non_none_entities) != 1:
            raise _user_exceptions.FlyteAssertion(
                "An SDK node must have one underlying entity specified at once. Received the following "
                "entities: {}".format(non_none_entities)
            )

        # Workflows and launch plans are both modeled as a workflow node.
        workflow_node = None
        if sdk_workflow is not None:
            workflow_node = _component_nodes.SdkWorkflowNode(sdk_workflow=sdk_workflow)
        elif sdk_launch_plan is not None:
            workflow_node = _component_nodes.SdkWorkflowNode(sdk_launch_plan=sdk_launch_plan)

        # TODO: this calls the constructor which means it will set all the upstream node ids to None if at the time of
        #  this instantiation, the upstream nodes have not had their nodes assigned yet.
        super(SdkNode, self).__init__(
            id=_dnsify(id) if id else None,
            metadata=metadata,
            inputs=bindings,
            upstream_node_ids=[n.id for n in upstream_nodes],
            output_aliases=[],  # TODO: Are aliases a thing in SDK nodes
            task_node=_component_nodes.SdkTaskNode(sdk_task) if sdk_task else None,
            workflow_node=workflow_node,
            branch_node=sdk_branch,
        )
        self._upstream = upstream_nodes
        self._executable_sdk_object = sdk_task or sdk_workflow or sdk_launch_plan
        # NOTE(review): when parameter_mapping is False, self._outputs is never
        # assigned, so the outputs property would raise AttributeError — confirm
        # callers never read outputs in that mode.
        if parameter_mapping:
            if not sdk_branch:
                self._outputs = OutputParameterMapper(self._executable_sdk_object.interface.outputs, self)
            else:
                self._outputs = None

    @property
    def executable_sdk_object(self):
        # The underlying task, workflow, or launch plan this node executes.
        return self._executable_sdk_object

    @classmethod
    def promote_from_model(cls, model, sub_workflows, tasks):
        """
        Build an SdkNode from an Admin-returned node model.

        :param flytekit.models.core.workflow.Node model:
        :param dict[flytekit.models.core.identifier.Identifier, flytekit.models.core.workflow.WorkflowTemplate]
            sub_workflows:
        :param dict[flytekit.models.core.identifier.Identifier, flytekit.models.task.TaskTemplate] tasks: If specified,
            these task templates will be passed to the SdkTaskNode promote_from_model call, and used
            instead of fetching from Admin.
        :rtype: SdkNode
        """
        id = model.id
        # This should never be called
        if id == _constants.START_NODE_ID or id == _constants.END_NODE_ID:
            _logging.warning("Should not call promote from model on a start node or end node {}".format(model))
            return None

        sdk_task_node, sdk_workflow_node = None, None
        if model.task_node is not None:
            sdk_task_node = _component_nodes.SdkTaskNode.promote_from_model(model.task_node, tasks)
        elif model.workflow_node is not None:
            sdk_workflow_node = _component_nodes.SdkWorkflowNode.promote_from_model(
                model.workflow_node, sub_workflows, tasks
            )
        else:
            raise _system_exceptions.FlyteSystemException("Bad Node model, neither task nor workflow detected")

        # When WorkflowTemplate models (containing node models) are returned by Admin, they've been compiled with a
        # start node. In order to make the promoted SdkWorkflow look the same, we strip the start-node text back out.
        for i in model.inputs:
            if i.binding.promise is not None and i.binding.promise.node_id == _constants.START_NODE_ID:
                i.binding.promise._node_id = _constants.GLOBAL_INPUT_NODE_ID

        if sdk_task_node is not None:
            return cls(
                id=id,
                upstream_nodes=[],  # set downstream, model doesn't contain this information
                bindings=model.inputs,
                metadata=model.metadata,
                sdk_task=sdk_task_node.sdk_task,
            )
        elif sdk_workflow_node is not None:
            if sdk_workflow_node.sdk_workflow is not None:
                return cls(
                    id=id,
                    upstream_nodes=[],  # set downstream, model doesn't contain this information
                    bindings=model.inputs,
                    metadata=model.metadata,
                    sdk_workflow=sdk_workflow_node.sdk_workflow,
                )
            elif sdk_workflow_node.sdk_launch_plan is not None:
                return cls(
                    id=id,
                    upstream_nodes=[],  # set downstream, model doesn't contain this information
                    bindings=model.inputs,
                    metadata=model.metadata,
                    sdk_launch_plan=sdk_workflow_node.sdk_launch_plan,
                )
            else:
                raise _system_exceptions.FlyteSystemException(
                    "Bad SdkWorkflowNode model, both lp and workflow are None"
                )
        else:
            raise _system_exceptions.FlyteSystemException("Bad SdkNode model, both task and workflow nodes are empty")

    @property
    def upstream_nodes(self):
        """
        :rtype: list[SdkNode]
        """
        return self._upstream

    @property
    def upstream_node_ids(self):
        """
        Upstream node ids, sorted for deterministic serialization.

        :rtype: list[Text]
        """
        return [n.id for n in sorted(self.upstream_nodes, key=lambda x: x.id)]

    @property
    def outputs(self):
        """
        :rtype: dict[Text, flytekit.common.promise.NodeOutput]
        """
        return self._outputs

    def assign_id_and_return(self, id):
        """
        Assign this node's workflow-level id exactly once; raises if already set.

        :param Text id:
        :rtype: None
        """
        if self.id:
            raise _user_exceptions.FlyteAssertion(
                "Error assigning ID: {} because {} is already assigned. Has this node been assigned to another "
                "workflow already?".format(id, self)
            )
        self._id = _dnsify(id) if id else None
        self._metadata._name = id
        return self

    def with_overrides(self, *args, **kwargs):
        # TODO: Implement overrides
        raise NotImplementedError("Overrides are not supported in Flyte yet.")

    @_exception_scopes.system_entry_point
    def __lshift__(self, other):
        """
        Add a node upstream of this node without necessarily mapping outputs -> inputs.

        :param Node other: node to place upstream
        """
        # Deduplicate by hash before appending.
        if hash(other) not in set(hash(n) for n in self.upstream_nodes):
            self._upstream.append(other)
        return other

    @_exception_scopes.system_entry_point
    def __rshift__(self, other):
        """
        Add a node downstream of this node without necessarily mapping outputs -> inputs.

        :param Node other: node to place downstream
        """
        if hash(self) not in set(hash(n) for n in other.upstream_nodes):
            other.upstream_nodes.append(self)
        return other

    def __repr__(self):
        """
        :rtype: Text
        """
        return "Node(ID: {} Executable: {})".format(self.id, self._executable_sdk_object)
class SdkNodeExecution(
    _node_execution_models.NodeExecution, _artifact_mixin.ExecutionArtifact, metaclass=_sdk_bases.ExtendedSdkType
):
    """Client-side handle for a single node execution; lazily fetches inputs,
    outputs, and child task executions from the Flyte platform."""

    def __init__(self, *args, **kwargs):
        super(SdkNodeExecution, self).__init__(*args, **kwargs)
        # Lazily-populated caches, filled on first access / sync().
        self._task_executions = None
        self._workflow_executions = None
        self._inputs = None
        self._outputs = None

    @property
    def task_executions(self):
        """
        Returns the underlying task executions in order of try attempt.

        :rtype: list[flytekit.common.tasks.executions.SdkTaskExecution]
        """
        return self._task_executions or []

    @property
    def workflow_executions(self):
        """
        Returns the underlying workflow executions in order of try attempt.

        :rtype: list[flytekit.common.workflow_execution.SdkWorkflowExecution]
        """
        return self._workflow_executions or []

    @property
    def executions(self):
        """
        Returns a list of generic execution artifacts.

        :rtype: list[flytekit.common.mixins.artifact.ExecutionArtifact]
        """
        return self.task_executions or self.workflow_executions or []

    @property
    def inputs(self):
        """
        Returns the inputs to the execution in the standard Python format as dictated by the type engine.

        :rtype: dict[Text, T]
        """
        if self._inputs is None:
            client = _flyte_engine.get_client()
            execution_data = client.get_node_execution_data(self.id)

            # Inputs are returned inline unless they are too big, in which case a url blob pointing to them is returned.
            if bool(execution_data.full_inputs.literals):
                input_map = execution_data.full_inputs
            elif execution_data.inputs.bytes > 0:
                with _common_utils.AutoDeletingTempDir() as t:
                    tmp_name = _os.path.join(t.name, "inputs.pb")
                    _data_proxy.Data.get_data(execution_data.inputs.url, tmp_name)
                    input_map = _literal_models.LiteralMap.from_flyte_idl(
                        _common_utils.load_proto_from_file(_literals_pb2.LiteralMap, tmp_name)
                    )
            else:
                input_map = _literal_models.LiteralMap({})

            self._inputs = _type_helpers.unpack_literal_map_to_sdk_python_std(input_map)
        return self._inputs

    @property
    def outputs(self):
        """
        Returns the outputs to the execution in the standard Python format as dictated by the type engine. If the
        execution ended in error or the execution is in progress, an exception will be raised.

        :rtype: dict[Text, T]
        """
        if not self.is_complete:
            # Fixed message: previously read "Please what until ...".
            raise _user_exceptions.FlyteAssertion(
                "Please wait until the node execution has completed before requesting the outputs."
            )
        if self.error:
            raise _user_exceptions.FlyteAssertion("Outputs could not be found because the execution ended in failure.")

        if self._outputs is None:
            client = _flyte_engine.get_client()
            execution_data = client.get_node_execution_data(self.id)

            # Outputs are returned inline unless they are too big, in which case a url blob pointing to them is returned.
            if bool(execution_data.full_outputs.literals):
                output_map = execution_data.full_outputs
            elif execution_data.outputs.bytes > 0:
                with _common_utils.AutoDeletingTempDir() as t:
                    tmp_name = _os.path.join(t.name, "outputs.pb")
                    _data_proxy.Data.get_data(execution_data.outputs.url, tmp_name)
                    output_map = _literal_models.LiteralMap.from_flyte_idl(
                        _common_utils.load_proto_from_file(_literals_pb2.LiteralMap, tmp_name)
                    )
            else:
                output_map = _literal_models.LiteralMap({})

            self._outputs = _type_helpers.unpack_literal_map_to_sdk_python_std(output_map)
        return self._outputs

    @property
    def error(self):
        """
        If execution is in progress, raise an exception. Otherwise, return None if no error was present upon
        reaching completion.

        :rtype: flytekit.models.core.execution.ExecutionError or None
        """
        if not self.is_complete:
            raise _user_exceptions.FlyteAssertion(
                "Please wait until the node execution has completed before requesting error information."
            )
        return self.closure.error

    @property
    def is_complete(self):
        """
        Dictates whether or not the execution is complete.

        :rtype: bool
        """
        return self.closure.phase in {
            _execution_models.NodeExecutionPhase.ABORTED,
            _execution_models.NodeExecutionPhase.FAILED,
            _execution_models.NodeExecutionPhase.SKIPPED,
            _execution_models.NodeExecutionPhase.SUCCEEDED,
            _execution_models.NodeExecutionPhase.TIMED_OUT,
        }

    @classmethod
    def promote_from_model(cls, base_model):
        """
        :param _node_execution_models.NodeExecution base_model:
        :rtype: SdkNodeExecution
        """
        return cls(
            closure=base_model.closure, id=base_model.id, input_uri=base_model.input_uri, metadata=base_model.metadata
        )

    def sync(self):
        """
        Syncs the state of this object with that held by the platform.

        :rtype: None
        """
        # NOTE(review): `self.task_executions` is never None (the property coerces
        # to []), so this condition is always True and sync always refreshes.
        # Presumably `self._task_executions is None` was intended — confirm before
        # changing, as callers may rely on the unconditional refresh.
        if not self.is_complete or self.task_executions is not None:
            client = _flyte_engine.get_client()
            self._closure = client.get_node_execution(self.id).closure
            task_executions = list(_iterate_task_executions(client, self.id))
            self._task_executions = [_task_executions.SdkTaskExecution.promote_from_model(te) for te in task_executions]
            # TODO: Sub-workflows too once implemented

    def _sync_closure(self):
        """
        Syncs the closure of the underlying execution artifact with the state observed by the platform.

        :rtype: None
        """
        client = _flyte_engine.get_client()
        self._closure = client.get_node_execution(self.id).closure
/ivy-testing-release-0.0.0.1.tar.gz/ivy-testing-release-0.0.0.1/ivy/utils/inspection.py | from typing import get_type_hints
# local
import ivy
def _is_optional(typ):
# noinspection PyBroadException
try:
rep = typ.__repr__().split(".")[1]
if rep.startswith("Optional") or (
rep.startswith("Union") and type(None) in typ.__args__
):
return True
except BaseException as error:
print("Exception occured: {}".format(error))
return False
def _is_union(typ):
# noinspection PyBroadException
try:
rep = typ.__repr__().split(".")[1]
if rep.startswith("Union"):
return True
except BaseException as error:
print("Exception occured: {}".format(error))
return False
def _is_dict(typ):
# noinspection PyBroadException
try:
rep = typ.__repr__().split(".")[1]
if rep.startswith("Dict"):
return True
except BaseException as error:
print("Exception occured: {}".format(error))
return False
def _is_iterable(typ):
# noinspection PyBroadException
try:
rep = typ.__repr__().split(".")[1]
if rep.startswith("List") or rep.startswith("Tuple"):
return True
except BaseException as error:
print("Exception occured: {}".format(error))
return False
def _correct_index(is_opt, is_dict, is_iter):
if is_opt:
return ["optional"]
elif is_dict:
return [str]
elif is_iter:
return [int]
return []
def _get_array_idxs(typ, idx_so_far=None):
    """Recursively collect index paths to ivy ``Array``/``NativeArray``
    annotations nested inside the typing annotation ``typ``.

    :param typ: a typing annotation, e.g. ``Optional[List[ivy.Array]]``.
    :param idx_so_far: index path accumulated from enclosing containers.
    :return: list of index paths; each path is a list of steps where a step is
             ``"optional"``, ``str`` (dict key), or ``int`` (list/tuple index),
             as produced by _correct_index.
    """
    idx_so_far = ivy.default(idx_so_far, list())
    these_idxs = list()
    if not hasattr(typ, "__args__"):
        # Not a parameterized typing construct -> nothing to recurse into.
        return these_idxs
    is_opt = _is_optional(typ)
    is_union = _is_union(typ)
    is_dict = _is_dict(typ)
    is_iter = _is_iterable(typ)
    for a in typ.__args__:
        a_repr = repr(a)
        if (
            "[" not in a_repr
            and "]" not in a_repr
            and "ivy." in a_repr
            and (".Array" in a_repr or ".NativeArray" in a_repr)
        ):
            # Bare ivy array annotation found at this nesting level.
            these_idxs.append(idx_so_far + _correct_index(is_opt, is_dict, is_iter))
            if is_union:
                # For a union, one array-typed member is enough.
                break
        else:
            # Recurse into nested containers, extending the index path.
            these_idxs += _get_array_idxs(
                a, idx_so_far + _correct_index(is_opt, is_dict, is_iter)
            )
    return these_idxs
def fn_array_spec(fn):
    """
    Return a specification of the function, indicating all arguments which include
    arrays, and the indexes of these.

    Parameters
    ----------
    fn
        function to inspect

    Returns
    -------
    ret
        specification: a list of index paths, each starting with a
        (position, argument-name) tuple.
    """
    try:
        # get_type_hints can raise on some signatures (seen on Python 3.8.0).
        hints = get_type_hints(fn)
    except Exception:
        hints = {}
    spec = []
    for position, (arg_name, annotation) in enumerate(hints.items()):
        for idx_path in _get_array_idxs(annotation):
            spec.append([(position, arg_name)] + idx_path)
    return spec
def add_array_specs():
    """Attach an ``array_spec`` attribute to every lower-case-named callable in
    the ``ivy`` module namespace."""
    for name, obj in ivy.__dict__.items():
        if not callable(obj) or not name[0].islower():
            continue
        obj.array_spec = fn_array_spec(obj)
/ams_dott_runtime-1.1.0-py3-none-win_amd64.whl/ams_dott_runtime-1.1.0.data/data/dott_data/apps/python27/python-2.7.13/Lib/bdb.py |
import fnmatch
import sys
import os
import types
__all__ = ["BdbQuit","Bdb","Breakpoint"]
class BdbQuit(Exception):
    """Exception raised to abort debugging entirely (propagated out of the
    dispatch_* handlers when self.quitting is set)."""
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
    def __init__(self, skip=None):
        # skip: optional iterable of glob patterns naming modules whose frames
        # should never stop the debugger (see is_skipped_module).
        self.skip = set(skip) if skip else None
        self.breaks = {}    # canonical filename -> list of breakpoint line numbers
        self.fncache = {}   # memo cache for canonic() filename translation
        self.frame_returning = None  # frame currently dispatching a 'return' event
    def canonic(self, filename):
        """Return a case-normalized absolute path for filename.

        Pseudo-filenames of the form "<...>" (e.g. "<string>") are returned
        unchanged.  Results are memoized in self.fncache.
        """
        if filename == "<" + filename[1:-1] + ">":
            return filename
        canonic = self.fncache.get(filename)
        if not canonic:
            canonic = os.path.abspath(filename)
            canonic = os.path.normcase(canonic)
            self.fncache[filename] = canonic
        return canonic
    def reset(self):
        """Clear per-run state so a fresh trace can begin."""
        import linecache
        linecache.checkcache()  # drop stale cached source lines
        self.botframe = None
        self._set_stopinfo(None, None)
    def trace_dispatch(self, frame, event, arg):
        """Top-level trace function installed via sys.settrace().

        Routes each event to the matching dispatch_* handler and returns the
        local trace function for the scope (or None to stop tracing it).
        C-call events are ignored but keep tracing enabled.
        """
        if self.quitting:
            return # None
        if event == 'line':
            return self.dispatch_line(frame)
        if event == 'call':
            return self.dispatch_call(frame, arg)
        if event == 'return':
            return self.dispatch_return(frame, arg)
        if event == 'exception':
            return self.dispatch_exception(frame, arg)
        if event == 'c_call':
            return self.trace_dispatch
        if event == 'c_exception':
            return self.trace_dispatch
        if event == 'c_return':
            return self.trace_dispatch
        print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event)
        return self.trace_dispatch
    def dispatch_line(self, frame):
        """Handle a 'line' event: invoke user_line() when stepping or at a
        breakpoint; raise BdbQuit if the user requested to quit."""
        if self.stop_here(frame) or self.break_here(frame):
            self.user_line(frame)
            if self.quitting: raise BdbQuit
        return self.trace_dispatch
    def dispatch_call(self, frame, arg):
        """Handle a 'call' event: decide whether the new frame needs tracing
        and invoke user_call() when stopping here."""
        # XXX 'arg' is no longer used
        if self.botframe is None:
            # First call of dispatch since reset()
            self.botframe = frame.f_back # (CT) Note that this may also be None!
            return self.trace_dispatch
        if not (self.stop_here(frame) or self.break_anywhere(frame)):
            # No need to trace this function
            return # None
        self.user_call(frame, arg)
        if self.quitting: raise BdbQuit
        return self.trace_dispatch
    def dispatch_return(self, frame, arg):
        """Handle a 'return' event: invoke user_return() when stopping here.

        frame_returning is set for the duration of the callback so set_step()
        can restore tracing in the caller (see Issue #13183 note there).
        """
        if self.stop_here(frame) or frame == self.returnframe:
            try:
                self.frame_returning = frame
                self.user_return(frame, arg)
            finally:
                self.frame_returning = None
            if self.quitting: raise BdbQuit
        return self.trace_dispatch
    def dispatch_exception(self, frame, arg):
        """Handle an 'exception' event: invoke user_exception() when stopping
        here; raise BdbQuit if the user requested to quit."""
        if self.stop_here(frame):
            self.user_exception(frame, arg)
            if self.quitting: raise BdbQuit
        return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
    def is_skipped_module(self, module_name):
        """Return True if module_name matches any glob pattern in self.skip."""
        for pattern in self.skip:
            if fnmatch.fnmatch(module_name, pattern):
                return True
        return False
    def stop_here(self, frame):
        """Return True if the debugger should stop in this frame, based on the
        stepping state recorded by _set_stopinfo() and the skip patterns."""
        # (CT) stopframe may now also be None, see dispatch_call.
        # (CT) the former test for None is therefore removed from here.
        if self.skip and \
               self.is_skipped_module(frame.f_globals.get('__name__')):
            return False
        if frame is self.stopframe:
            if self.stoplineno == -1:
                return False
            return frame.f_lineno >= self.stoplineno
        # Stop in any frame below botframe (i.e. within the debugged call tree).
        while frame is not None and frame is not self.stopframe:
            if frame is self.botframe:
                return True
            frame = frame.f_back
        return False
    def break_here(self, frame):
        """Return True if an effective breakpoint exists at the frame's current
        position (by exact line, or by function first line for breakpoints set
        on a function name).  Temporary breakpoints that fire are cleared."""
        filename = self.canonic(frame.f_code.co_filename)
        if not filename in self.breaks:
            return False
        lineno = frame.f_lineno
        if not lineno in self.breaks[filename]:
            # The line itself has no breakpoint, but maybe the line is the
            # first line of a function with breakpoint set by function name.
            lineno = frame.f_code.co_firstlineno
            if not lineno in self.breaks[filename]:
                return False

        # flag says ok to delete temp. bp
        (bp, flag) = effective(filename, lineno, frame)
        if bp:
            self.currentbp = bp.number
            if (flag and bp.temporary):
                self.do_clear(str(bp.number))
            return True
        else:
            return False
    def do_clear(self, arg):
        """Remove a breakpoint by number; must be implemented by subclasses
        (called from break_here() to delete fired temporary breakpoints)."""
        raise NotImplementedError, "subclass of bdb must implement do_clear()"
    def break_anywhere(self, frame):
        """Return True if any breakpoint exists in the frame's file."""
        return self.canonic(frame.f_code.co_filename) in self.breaks
# Derived classes should override the user_* methods
# to gain control.
    def user_call(self, frame, argument_list):
        """This method is called when there is the remote possibility
        that we ever need to stop in this function.  Default: do nothing;
        derived classes override to gain control."""
        pass
    def user_line(self, frame):
        """This method is called when we stop or break at this line."""
        # Default: do nothing; subclasses override to gain control.
        pass
    def user_return(self, frame, return_value):
        """This method is called when a return trap is set here."""
        # Default: do nothing; subclasses override to gain control.
        pass
def user_exception(self, frame, exc_info):
exc_type, exc_value, exc_traceback = exc_info
"""This method is called if an exception occurs,
but only if we are to stop at or just below this level."""
pass
    def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
        """Record where the debugger should next stop.

        stoplineno semantics:
          >= 0 -- stop at any line >= stoplineno in stopframe
          -1   -- do not stop at all (used by set_continue)
        """
        self.stopframe = stopframe
        self.returnframe = returnframe
        self.quitting = 0
        # stoplineno >= 0 means: stop at line >= the stoplineno
        # stoplineno -1 means: don't stop at all
        self.stoplineno = stoplineno
# Derived classes and clients can call the following methods
# to affect the stepping state.
    def set_until(self, frame): #the name "until" is borrowed from gdb
        """Stop when the line with the line no greater than the current one is
        reached or when returning from current frame"""
        # Stop in this frame at any line past the current one, or on the
        # frame's return event (returnframe == frame).
        self._set_stopinfo(frame, frame, frame.f_lineno+1)
    def set_step(self):
        """Stop after one line of code."""
        # Issue #13183: pdb skips frames after hitting a breakpoint and running
        # step commands.
        # Restore the trace function in the caller (that may not have been set
        # for performance reasons) when returning from the current frame.
        if self.frame_returning:
            caller_frame = self.frame_returning.f_back
            if caller_frame and not caller_frame.f_trace:
                caller_frame.f_trace = self.trace_dispatch
        # stopframe None means: stop in any frame.
        self._set_stopinfo(None, None)
    def set_next(self, frame):
        """Stop on the next line in or below the given frame."""
        # No returnframe: returning from *frame* does not by itself stop.
        self._set_stopinfo(frame, None)
    def set_return(self, frame):
        """Stop when returning from the given frame."""
        # Stop in the caller of *frame*; returnframe marks the frame whose
        # 'return' event should trigger a stop.
        self._set_stopinfo(frame.f_back, frame)
    def set_trace(self, frame=None):
        """Start debugging from `frame`.

        If frame is not specified, debugging starts from caller's frame.
        """
        if frame is None:
            frame = sys._getframe().f_back
        self.reset()
        # Install the trace function on every frame up the stack so the
        # debugger also sees events in frames that are already active.
        while frame:
            frame.f_trace = self.trace_dispatch
            self.botframe = frame
            frame = frame.f_back
        self.set_step()
        sys.settrace(self.trace_dispatch)
    def set_continue(self):
        """Resume execution, stopping only at breakpoints (or at the end)."""
        # Don't stop except at breakpoints or when finished
        self._set_stopinfo(self.botframe, None, -1)
        if not self.breaks:
            # no breakpoints; run without debugger overhead
            sys.settrace(None)
            # Also drop the per-frame trace functions set_trace() installed.
            frame = sys._getframe().f_back
            while frame and frame is not self.botframe:
                del frame.f_trace
                frame = frame.f_back
    def set_quit(self):
        """Make the debugged program raise BdbQuit at the next event."""
        self.stopframe = self.botframe
        self.returnframe = None
        self.quitting = 1
        sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
# error message is something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=0, cond = None,
funcname=None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename,
lineno)
if not filename in self.breaks:
self.breaks[filename] = []
list = self.breaks[filename]
if not lineno in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond, funcname)
    def _prune_breaks(self, filename, lineno):
        """Drop (filename, lineno) from self.breaks once no Breakpoint
        instances remain registered for that location."""
        if (filename, lineno) not in Breakpoint.bplist:
            self.breaks[filename].remove(lineno)
            if not self.breaks[filename]:
                del self.breaks[filename]
    def clear_break(self, filename, lineno):
        """Delete all breakpoints at filename:lineno.

        Returns an error message string, or None on success.
        """
        filename = self.canonic(filename)
        if not filename in self.breaks:
            return 'There are no breakpoints in %s' % filename
        if lineno not in self.breaks[filename]:
            return 'There is no breakpoint at %s:%d' % (filename,
                                                        lineno)
        # If there's only one bp in the list for that file,line
        # pair, then remove the breaks entry
        # Iterate over a copy: deleteMe() mutates Breakpoint.bplist.
        for bp in Breakpoint.bplist[filename, lineno][:]:
            bp.deleteMe()
        self._prune_breaks(filename, lineno)
def clear_bpbynumber(self, arg):
try:
number = int(arg)
except:
return 'Non-numeric breakpoint number (%s)' % arg
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
return 'Breakpoint number (%d) out of range' % number
if not bp:
return 'Breakpoint (%d) already deleted' % number
bp.deleteMe()
self._prune_breaks(bp.file, bp.line)
    def clear_all_file_breaks(self, filename):
        """Delete every breakpoint in *filename*.

        Returns an error message string, or None on success.
        """
        filename = self.canonic(filename)
        if not filename in self.breaks:
            return 'There are no breakpoints in %s' % filename
        for line in self.breaks[filename]:
            blist = Breakpoint.bplist[filename, line]
            for bp in blist:
                bp.deleteMe()
        del self.breaks[filename]
    def clear_all_breaks(self):
        """Delete every breakpoint everywhere.

        Returns an error message string, or None on success.
        """
        if not self.breaks:
            return 'There are no breakpoints'
        for bp in Breakpoint.bpbynumber:
            if bp:
                bp.deleteMe()
        self.breaks = {}
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return filename in self.breaks and \
lineno in self.breaks[filename]
    def get_breaks(self, filename, lineno):
        """Return all Breakpoint instances for filename:lineno, or [].

        Uses the old 'cond and x or y' idiom; safe here because a bplist
        entry is always a non-empty list when the key exists.
        """
        filename = self.canonic(filename)
        return filename in self.breaks and \
               lineno in self.breaks[filename] and \
               Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if filename in self.breaks:
return self.breaks[filename]
else:
return []
    def get_all_breaks(self):
        """Return the {filename: [lineno, ...]} mapping of all breakpoints."""
        return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
    def get_stack(self, f, t):
        """Return (stack, index) for frame *f* and traceback *t*.

        stack is a list of (frame, lineno) pairs from botframe down to
        the current frame, followed by the traceback frames; index
        points at the entry the debugger should present as current.
        """
        stack = []
        if t and t.tb_frame is f:
            # Skip the traceback entry duplicating the current frame.
            t = t.tb_next
        while f is not None:
            stack.append((f, f.f_lineno))
            if f is self.botframe:
                break
            f = f.f_back
        stack.reverse()
        i = max(0, len(stack) - 1)
        while t is not None:
            stack.append((t.tb_frame, t.tb_lineno))
            t = t.tb_next
        if f is None:
            i = max(0, len(stack) - 1)
        return stack, i
#
    def format_stack_entry(self, frame_lineno, lprefix=': '):
        """Return a one-line description of a (frame, lineno) pair.

        Format: filename(lineno)function(args)->retval: source_line
        """
        # 'repr' here is the Python 2 stdlib module (py3: 'reprlib'),
        # which produces size-limited representations.
        import linecache, repr
        frame, lineno = frame_lineno
        filename = self.canonic(frame.f_code.co_filename)
        s = '%s(%r)' % (filename, lineno)
        if frame.f_code.co_name:
            s = s + frame.f_code.co_name
        else:
            s = s + "<lambda>"
        # pdb stores the call arguments under '__args__' in f_locals.
        if '__args__' in frame.f_locals:
            args = frame.f_locals['__args__']
        else:
            args = None
        if args:
            s = s + repr.repr(args)
        else:
            s = s + '()'
        # pdb stores the pending return value under '__return__'.
        if '__return__' in frame.f_locals:
            rv = frame.f_locals['__return__']
            s = s + '->'
            s = s + repr.repr(rv)
        line = linecache.getline(filename, lineno, frame.f_globals)
        if line: s = s + lprefix + line.strip()
        return s
# The following two methods can be called by clients to use
# a debugger to debug a statement, given as a string.
def run(self, cmd, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(cmd, types.CodeType):
cmd = cmd+'\n'
try:
exec cmd in globals, locals
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runeval(self, expr, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(expr, types.CodeType):
expr = expr+'\n'
try:
return eval(expr, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
    def runctx(self, cmd, globals, locals):
        """Backward-compatibility alias: run() with explicit namespaces."""
        # B/W compatibility
        self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
def runcall(self, func, *args, **kwds):
self.reset()
sys.settrace(self.trace_dispatch)
res = None
try:
res = func(*args, **kwds)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
return res
def set_trace():
    """Start debugging with a fresh Bdb instance from the caller's frame."""
    Bdb().set_trace()
class Breakpoint:
    """Breakpoint class

    Implements temporary breakpoints, ignore counts, disabling and
    (re)-enabling, and conditionals.

    Breakpoints are indexed by number through bpbynumber and by
    the file,line tuple using bplist. The former points to a
    single instance of class Breakpoint. The latter points to a
    list of such instances since there may be more than one
    breakpoint per line.
    """

    # XXX Keeping state in the class is a mistake -- this means
    # you cannot have more than one active Bdb instance.

    next = 1        # Next bp to be assigned
    bplist = {}     # indexed by (file, lineno) tuple
    bpbynumber = [None] # Each entry is None or an instance of Bpt
                # index 0 is unused, except for marking an
                # effective break .... see effective()

    def __init__(self, file, line, temporary=0, cond=None, funcname=None):
        self.funcname = funcname
        # Needed if funcname is not None.
        self.func_first_executable_line = None
        self.file = file    # This better be in canonical form!
        self.line = line
        self.temporary = temporary
        self.cond = cond
        self.enabled = 1
        self.ignore = 0
        self.hits = 0
        self.number = Breakpoint.next
        Breakpoint.next = Breakpoint.next + 1
        # Build the two lists
        self.bpbynumber.append(self)
        if (file, line) in self.bplist:
            self.bplist[file, line].append(self)
        else:
            self.bplist[file, line] = [self]

    def deleteMe(self):
        """Unregister this breakpoint from bpbynumber and bplist."""
        index = (self.file, self.line)
        self.bpbynumber[self.number] = None   # No longer in list
        self.bplist[index].remove(self)
        if not self.bplist[index]:
            # No more bp for this f:l combo
            del self.bplist[index]

    def enable(self):
        self.enabled = 1

    def disable(self):
        self.enabled = 0

    def bpprint(self, out=None):
        """Write a human-readable description of this breakpoint to *out*
        (default: sys.stdout)."""
        if out is None:
            out = sys.stdout
        if self.temporary:
            disp = 'del '
        else:
            disp = 'keep '
        if self.enabled:
            disp = disp + 'yes '
        else:
            disp = disp + 'no '
        # out.write() replaces the Python 2-only 'print >>out' statement,
        # keeping identical output while being valid Python 3 source too.
        out.write('%-4dbreakpoint %s at %s:%d\n' % (self.number, disp,
                                                    self.file, self.line))
        if self.cond:
            out.write('\tstop only if %s\n' % (self.cond,))
        if self.ignore:
            out.write('\tignore next %d hits\n' % (self.ignore))
        if (self.hits):
            if (self.hits > 1): ss = 's'
            else: ss = ''
            out.write('\tbreakpoint already hit %d time%s\n' %
                      (self.hits, ss))
# -----------end of Breakpoint class----------
def checkfuncname(b, frame):
    """Check whether we should break here because of `b.funcname`."""
    if not b.funcname:
        # Line-number breakpoint: break only on the exact line.
        return b.line == frame.f_lineno
    # Function-name breakpoint.
    if frame.f_code.co_name != b.funcname:
        # Executing the 'def' statement itself, not a call.
        return False
    if not b.func_first_executable_line:
        # First entry into the function: remember its first line.
        b.func_first_executable_line = frame.f_lineno
    # Break only on the function's first executable line.
    return b.func_first_executable_line == frame.f_lineno
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns breakpoint number or 0 if none
def effective(file, line, frame):
    """Determine which breakpoint for this file:line is to be acted upon.

    Called only if we know there is a bpt at this
    location. Returns breakpoint that was triggered and a flag
    that indicates if it is ok to delete a temporary bp.
    """
    possibles = Breakpoint.bplist[file,line]
    for i in range(0, len(possibles)):
        b = possibles[i]
        if b.enabled == 0:
            continue
        if not checkfuncname(b, frame):
            continue
        # Count every hit when bp is enabled
        b.hits = b.hits + 1
        if not b.cond:
            # If unconditional, and ignoring,
            # go on to next, else break
            if b.ignore > 0:
                b.ignore = b.ignore -1
                continue
            else:
                # breakpoint and marker that's ok
                # to delete if temporary
                return (b,1)
        else:
            # Conditional bp.
            # Ignore count applies only to those bpt hits where the
            # condition evaluates to true.
            try:
                val = eval(b.cond, frame.f_globals,
                           frame.f_locals)
                if val:
                    if b.ignore > 0:
                        b.ignore = b.ignore -1
                        # continue
                    else:
                        return (b,1)
                # else:
                #   continue
            # NOTE(review): bare 'except:' is deliberate -- any failure
            # evaluating the user's condition must not crash the debugger.
            except:
                # if eval fails, most conservative
                # thing is to stop on breakpoint
                # regardless of ignore count.
                # Don't delete temporary,
                # as another hint to user.
                return (b,0)
    return (None, None)
# -------------------- testing --------------------
class Tdb(Bdb):
    """Smoke-test debugger: prints every call, line, return and exception.

    Python 2 module: the print statements below are intentional.
    """
    def user_call(self, frame, args):
        name = frame.f_code.co_name
        if not name: name = '???'
        print '+++ call', name, args
    def user_line(self, frame):
        import linecache
        name = frame.f_code.co_name
        if not name: name = '???'
        fn = self.canonic(frame.f_code.co_filename)
        line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
        print '+++', fn, frame.f_lineno, name, ':', line.strip()
    def user_return(self, frame, retval):
        print '+++ return', retval
    def user_exception(self, frame, exc_stuff):
        print '+++ exception', exc_stuff
        # Keep running after an exception so the test can finish.
        self.set_continue()
def foo(n):
    """Test helper: calls bar() so the trace output shows nested frames."""
    print 'foo(', n, ')'
    x = bar(n*10)
    print 'bar returned', x
def bar(a):
    """Test helper called by foo(); returns half its argument."""
    print 'bar(', a, ')'
    return a/2
def test():
    """Run foo() under the tracing debugger Tdb."""
    t = Tdb()
    t.run('import bdb; bdb.foo(10)')
# end
/tf_agents-0.17.0rc1-py3-none-any.whl/tf_agents/networks/q_network.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.networks import encoding_network
from tf_agents.networks import network
def validate_specs(action_spec, observation_spec):
  """Validates that `action_spec` describes exactly one scalar-ish action.

  Args:
    action_spec: A nest of `tensor_spec.BoundedTensorSpec` representing the
      actions.
    observation_spec: Unused; accepted for interface symmetry.

  Raises:
    ValueError: If the nest holds more than one spec, or if that spec's
      shape is not `()` or `(1,)`.
  """
  del observation_spec  # not currently validated
  flat_specs = tf.nest.flatten(action_spec)
  if len(flat_specs) > 1:
    raise ValueError('Network only supports action_specs with a single action.')
  if flat_specs[0].shape not in [(), (1,)]:
    raise ValueError(
        'Network only supports action_specs with shape in [(), (1,)])')
@gin.configurable
class QNetwork(network.Network):
  """Feed forward network mapping observations to per-action Q-values."""

  def __init__(self,
               input_tensor_spec,
               action_spec,
               preprocessing_layers=None,
               preprocessing_combiner=None,
               conv_layer_params=None,
               fc_layer_params=(75, 40),
               dropout_layer_params=None,
               activation_fn=tf.keras.activations.relu,
               kernel_initializer=None,
               batch_squash=True,
               dtype=tf.float32,
               q_layer_activation_fn=None,
               name='QNetwork'):
    """Creates an instance of `QNetwork`.

    Args:
      input_tensor_spec: A nest of `tensor_spec.TensorSpec` representing the
        input observations.
      action_spec: A nest of `tensor_spec.BoundedTensorSpec` representing the
        actions.
      preprocessing_layers: (Optional.) A nest of `tf.keras.layers.Layer`
        representing preprocessing for the different observations.
        All of these layers must not be already built. For more details see
        the documentation of `networks.EncodingNetwork`.
      preprocessing_combiner: (Optional.) A keras layer that takes a flat list
        of tensors and combines them. Good options include
        `tf.keras.layers.Add` and `tf.keras.layers.Concatenate(axis=-1)`.
        This layer must not be already built. For more details see
        the documentation of `networks.EncodingNetwork`.
      conv_layer_params: Optional list of convolution layers parameters, where
        each item is a length-three tuple indicating (filters, kernel_size,
        stride).
      fc_layer_params: Optional list of fully_connected parameters, where each
        item is the number of units in the layer.
      dropout_layer_params: Optional list of dropout layer parameters, where
        each item is the fraction of input units to drop. The dropout layers
        are interleaved with the fully connected layers; there is a dropout
        layer after each fully connected layer, except if the entry in the
        list is None. This list must have the same length of fc_layer_params,
        or be None.
      activation_fn: Activation function, e.g. tf.keras.activations.relu.
      kernel_initializer: Initializer to use for the kernels of the conv and
        dense layers. If none is provided a default variance_scaling_initializer
      batch_squash: If True the outer_ranks of the observation are squashed
        into the batch dimension. This allow encoding networks to be used with
        observations with shape [BxTx...].
      dtype: The dtype to use by the convolution and fully connected layers.
      q_layer_activation_fn: Activation function for the Q layer.
      name: A string representing the name of the network.

    Raises:
      ValueError: If `input_tensor_spec` contains more than one observation.
        Or if `action_spec` contains more than one action.
    """
    validate_specs(action_spec, input_tensor_spec)
    action_spec = tf.nest.flatten(action_spec)[0]
    # Number of discrete actions the Q head must score.
    num_actions = action_spec.maximum - action_spec.minimum + 1
    encoder_input_tensor_spec = input_tensor_spec

    encoder = encoding_network.EncodingNetwork(
        encoder_input_tensor_spec,
        preprocessing_layers=preprocessing_layers,
        preprocessing_combiner=preprocessing_combiner,
        conv_layer_params=conv_layer_params,
        fc_layer_params=fc_layer_params,
        dropout_layer_params=dropout_layer_params,
        activation_fn=activation_fn,
        kernel_initializer=kernel_initializer,
        batch_squash=batch_squash,
        dtype=dtype)

    # Small uniform kernel init plus a negative bias keeps initial
    # Q-values close together and slightly pessimistic.
    q_value_layer = tf.keras.layers.Dense(
        num_actions,
        activation=q_layer_activation_fn,
        kernel_initializer=tf.random_uniform_initializer(
            minval=-0.03, maxval=0.03),
        bias_initializer=tf.constant_initializer(-0.2),
        dtype=dtype)

    super(QNetwork, self).__init__(
        input_tensor_spec=input_tensor_spec,
        state_spec=(),
        name=name)

    self._encoder = encoder
    self._q_value_layer = q_value_layer

  def call(self, observation, step_type=None, network_state=(), training=False):
    """Runs the given observation through the network.

    Args:
      observation: The observation to provide to the network.
      step_type: The step type for the given observation. See `StepType` in
        time_step.py.
      network_state: A state tuple to pass to the network, mainly used by
        RNNs.
      training: Whether the output is being used for training.

    Returns:
      A tuple `(logits, network_state)`.
    """
    state, network_state = self._encoder(
        observation, step_type=step_type, network_state=network_state,
        training=training)
    q_value = self._q_value_layer(state, training=training)
    # Defect fix: removed a stray '| PypiClean' extraction artifact that
    # corrupted this return statement (it would NameError at runtime).
    return q_value, network_state
/libdw-4.3.0-py3-none-any.whl/eBot/Locator_EKF.py | import numpy as np
from math import pi
from time import *
class Locator_EKF:
    """Extended Kalman filter estimating a differential-drive robot pose.

    State vector self.x is a 5x1 np.matrix; from the measurement matrix H
    below: rows are [x, y, theta, wheel_speed_a, wheel_speed_b].
    """

    def __init__(self, pos, heading, wheel_distance = 0.1):
        """
        :param pos: initial (x, y) position
        :param heading: initial heading (radians)
        :param wheel_distance: distance between the wheels
        """
        self.l = wheel_distance
        self.R = np.asmatrix( np.diag(np.array([1,1,1])) )  # The measurement covariance matrix
        self.Q = np.asmatrix( 0.01*np.identity(5) )  # Process covariance matrix
        # Measurement matrix: we observe theta and the two wheel speeds.
        self.H = np.matrix([[0, 0, 1, 0, 0],
                            [0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 1]])
        self.P = np.asmatrix( np.identity(5) )  # Initial covariance matrix
        self.x = np.matrix( [ [pos[0]] , [pos[1]] , [heading], [0.] , [0.] ])

    def get_position(self):
        """Return the current (x, y) estimate."""
        return self.x[0,0] , self.x[1,0]

    def get_heading(self):
        """Return the current heading estimate (radians)."""
        return self.x[2,0]

    def update_state(self,data,Ts):
        """
        :param data: the measurement vector: 3 by 1, holding the rotational
                velocity from the gyro and the right/left motor speeds
                from the encoders
        :param Ts: the sampling time
        :return: updated (x, y, theta)
        """
        t1 = time()
        z = np.matrix([ [data[0]] , [data[1]] , [data[2]] ])
        # Prediction step (differential-drive motion model).
        x1 = np.array([[self.x[0] + Ts/2*(self.x[3]+self.x[4])*np.cos(self.x[2])],
                       [self.x[1] + Ts/2*(self.x[3]+self.x[4])*np.sin(self.x[2])],
                       [self.x[2] + Ts/self.l*(self.x[3]-self.x[4])],
                       [self.x[3]],
                       [self.x[4]]])
        x1 = np.asmatrix(x1)
        x1 = np.transpose(x1)
        # Jacobian of the motion model evaluated at the prediction.
        A = np.matrix([[1, 0, -Ts/2*(x1[3]+x1[4])*np.sin(x1[2]), Ts/2*np.cos(x1[2]), Ts/2*np.cos(x1[2])],
                       [0, 1, Ts/2*(x1[3]+x1[4])*np.cos(x1[2]), Ts/2*np.sin(x1[2]), Ts/2*np.sin(x1[2])],
                       [0, 0, 1, Ts/self.l, -Ts/self.l],
                       [0, 0, 0, 1, 0],
                       [0, 0, 0, 0, 1]])
        self.P = A*self.P*A.T+self.Q
        # Predicted measurement (theta and the two wheel speeds).
        z1 = np.array([[x1[2]],
                       [x1[3]],
                       [x1[4]]])
        z1 = np.asmatrix(z1)
        z1 = z1.T
        P12 = self.P*self.H.T
        # Cholesky-factored innovation covariance for the Kalman gain.
        R = np.linalg.cholesky(self.H*P12+self.R)
        U = P12*np.linalg.inv(R)
        self.x = x1 + U *( R.T.I*(z-z1) )
        self.P = self.P-U*U.T
        # Defect fix: removed a stray '| PypiClean' extraction artifact
        # that corrupted this return statement.
        return self.x[0,0] , self.x[1,0] , self.x[2,0]
/rondsspark-0.0.4.23.tar.gz/rondsspark-0.0.4.23/ronds_sdk/tools/utils.py | import datetime
import json
from typing import Callable, List, Union
from ronds_sdk import error
class WrapperFunc(object):
    """Abstract base for wrapped callables."""

    def call(self, *args, **kwargs):
        """Invoke the wrapped callable; subclasses must override."""
        raise NotImplementedError
class ForeachBatchFunc(WrapperFunc):
    """Wraps a callable together with preset keyword arguments.

    call() forwards positional arguments unchanged and merges the preset
    kwargs with call-time kwargs (call-time values win).
    """

    def __init__(self,
                 func,  # type: Callable
                 **kwargs
                 ):
        self._func = func
        self._kwargs = kwargs

    def call(self, *args, **kwargs):
        merged_kwargs = dict(self._kwargs)
        merged_kwargs.update(kwargs)
        self._func(*args, **merged_kwargs)
class Singleton(type):
    """Metaclass caching one instance per class.

    The first call constructs the instance; later calls return the cached
    one (constructor arguments are then ignored).
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class RuleParser(object):
    """Parses the rule configuration exported by the phm rule editor.

    The JSON file lists the rules used to read point data from Cassandra:
    rule ids, the Cassandra table type to read, point id lists, etc.
    """

    def __init__(self,
                 rule_path,  # type: str
                 ):
        """
        :param rule_path: path of the JSON rule configuration file
        """
        self._rule_path = rule_path

    def load(self) -> list:
        """Read and parse the JSON rule file.

        :return: the parsed rule list
        :raises RuntimeError: if the file content is missing
        """
        with open(self._rule_path, 'r', encoding='utf-8') as r:
            config = r.read()
        if config is None:
            raise RuntimeError("config is None")
        return json.loads(config.strip('\t\r\n'))

    @staticmethod
    def point_ids(rule: dict) -> List[str]:
        """Collect the point ids referenced by *rule*.

        Bug fix: *rule* comes from json.loads(), so each entry of
        rule['points'] is a dict -- the previous attribute access
        (point.point_id) raised AttributeError.

        :param rule: rule configuration
        :return: point id list
        """
        points = rule.get('points')
        p_list = list()
        if points:
            for point in points:
                p_list.append(point['point_id'])
        return p_list

    @staticmethod
    def datetime_format():
        """Datetime format shared by all rule timestamps."""
        return '%Y-%m-%d %H:%M:%S'
class GraphParser(object):
    """Parses the phm rule-editor "graph" JSON config (Cassandra/Kafka
    data-source settings)."""

    def __init__(self,
                 file_path=None
                 ):
        """
        :param file_path: path of the JSON configuration file
        """
        self._file_path = file_path
        self.__graph_dict = None

    def load(self):
        # type: () -> dict
        """Read and parse the JSON graph file."""
        with open(self._file_path, 'r', encoding='utf-8') as reader:
            config = reader.read()
        if config is None:
            raise RuntimeError("config is None")
        return json.loads(config.strip('\t\r\n'))

    def get_graph(self):
        # type: () -> dict
        """Lazily load and cache the parsed graph dict."""
        if self.__graph_dict is None:
            self.__graph_dict = self.load()
        return self.__graph_dict

    def _act_config(self, keys):
        # type: (set) -> dict
        """Collect every act's 'actConfig' entries whose key is in *keys*.

        :param keys: keys to pick up
        :return: dict of matching configuration entries
        """
        graph = self.get_graph()
        res_dict = dict()
        if 'acts' not in graph:
            return res_dict
        acts = graph.get('acts')
        assert isinstance(acts, list)
        for act in acts:
            assert isinstance(act, dict)
            if 'actConfig' not in act:
                continue
            act_config = act.get('actConfig')
            assert isinstance(act_config, dict)
            for cfg_key, cfg_value in act_config.items():
                if cfg_key in keys:
                    res_dict[cfg_key] = cfg_value
        return res_dict

    def kafka_source_topics(self):
        # type: () -> dict
        """Kafka source configs: alarm, indices, graph json and exception
        topics."""
        # noinspection SpellCheckingInspection
        return self._act_config({'eventKafkaSource',
                                 'indiceKafkaSource',
                                 'graphKafkaSource',
                                 'exceptionKafkaSource',
                                 })

    def kafka_config(self):
        # type: () -> dict
        """First Kafka source config carrying both 'bootstraps' and 'port'.

        All topics are assumed to share one Kafka cluster.
        """
        for source_name, kafka_config in self.kafka_source_topics().items():
            assert isinstance(kafka_config, dict)
            if 'bootstraps' in kafka_config and 'port' in kafka_config:
                return kafka_config

    def kafka_bootstraps(self):
        # type: () -> str
        """'host:port' bootstrap string for the Kafka cluster."""
        kafka_config = self.kafka_config()
        return '%s:%s' % (kafka_config['bootstraps'], kafka_config['port'])

    def cassandra_sources(self):
        # type: () -> dict
        """Cassandra source/table configs: vibration and process tables."""
        return self._act_config({'processCassandraSource',
                                 'vibCassandraSource',
                                 })

    # noinspection SpellCheckingInspection
    def cassandra_process_table(self):
        # type: () -> str
        """Name of the Cassandra process-data table, if configured."""
        source_dict = self.cassandra_sources()
        if 'processCassandraSource' in source_dict:
            process_dict = source_dict.get('processCassandraSource')
            assert isinstance(process_dict, dict)
            dt_names = process_dict.get('dtnames')
            assert isinstance(dt_names, list)
            return dt_names[0]

    def cassandra_config(self):
        # type: () -> dict
        """First Cassandra source config carrying 'address' and 'keyspace'.

        The vibration and process tables are assumed to share one cluster.
        """
        for source_name, source_config in self.cassandra_sources().items():
            assert isinstance(source_config, dict)
            if 'address' in source_config and 'keyspace' in source_config:
                return source_config

    def cassandra_host(self):
        # type: () -> list
        """Cassandra contact points as a list of host strings."""
        config = self.cassandra_config()
        if 'address' in config:
            address = config['address']  # type: str
            return address.split(",")

    def cassandra_keyspace(self):
        # type: () -> str
        """Configured Cassandra keyspace, if any."""
        config = self.cassandra_config()
        if 'keyspace' in config:
            return config['keyspace']

    def window_duration(self):
        # type: () -> int
        """Length of the periodic Cassandra scan window, in seconds
        (default 300)."""
        duration_dict = self._act_config({'window_duration'})
        if len(duration_dict) > 0:
            return int(next(iter(duration_dict.values())))
        return 300

    def start_time(self):
        # type: () -> str
        """Scan start time: now minus 'collect_to_current' minutes,
        formatted with RuleParser.datetime_format()."""
        collect_to_current = self._act_config({'collect_to_current'})
        if len(collect_to_current) > 0:
            offset = int(next(iter(collect_to_current.values())))
            start_date = datetime.datetime.now() - datetime.timedelta(minutes=offset)
            return start_date.strftime(RuleParser.datetime_format())
def to_bool(v):
    # type: (Union[str, bool]) -> bool
    """Coerce *v* to bool.

    Real bools pass through; the string 'true' (any case) is True;
    everything else -- including None -- is False.
    """
    if type(v) is bool:
        return v
    if type(v) is str:
        return v.lower() == 'true'
    return False
def date_format_str(date):
    # type: (datetime.datetime) -> str
    """Format a datetime as '%Y-%m-%d %H:%M:%S'; strings pass through.

    :param date: datetime instance or pre-formatted string
    :return: formatted date string
    :raises TypeError: for any other input type
    """
    if isinstance(date, datetime.datetime):
        return date.strftime(RuleParser.datetime_format())
    if isinstance(date, str):
        return date
    raise TypeError('expected datetime, but found %s' % type(date))
def to_dict(value):
    """Serialize *value* for JSON-ish output.

    datetimes become 'YYYY-mm-dd HH:MM:SS.mmm' strings (millisecond
    precision); any other object becomes a dict of its public
    (non-underscore) attributes.

    Defect fix: removed a stray '| PypiClean' extraction artifact that
    corrupted the non-datetime return statement.
    """
    if isinstance(value, datetime.datetime):
        # [0:-3] truncates microseconds down to milliseconds.
        return datetime.datetime.strftime(value, '%Y-%m-%d %H:%M:%S.%f')[0:-3]
    return {k: v for k, v in value.__dict__.items() if not str(k).startswith('_')}
/dot_blaster-1.0.3.tar.gz/dot_blaster-1.0.3/gamelib/dot_blaster/sparks.py | import math
import random
import pygame
class Spark:
    """A single particle: a position, a direction, and a decaying speed.

    The spark dies (alive = False) once its speed decays to <= 0.
    """

    def __init__(self, loc, angle, speed, color, scale=1):
        self.loc = loc
        self.angle = angle
        self.speed = speed
        self.scale = scale
        self.color = color
        self.alive = True

    def point_towards(self, angle, rate):
        """Rotate the spark towards *angle*, by at most *rate* radians."""
        # Signed shortest angular difference in (-pi, pi].
        rotate_direction = (
            (angle - self.angle + math.pi * 3) % (math.pi * 2)
        ) - math.pi
        try:
            rotate_sign = abs(rotate_direction) / rotate_direction
        except ZeroDivisionError:
            # Bug fix: was misspelled 'rotate_sing', which left
            # 'rotate_sign' unbound after a zero rotate_direction.
            rotate_sign = 1
        if abs(rotate_direction) < rate:
            self.angle = angle
        else:
            self.angle += rate * rotate_sign

    def calculate_movement(self, dt):
        """Return the [dx, dy] displacement for a time step *dt*."""
        return [
            math.cos(self.angle) * self.speed * dt,
            math.sin(self.angle) * self.speed * dt,
        ]

    # gravity and friction
    def velocity_adjust(self, friction, force, terminal_velocity, dt):
        """Apply a downward force and horizontal friction, then re-derive
        the angle from the adjusted movement vector."""
        movement = self.calculate_movement(dt)
        movement[1] = min(terminal_velocity, movement[1] + force * dt)
        movement[0] *= friction
        self.angle = math.atan2(movement[1], movement[0])
        # if you want to get more realistic, the speed should be adjusted here

    def move(self, dt):
        """Advance the spark by *dt*, decaying speed; kills it at speed 0."""
        movement = self.calculate_movement(dt)
        self.loc[0] += movement[0]
        self.loc[1] += movement[1]

        # a bunch of options to mess around with relating to angles...
        # self.point_towards(math.pi / 2, 0.02)
        # self.velocity_adjust(0.975, 0.2, 8, dt)
        # self.angle += 0.1

        self.speed -= 0.1
        if self.speed <= 0:
            self.alive = False

    def draw(self, surf, offset=[0, 0]):
        """Draw the spark as an elongated quad pointing along its angle."""
        if self.alive:
            points = [
                [
                    self.loc[0]
                    + math.cos(self.angle) * self.speed * self.scale,
                    self.loc[1]
                    + math.sin(self.angle) * self.speed * self.scale,
                ],
                [
                    self.loc[0]
                    + math.cos(self.angle + math.pi / 2)
                    * self.speed
                    * self.scale
                    * 0.3,
                    self.loc[1]
                    + math.sin(self.angle + math.pi / 2)
                    * self.speed
                    * self.scale
                    * 0.3,
                ],
                [
                    self.loc[0]
                    - math.cos(self.angle) * self.speed * self.scale * 3.5,
                    self.loc[1]
                    - math.sin(self.angle) * self.speed * self.scale * 3.5,
                ],
                [
                    self.loc[0]
                    + math.cos(self.angle - math.pi / 2)
                    * self.speed
                    * self.scale
                    * 0.3,
                    self.loc[1]
                    - math.sin(self.angle + math.pi / 2)
                    * self.speed
                    * self.scale
                    * 0.3,
                ],
            ]
            pygame.draw.polygon(surf, self.color, points)
class Sparks:
    """Manages a burst of Spark particles drawn onto a pygame surface."""

    def __init__(
        self,
        screen,
        color,
        loc=[0, 0],
        angle_range=(0, 360),
        speed_range=(2, 4),
        scale=1,
        num_sparks=20,
        loop=False,
    ):
        # NOTE(review): the mutable default 'loc=[0, 0]' is shared between
        # calls; it is only ever copied here, but callers should still
        # pass their own list.
        self.num_sparks = num_sparks
        self.screen = screen
        self.loc = loc
        self.angle_range = angle_range
        self.speed_range = speed_range
        self.scale = scale
        self.color = color
        self.sparks_list = []
        self.alive = False
        # Bug fix: the 'loop' parameter was ignored (self.loop was
        # hard-coded to False).
        self.loop = loop
        self.generate_sparks()

    def generate_sparks(self):
        """(Re)fill the particle list and mark the burst alive."""
        for _ in range(self.num_sparks):
            self.sparks_list.append(self.generate_one_spark())
        self.alive = True

    def generate_one_spark(self):
        """Create one Spark with a random angle/speed within the ranges."""
        return Spark(
            loc=self.loc.copy(),
            angle=math.radians(
                random.randint(self.angle_range[0], self.angle_range[1])
            ),
            speed=random.randint(self.speed_range[0], self.speed_range[1]),
            color=self.color,
            scale=self.scale,
        )

    def update(self):
        """Advance and draw all sparks; drop dead ones.

        Iterates indices in reverse so pop(i) does not shift unvisited
        entries.
        """
        if self.alive:
            for i, spark in sorted(enumerate(self.sparks_list), reverse=True):
                spark.move(1)
                spark.draw(self.screen)
                if not spark.alive:
                    self.sparks_list.pop(i)
            if len(self.sparks_list) == 0:
                # Defect fix: removed a stray '| PypiClean' extraction
                # artifact that corrupted this assignment.
                self.alive = False
/bip_utils-2.7.1-py3-none-any.whl/bip_utils/utils/misc/base32.py | # Imports
import base64
import binascii
from typing import Optional, Union
from bip_utils.utils.misc.algo import AlgoUtils
class Base32Const:
    """Class container for Base32 constants."""

    # Standard Base32 alphabet (RFC 4648)
    ALPHABET: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
    # Character used to pad encodings to a multiple of 8 characters
    PADDING_CHAR: str = "="
class _Base32Utils:
"""
Base32 utility class.
It provides some helper methods for decoding/encoding Base32 format.
"""
@staticmethod
def AddPadding(data: str) -> str:
"""
Add padding to an encoded Base32 string.
Used if the string was encoded with Base32Encoder.EncodeNoPadding
Args:
data (str): Data
Returns:
str: Padded string
"""
last_block_width = len(data) % 8
if last_block_width != 0:
data += (8 - last_block_width) * Base32Const.PADDING_CHAR
return data
@staticmethod
def TranslateAlphabet(data: str,
from_alphabet: str,
to_alphabet: str) -> str:
"""
Translate the standard Base32 alphabet to a custom one.
Args:
data (str) : Data
from_alphabet (str): Starting alphabet string
to_alphabet (str) : Final alphabet string
Returns:
str: String with translated alphabet
"""
return data.translate(str.maketrans(from_alphabet, to_alphabet))
class Base32Decoder:
    """
    Base32 decoder class.
    It provides methods for decoding from Base32 format.
    """

    @staticmethod
    def Decode(data: str,
               custom_alphabet: Optional[str] = None) -> bytes:
        """
        Decode from Base32.

        Args:
            data (str)                     : Data
            custom_alphabet (str, optional): Custom alphabet string

        Returns:
            bytes: Decoded bytes

        Raises:
            ValueError: If the Base32 string is not valid
        """
        padded = _Base32Utils.AddPadding(data)
        if custom_alphabet is not None:
            padded = _Base32Utils.TranslateAlphabet(padded, custom_alphabet, Base32Const.ALPHABET)
        try:
            return base64.b32decode(padded)
        except binascii.Error as ex:
            raise ValueError("Invalid Base32 string") from ex
class Base32Encoder:
    """
    Base32 encoder class.
    It provides methods for encoding to Base32 format.
    """

    @staticmethod
    def Encode(data: Union[bytes, str],
               custom_alphabet: Optional[str] = None) -> str:
        """
        Encode to Base32.

        Args:
            data (str or bytes)            : Data
            custom_alphabet (str, optional): Custom alphabet string

        Returns:
            str: Encoded string
        """
        b32_enc = AlgoUtils.Decode(base64.b32encode(AlgoUtils.Encode(data)))
        if custom_alphabet is not None:
            b32_enc = _Base32Utils.TranslateAlphabet(b32_enc, Base32Const.ALPHABET, custom_alphabet)
        return b32_enc

    @staticmethod
    def EncodeNoPadding(data: Union[bytes, str],
                        custom_alphabet: Optional[str] = None) -> str:
        """
        Encode to Base32 and strip the trailing padding characters.

        Args:
            data (str or bytes)            : Data
            custom_alphabet (str, optional): Custom alphabet string

        Returns:
            str: Encoded string
        """
        # Defect fix: removed a stray '| PypiClean' extraction artifact
        # that corrupted this return statement.
        return Base32Encoder.Encode(data, custom_alphabet).rstrip(Base32Const.PADDING_CHAR)
/gbptesthorizonui-0.9.0.tar.gz/horizon-2014.2.0.dev282.g7faf497/openstack_dashboard/openstack/common/log.py | import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from openstack_dashboard.openstack.common.gettextutils import _
from openstack_dashboard.openstack.common import importutils
from openstack_dashboard.openstack.common import jsonutils
from openstack_dashboard.openstack.common import local
# Default strftime format for %(asctime)s in log records.
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
# Key names whose values are masked by mask_password() below.
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
# Templates covering key=value, XML <key>value</key>, and the JSON-style
# "key" : "value" forms; %(key)s is substituted with each sanitize key.
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(</%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
# Pre-compile one pattern per (key, template) pair; DOTALL so values that
# span newlines are still matched.
for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
help='The name of logging configuration file. It does not '
'disable existing loggers, but just appends specified '
'logging configuration to any other existing logging '
'options. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='DEPRECATED. '
'A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED during I, '
'and then will be changed in J to honor RFC5424'),
cfg.BoolOpt('use-syslog-rfc-format',
# TODO(bogdando) remove or use True after existing
# syslog format deprecation in J
default=False,
help='(Optional) Use syslog rfc5424 format for logging. '
'If enabled, will add APP-NAME (RFC5424) before the '
'MSG part of the syslog message. The old format '
'without APP-NAME is deprecated in I, '
'and will be removed in J.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user_identity)s] '
'%(instance)s%(message)s',
help='Format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN'
],
help='List of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='Publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
# Fall back to a do-nothing handler on interpreters without
# logging.NullHandler (added in Python 2.7).
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    class NullHandler(logging.Handler):
        # Swallow every record; never acquires a lock.
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the log file destination from CONF.

    Returns an absolute/relative file path built from CONF.log_file and
    CONF.log_dir, a per-binary ``<log_dir>/<binary>.log`` path when only a
    directory is configured, or None (meaning: log to stdout/stderr).
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir
    if logfile:
        return os.path.join(logdir, logfile) if logdir else logfile
    if logdir:
        binary = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, binary),)
    return None
def mask_password(message, secret="***"):
    """Replace password values in *message* with *secret*.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)
    # Fast path: no sensitive key name present, nothing to mask.
    if not any(key in message for key in _SANITIZE_KEYS):
        return message
    # Keep the surrounding delimiters (groups 1 and 2), replace the value.
    replacement = r'\g<1>' + secret + r'\g<2>'
    for pattern in _SANITIZE_PATTERNS:
        message = re.sub(pattern, replacement, message)
    return message
class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter base class adding the synthesized AUDIT level."""

    def audit(self, msg, *args, **kwargs):
        # AUDIT is defined at module scope as logging.INFO + 1.
        self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers building the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        # NOTE(review): LoggerAdapter.__init__ is deliberately not called;
        # the 'logger' attribute is supplied by the property below instead.
        self._logger = None
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        # Build the ContextAdapter on first access and cache it.
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """LoggerAdapter that injects request-context data into log records."""

    # Alias so callers may use .warn() as well as .warning().
    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        # project_name/version_string become %(project)s / %(version)s
        # in format strings (see process() below).
        self.logger = logger
        self.project = project_name
        self.version = version_string
        # msg -> list of args tuples already logged, to log each
        # deprecation only once per unique (msg, args).
        self._deprecated_messages_sent = dict()

    @property
    def handlers(self):
        # Expose the wrapped logger's handlers (LoggerAdapter does not).
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Call this method when a deprecated feature is used.
        If the system is configured for fatal deprecations then the message
        is logged at the 'critical' level and :class:`DeprecatedConfig` will
        be raised.
        Otherwise, the message will be logged (once) at the 'warn' level.
        :raises: :class:`DeprecatedConfig` if the system is configured for
        fatal deprecations.
        """
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        # Using a list because a tuple with dict can't be stored in a set.
        sent_args = self._deprecated_messages_sent.setdefault(msg, list())
        if args in sent_args:
            # Already logged this message, so don't log it again.
            return
        sent_args.append(args)
        self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        """Attach context, instance and project info via kwargs['extra']."""
        # NOTE(mrodden): catch any Message/other object and
        #                coerce to unicode before they can get
        #                to the python logging and possibly
        #                cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']
        # Explicit context kwarg wins; otherwise fall back to thread-local.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))
        # Build the %(instance)s prefix from an instance object or UUID.
        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid') or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra
        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
        extra['project'] = self.project
        extra['version'] = self.version
        # Nested copy lets JSONFormatter serialize the extras verbatim.
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that serializes the entire LogRecord as a JSON object."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        #                since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Return the traceback as a list of lines (not one big string)."""
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            # Split multi-line entries and drop empty strings.
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        """Serialize every standard LogRecord field to a JSON string."""
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}
        # 'extra' is attached by ContextAdapter.process().
        if hasattr(record, 'extra'):
            message['extra'] = record.extra
        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)
        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as CRITICAL.

    The full traceback is attached only when verbose/debug is enabled;
    otherwise just the one-line exception summary is logged.
    """
    def logging_excepthook(exc_type, value, tb):
        log_kwargs = {}
        if CONF.verbose or CONF.debug:
            log_kwargs['exc_info'] = (exc_type, value, tb)
        summary = "".join(traceback.format_exception_only(exc_type, value))
        getLogger(product_name).critical(summary, **log_kwargs)
    return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config
        self.err_msg = err_msg

    def __str__(self):
        return self.message % {'log_config': self.log_config,
                               'err_msg': self.err_msg}
def _load_log_config(log_config_append):
    """Load a logging config file, wrapping parse errors in LogConfigError."""
    try:
        # Append to, rather than replace, any loggers configured so far.
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except moves.configparser.Error as exc:
        raise LogConfigError(log_config_append, str(exc))
def setup(product_name, version='unknown'):
    """Setup logging."""
    # An explicit logging config file wins over all CONF-driven settings.
    if CONF.log_config_append:
        _load_log_config(CONF.log_config_append)
    else:
        _setup_logging_from_conf(product_name, version)
    # Route uncaught exceptions through the product's logger.
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
    """Override the default value of the context log format string option."""
    cfg.set_defaults(log_opts,
                     logging_context_format_string=
                     logging_context_format_string)
def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility code.

    Accepts either a handler attribute name (e.g. 'LOG_USER') or a key of
    SysLogHandler.facility_names (e.g. 'user').

    :raises TypeError: if the configured facility is not recognized.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)
    if facility is None:
        # list() is required for Python 3, where keys() is a view object
        # without an extend() method.
        valid_facilities = list(facility_names.keys())
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))
    return facility
class RFCSysLogHandler(logging.handlers.SysLogHandler):
    """SysLogHandler that prepends APP-NAME (RFC 5424) to each message."""

    def __init__(self, *args, **kwargs):
        # Capture the executable name once; it is used as the APP-NAME.
        self.binary_name = _get_binary_name()
        super(RFCSysLogHandler, self).__init__(*args, **kwargs)

    def format(self, record):
        msg = super(RFCSysLogHandler, self).format(record)
        msg = self.binary_name + ' ' + msg
        return msg
def _setup_logging_from_conf(project, version):
    """Configure root-logger handlers, formatters and levels from CONF."""
    log_root = getLogger(None).logger
    # Start from a clean slate: drop any handlers configured earlier.
    for handler in log_root.handlers:
        log_root.removeHandler(handler)
    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        # TODO(bogdando) use the format provided by RFCSysLogHandler
        #   after existing syslog format deprecation in J
        if CONF.use_syslog_rfc_format:
            syslog = RFCSysLogHandler(address='/dev/log',
                                      facility=facility)
        else:
            syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                    facility=facility)
        log_root.addHandler(syslog)
    logpath = _get_log_file_path()
    if logpath:
        # WatchedFileHandler reopens the file if it is rotated underneath us.
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)
    if CONF.publish_errors:
        handler = importutils.import_object(
            "openstack_dashboard.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)
    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently.  This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(project=project,
                                                  version=version,
                                                  datefmt=datefmt))
    # Root level: debug > verbose > default WARNING.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)
    # Apply per-module overrides such as 'amqp=WARN'.
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return the cached ContextAdapter-wrapped logger for ``name``.

    The adapter is built (around the stdlib logger of the same name) on
    first request and reused afterwards.
    """
    adapter = _loggers.get(name)
    if adapter is None:
        adapter = ContextAdapter(logging.getLogger(name), name, version)
        _loggers[name] = adapter
    return adapter
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.
    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    # The real logger is built on first use (see LazyAdapter.logger).
    return LazyAdapter(name, version)
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        # Drop trailing whitespace/newlines before logging one record.
        stripped = msg.rstrip()
        self.logger.log(self.level, stripped)
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.
    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string.  You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.
    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter
    If available, uses the context value stored in TLS - local.store.context
    """

    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance
        Takes additional keyword arguments which can be used in the message
        format string.
        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string
        """
        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')
        logging.Formatter.__init__(self, *args, **kwargs)

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # store project info
        record.project = self.project
        record.version = self.version
        # store request info
        context = getattr(local.store, 'context', None)
        if context:
            # Copy every context field onto the record so format strings
            # may reference them directly.
            d = _dictify_context(context)
            for k, v in d.items():
                setattr(record, k, v)
        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color', 'user_identity'):
            if key not in record.__dict__:
                record.__dict__[key] = ''
        # Pick the context-aware or the plain format string.
        if record.__dict__.get('request_id'):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string
        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)
        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()
        # The prefix may reference %(asctime)s; compute it once if so.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)
        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """StreamHandler that exposes a per-level ANSI color as %(color)s."""

    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        # Default to no color for custom levels instead of raising
        # KeyError on levels outside LEVEL_COLORS.
        record.color = self.LEVEL_COLORS.get(record.levelno, '')
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised by ContextAdapter.deprecated() when deprecations are fatal."""

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # super() previously targeted Exception, skipping this class's own
        # MRO entry; target DeprecatedConfig so subclassing works correctly.
        super(DeprecatedConfig, self).__init__(self.message % dict(msg=msg))
/catana-2.0.0b3.tar.gz/catana-2.0.0b3/external/pybind11/docs/advanced/cast/custom.rst | Custom type casters
===================
In very rare cases, applications may require custom type casters that cannot be
expressed using the abstractions provided by pybind11, thus requiring raw
Python C API calls. This is fairly advanced usage and should only be pursued by
experts who are familiar with the intricacies of Python reference counting.
The following snippets demonstrate how this works for a very simple ``inty``
type that should be convertible from Python types that provide a
``__int__(self)`` method.
.. code-block:: cpp
struct inty { long long_value; };
void print(inty s) {
std::cout << s.long_value << std::endl;
}
The following Python snippet demonstrates the intended usage from the Python side:
.. code-block:: python
class A:
def __int__(self):
return 123
from example import print
print(A())
To register the necessary conversion routines, it is necessary to add
a partial overload to the ``pybind11::detail::type_caster<T>`` template.
Although this is an implementation detail, adding partial overloads to this
type is explicitly allowed.
.. code-block:: cpp
namespace pybind11 { namespace detail {
template <> struct type_caster<inty> {
public:
/**
* This macro establishes the name 'inty' in
* function signatures and declares a local variable
* 'value' of type inty
*/
PYBIND11_TYPE_CASTER(inty, _("inty"));
/**
* Conversion part 1 (Python->C++): convert a PyObject into a inty
* instance or return false upon failure. The second argument
* indicates whether implicit conversions should be applied.
*/
bool load(handle src, bool) {
/* Extract PyObject from handle */
PyObject *source = src.ptr();
/* Try converting into a Python integer value */
PyObject *tmp = PyNumber_Long(source);
if (!tmp)
return false;
/* Now try to convert into a C++ int */
value.long_value = PyLong_AsLong(tmp);
Py_DECREF(tmp);
/* Ensure return code was OK (to avoid out-of-range errors etc) */
return !(value.long_value == -1 && !PyErr_Occurred());
}
/**
* Conversion part 2 (C++ -> Python): convert an inty instance into
* a Python object. The second and third arguments are used to
* indicate the return value policy and parent object (for
* ``return_value_policy::reference_internal``) and are generally
* ignored by implicit casters.
*/
static handle cast(inty src, return_value_policy /* policy */, handle /* parent */) {
return PyLong_FromLong(src.long_value);
}
};
}} // namespace pybind11::detail
.. note::
A ``type_caster<T>`` defined with ``PYBIND11_TYPE_CASTER(T, ...)`` requires
that ``T`` is default-constructible (``value`` is first default constructed
and then ``load()`` assigns to it).
.. warning::
When using custom type casters, it's important to declare them consistently
in every compilation unit of the Python extension module. Otherwise,
undefined behavior can ensue.
| PypiClean |
/PyRECONSTRUCT-2.2.0.tar.gz/PyRECONSTRUCT-2.2.0/pyrecon/classes/Series.py | import os, re
from Section import Section as Section
# handleXML is imported in Series.update()
class Series:
    def __init__(self, *args, **kwargs):
        '''Create a Series with every attribute defaulted to None, then
        populate it from the constructor arguments via processArguments().'''
        # Attributes loaded from the series XML (via update())
        self.index = None
        self.viewport = None
        self.units = None
        # Autosave / UI behavior flags
        self.autoSaveSeries = None
        self.autoSaveSection = None
        self.warnSaveSection = None
        self.beepDeleting = None
        self.beepPaging = None
        self.hideTraces = None
        self.unhideTraces = None
        self.hideDomains = None
        self.unhideDomains = None
        self.useAbsolutePaths = None
        self.defaultThickness = None
        self.zMidSection = None
        # Thumbnail display attributes
        self.thumbWidth = None
        self.thumbHeight = None
        self.fitThumbSections = None
        self.firstThumbSection = None
        self.lastThumbSection = None
        self.skipSections = None
        self.displayThumbContours = None
        self.useFlipbookStyle = None
        self.flipRate = None
        # Proxy image attributes
        self.useProxies = None
        self.widthUseProxies = None
        self.heightUseProxies = None
        self.scaleProxies = None
        self.significantDigits = None
        # Default trace appearance
        self.defaultBorder = None
        self.defaultFill = None
        self.defaultMode = None
        self.defaultName = None
        self.defaultComment = None
        # list* attributes (column toggles for Reconstruct's list views)
        self.listSectionThickness = None
        self.listDomainSource = None
        self.listDomainPixelsize = None
        self.listDomainLength = None
        self.listDomainArea = None
        self.listDomainMidpoint = None
        self.listTraceComment = None
        self.listTraceLength = None
        self.listTraceArea = None
        self.listTraceCentroid = None
        self.listTraceExtent = None
        self.listTraceZ = None
        self.listTraceThickness = None
        self.listObjectRange = None
        self.listObjectCount = None
        self.listObjectSurfarea = None
        self.listObjectFlatarea = None
        self.listObjectVolume = None
        self.listZTraceNote = None
        self.listZTraceRange = None
        self.listZTraceLength = None
        self.borderColors = None
        self.fillColors = None
        # 3D reconstruction attributes
        self.offset3D = None
        self.type3Dobject = None
        self.first3Dsection = None
        self.last3Dsection = None
        self.max3Dconnection = None
        self.upper3Dfaces = None
        self.lower3Dfaces = None
        self.faceNormals = None
        self.vertexNormals = None
        self.facets3D = None
        self.dim3D = None
        # Grid attributes
        self.gridType = None
        self.gridSize = None
        self.gridDistance = None
        self.gridNumber = None
        # Stop-criteria attributes (hue/saturation/brightness/area thresholds)
        self.hueStopWhen = None
        self.hueStopValue = None
        self.satStopWhen = None
        self.satStopValue = None
        self.brightStopWhen = None
        self.brightStopValue = None
        self.tracesStopWhen = None
        self.areaStopPercent = None
        self.areaStopSize = None
        self.ContourMaskWidth = None
        self.smoothingLength = None
        # Movement increment attributes
        self.mvmtIncrement = None
        self.ctrlIncrement = None
        self.shiftIncrement = None
        #Non-attributes
        self.name = None
        self.path = None
        self.contours = []
        self.zcontours = []
        self.sections = []
        self.processArguments(args, kwargs)
def processArguments(self, args, kwargs):
# 1) ARGS
try:
self.update(*args)
except Exception, e:
print('Could not process Series arg:%s\n\t'%str(args)+str(e))
# 2) KWARGS
try:
self.update(**kwargs)
except Exception, e:
print('Could not process Series kwarg:%s\n\t'%str(kwargs)+str(e))
# MUTATORS
def update(self, *args, **kwargs):
for arg in args:
# String argument
if type(arg) == type(''): # Possible path to XML?
import pyrecon.tools.handleXML as xml
try: # given full path to .ser file
self.update(*xml.process(arg))
self.path = arg
self.name = arg.split('/')[len(arg.split('/'))-1].replace('.ser','')
except: # given directory path instead of path to .ser file
path = arg
if path[-1] != '/':
path += '/'
path = path+str([f for f in os.listdir(path) if '.ser' in f].pop())
self.update(*xml.process(path))
self.path = path
self.name = path.split('/')[len(path.split('/'))-1].replace('.ser','')
# Dictionary
elif type(arg) == type({}):
for key in arg:
if key in self.__dict__:
self.__dict__[key] = arg[key]
# List
elif type(arg) == type([]):
for item in arg:
# Contour
if item.__class__.__name__ == 'Contour':
self.contours.append(item)
# ZSection
elif item.__class__.__name__ == 'ZContour':
self.zcontours.append(item)
# Section
elif item.__class__.__name__ == 'Section':
self.sections.append(item)
# Contour
elif arg.__class__.__name__ == 'Contour':
self.contours.append(arg)
# ZSection
elif arg.__class__.__name__ == 'ZContour':
self.zcontours.append(item)
# Section
elif arg.__class__.__name__ == 'Section':
self.sections.append(arg)
for kwarg in kwargs:
# Load sections
if 'sections' in kwargs:
if kwargs['sections'] == True:
print('Attempting to load sections...'),
ser = os.path.basename(self.path)
serfixer = re.compile(re.escape('.ser'), re.IGNORECASE)
sername = serfixer.sub('', ser)
# look for files with 'seriesname'+'.'+'number'
p = re.compile('^'+sername+'[.][0-9]*$')
sectionlist = [f for f in os.listdir(self.path.replace(ser,'')) if p.match(f)]
# create and append Sections for each section file
path = self.path.replace(os.path.basename(self.path),'')
for sec in sectionlist:
section = Section(path+sec)
if section.index is not None: #===
self.update(section)
# sort sections by index
self.sections = sorted(self.sections, key=lambda Section: Section.index)
print(' SUCCESS!')
# ACCESSORS
def attributes(self):
'''Returns a dict of this Serie's attributes'''
not_attributes = ['name','path','contours','zcontours','sections']
attributes = {}
for att in self.__dict__:
if att not in not_attributes: # if att is considered a desired attribute
attributes[att] = self.__dict__[att]
return attributes
def deleteTraces(self, exceptions=[]):
'''Deletes all traces except the regex found in exceptions list'''
for section in self.sections:
for contour in section.contours:
for regex in exceptions:
if re.compile(regex).match(contour.name):
pass
else:
print 'Removing:', contour.name
section.contours.remove(contour)
# calibrationTool functions
def zeroIdentity(self):
'''Converts points for all sections in a series to identity transform'''
for sec in self.sections:
for c in sec.contours:
if c.image is None: # Don't alter image contours i.e. domain1
c.points = c.transform.worldpts(c.points)
c.transform.dim = 0
c.transform.ycoef = [0,0,1,0,0,0]
c.transform.xcoef = [0,1,0,0,0,0]
c._tform = c.transform.tform()
# curationTool functions
def locateInvalidTraces(self, delete=False):
invalidDict = {}
for section in self.sections:
invalids = []
for contour in section.contours:
if contour.isInvalid():
invalids.append(contour.name)
if delete:
print 'deleted: ',contour.name,'at section',section.index
section.contours.remove(contour)
if len(invalids) != 0:
invalidDict[section.index] = invalids
return invalidDict
    def locateReverseTraces(self):
        '''Returns {section index: [contours]} for contours whose isReverse()
        is True (presumably reverse-wound traces -- confirm in Contour).'''
        reverseDict = {}
        for section in self.sections:
            revTraces = []
            for contour in section.contours:
                try:
                    if contour.isReverse():
                        revTraces.append(contour)
                except:
                    # NOTE: Python 2 print statement -- the printed expression
                    # is ('...') % (...), i.e. the formatted string. Under
                    # Python 3 this line would raise TypeError instead.
                    print('Invalid contour (%s on section %d) was ignored')%(contour.name, section.index)
                    print('\t check coordinates in XML file')
            if len(revTraces) != 0:
                reverseDict[section.index] = revTraces
        return reverseDict
    def locateDistantTraces(self, threshold=7):
        '''Returns a dictionary of indexes containing traces that exist after <threshold (def: 7)> sections of non-existence'''
        # Build a list of lists for all the contours in each section
        allSectionContours = []
        for section in self.sections:
            # Unique contour names per section.
            contours = list(set([cont.name for cont in section.contours]))
            allSectionContours.append(contours)
        # Go through list of contours and check for distances
        index = int(self.sections[0].index) # correct starting index (can be 0 or 1)
        distantTraces = {}
        for sec in range(len(allSectionContours)):
            traces = []
            for contour in allSectionContours[sec]:
                # Check above: absent for the next <threshold> sections but
                # present again somewhere beyond them.
                if sec+threshold+1 <= len(self.sections):
                    # Check and ignore if in section:section+threshold
                    sectionToThresholdContours = []
                    for contList in allSectionContours[sec+1:sec+threshold+1]:
                        sectionToThresholdContours.extend(contList)
                    if contour not in sectionToThresholdContours:
                        # Check if contour is in section+threshold and up
                        thresholdToEndContours = []
                        for contList in allSectionContours[sec+threshold+1:]:
                            thresholdToEndContours.extend(contList)
                        if contour in thresholdToEndContours:
                            traces.append(contour)
                # Check below: absent for the previous <threshold> sections
                # but present again somewhere before them.
                if sec-threshold-1 >= 0:
                    # Check and ignore if in section-threshold:section
                    minusThresholdToSectionContours = []
                    for contList in allSectionContours[sec-threshold:sec]:
                        minusThresholdToSectionContours.extend(contList)
                    if contour not in minusThresholdToSectionContours:
                        # Check if contour is in section-threshold and down
                        beginToMinusThresholdContours = []
                        for contList in allSectionContours[:sec-threshold]:
                            beginToMinusThresholdContours.extend(contList)
                        if contour in beginToMinusThresholdContours:
                            traces.append(contour)
            # Add traces to distantTraces dictionary
            if len(traces) != 0:
                distantTraces[index] = traces
            index += 1
        return distantTraces
    def locateDuplicates(self):
        '''Locates overlapping traces of the same name in a section. Returns a dictionary of section numbers with duplicates'''
        # Build dictionary of sections w/ contours whose name appear more than once in that section
        duplicateNames = {}
        for section in self.sections:
            duplicates = []
            contourNames = [cont.name for cont in section.contours] # List of contour names
            # Go through each contour, see if name appears in contourName > 1 time
            for contour in section.contours:
                if contourNames.count(contour.name) > 1:
                    duplicates.append(contour)
            if len(duplicates) != 0:
                duplicateNames[section.index] = duplicates
        # Go through each list of >1 contour names and check if actually overlaps
        duplicateDict = {}
        for key in duplicateNames:
            duplicates = []
            for contour in duplicateNames[key]:
                # Filter contours of same memory address so that overlap isn't tested on itself
                copyContours = [cont for cont in duplicateNames[key] if id(cont) != id(contour) and cont.name == contour.name]
                for cont in copyContours:
                    try:
                        if contour.overlaps(cont) == 1: # Perfect overlap (within threshold)
                            duplicates.append(cont)
                    except:
                        # NOTE: Python 2 print statement; the expression
                        # ('...') % (...) is formatted before printing.
                        print('Invalid contour (%s on section %d) was ignored')%(cont.name, key)
                        print('\t check coordinates in XML file')
            if len(duplicates) != 0:
                duplicateDict[key] = duplicates
        return duplicateDict
# excelTool functions
def getObject(self, regex):
'''Returns a list of 1 list per section containing all the contour that match the regex'''
objects = []
for section in self.sections:
section.append(section.getObject(regex))
return objects
    def getObjectLists(self):
        '''Returns sorted lists of dendrite names, protrusion names, trace names, and a list of other objects in series'''
        # Name grammar: a dendrite is "d##", a protrusion is "d##p##" (anchored
        # at end of name), and a trace is "d##<tracetype>##" (e.g. "d01c02").
        dendrite_expression = 'd[0-9]{2,}' # represents base dendrite name (d##)
        protrusion_expression = 'd[0-9]{2,}p[0-9]{2,}$' # represents base protrusion name (d##p##)
        trace_expression = 'd[0-9]{2,}.{1,}[0-9]{2,}' # represents trace name (d##<tracetype>##)
        # Convert expressions to usable regular expressions
        # (protrusion/trace matching is case-insensitive; dendrite is not).
        dendrite_expression = re.compile(dendrite_expression)
        protrusion_expression = re.compile(protrusion_expression, re.I)
        trace_expression = re.compile(trace_expression, re.I)
        # Create lists for names of dendrites, protrusions, traces, and other objects
        dendrites = []
        protrusions = []
        traces = []
        others = []
        for section in self.sections:
            for contour in section.contours:
                # Dendrite: keep only the matched "d##" prefix, not the full name.
                if dendrite_expression.match(contour.name) != None:
                    dendrites.append(contour.name[0:dendrite_expression.match(contour.name).end()])
                # Protrusion
                if protrusion_expression.match(contour.name) != None:
                    protrusions.append(contour.name)
                # Trace === expand to > 2 digits!
                # A trace is anything trace-shaped that is NOT a protrusion.
                if (trace_expression.match(contour.name) != None and
                    protrusion_expression.match(contour.name) == None):
                    traces.append(contour.name)
                    # Make sure a d##p## exists for this trace.
                    # NOTE(review): the fixed slices [0:3] and [4:6] assume
                    # exactly two-digit dendrite/trace numbers — see the
                    # "expand to > 2 digits" TODO above; confirm before relying
                    # on this for 3+ digit names.
                    thisProt = contour.name[0:3]+'p'+contour.name[4:6]
                    if (protrusion_expression.match(thisProt) and
                        thisProt not in protrusions):
                        protrusions.append(thisProt)
                # Everything else (other)
                if (dendrite_expression.match(contour.name) == None and
                    protrusion_expression.match(contour.name) == None and
                    trace_expression.match(contour.name) == None):
                    others.append(contour.name)
        # De-duplicate and sort each category before returning.
        return sorted(list(set(dendrites))), sorted(list(set(protrusions))), sorted(list(set(traces))), sorted(list(set(others)))
def getData(self, object_name, data_string):
string = str(data_string).lower()
if string == 'volume':
return self.getVolume(object_name)
elif string == 'total volume':
return self.getTotalVolume(object_name)
elif string == 'surface area':
return self.getSurfaceArea(object_name)
elif string == 'flat area':
return self.getFlatArea(object_name)
elif string == 'start':
return self.getStartEndCount(object_name)[0]
elif string == 'end':
return self.getStartEndCount(object_name)[1]
elif string == 'count':
return self.getStartEndCount(object_name)[2]
def getVolume(self, object_name):
'''Returns volume of the object throughout the series. Volume calculated by summing the value obtained by
multiplying the area by section thickness over all sections.'''
vol = 0
for section in self.sections:
for contour in section.contours:
if contour.name == object_name:
try:
contour.popShape()
vol += (contour.shape.area * section.thickness)
except:
print 'getVolume(): Invalid contour:', contour.name, 'in section index:', section.index, '\nCheck XML file and fix before trusting data.\n'
return vol
def getTotalVolume(self, object_name):
related_objects = []
if object_name[-1].isalpha():
object_name = object_name[:-1]
# Get all related objects by base object name
for section in self.sections:
for contour in section.contours:
if object_name in contour.name:
related_objects.append(contour.name)
# Find total volume by summing volume for all related objects
totVol = 0
for obj in list(set(related_objects)):
totVol+=self.getVolume(obj)
return totVol
def getSurfaceArea(self, object_name):
'''Returns surface area of the object throughout the series. Surface area calculated by summing
the length multiplied by section thickness across sections.'''
sArea = 0
for section in self.sections:
for contour in section.contours:
if contour.name == object_name:
try:
sArea += (contour.getLength() * section.thickness)
except:
print 'getSurfaceArea(): Invalid contour:', contour.name, 'in section index:', section.index, '\nCheck XML file and fix before trusting data.\n'
return sArea
def getFlatArea(self, object_name):
'''Returns the flat area of the object throughout the series. Flat area calculated by summing the area of
the object across all sections.'''
fArea = 0
for section in self.sections:
for contour in section.contours:
if contour.name == object_name:
try:
contour.popShape()
if contour.closed:
fArea += contour.shape.area
else:
fArea += (contour.getLength() * section.thickness)
except:
print 'getFlatArea(): Invalid contour:', contour.name, 'in section index:', section.index, '\nCheck XML file and fix before trusting data.\n'
return fArea
def getStartEndCount(self, object_name):
'''Returns a tuple containing the start index, end index, and count of the item in series.'''
start = 0
end = 0
count = 0
# Count
for section in self.sections:
for contour in section.contours:
if contour.name == object_name:
count += 1
# Start/End
if object_name in [cont.name for cont in section.contours]:
# Start index
if start == 0:
start = section.index
# End index
end = section.index
return start, end, count | PypiClean |
# Source: cloudformation_validator-0.6.36 / cloudformation_validator/custom_rules/IamManagedPolicyWildcardResourceRule.py
import sys
import inspect
from builtins import (str)
from cloudformation_validator.custom_rules.BaseRule import BaseRule
def lineno():
    """Returns a debug tag naming this rule, the calling function, and the caller's line number."""
    caller_frame = inspect.currentframe().f_back
    caller_name = inspect.stack()[1][3]
    return str(' - IamManagedPolicyWildcardResourceRule - caller: ' + str(caller_name) + ' - line number: ' + str(caller_frame.f_lineno))
class IamManagedPolicyWildcardResourceRule(BaseRule):
    """Warns when an AWS::IAM::ManagedPolicy allows the wildcard ('*') resource."""

    def __init__(self, cfn_model=None, debug=None):
        """
        Initialize IamManagedPolicyWildcardResourceRule
        :param cfn_model: parsed CloudFormation model to audit
        :param debug: enable verbose debug printing
        """
        BaseRule.__init__(self, cfn_model, debug=debug)

    def rule_text(self):
        """
        Return rule text
        :return: human-readable description of the violation
        """
        if self.debug:
            print('rule_text'+lineno())
        return 'IAM managed policy should not allow * resource'

    def rule_type(self):
        """
        Return rule type
        :return: severity string
        """
        self.type = 'VIOLATION::WARNING'
        return self.type

    def rule_id(self):
        """
        Return rule id
        :return: rule identifier
        """
        if self.debug:
            print('rule_id'+lineno())
        self.id = 'W13'
        return self.id

    def audit_impl(self):
        """
        Audit
        :return: list of logical resource ids that violate the rule
        """
        if self.debug:
            print('IamMangedPolicyWildcardResourceRule - audit_impl'+lineno())
        violating_policies = []
        resources = self.cfn_model.resources_by_type('AWS::IAM::ManagedPolicy')
        if not resources:
            if self.debug:
                print('no violating_policies' + lineno())
            return violating_policies
        for resource in resources:
            if self.debug:
                print('resources: '+str(resource)+lineno())
                print('vars: '+str(vars(resource))+lineno())
            # Skip resources without a parsed policy document.
            if not hasattr(resource, 'policy_document'):
                continue
            if self.debug:
                print('has policy document '+lineno())
                print('vars: '+str(vars(resource.policy_document)))
            if not resource.policy_document:
                continue
            if self.debug:
                print(resource.policy_document.statements)
            for statements in resource.policy_document.statements:
                if self.debug:
                    print('statements: '+str(statements))
                    print('vars: '+str(vars(statements)))
            if resource.policy_document.wildcard_allowed_resources():
                if self.debug:
                    print('has wildcard allow resources')
                violating_policies.append(str(resource.logical_resource_id))
        return violating_policies
# Source: discord-ui-5.1.6 / discord_ui/receive.py
from __future__ import annotations
from .enums import InteractionResponseType
from .slash.http import ModifiedSlashState
from .errors import InvalidEvent, WrongType
from .http import BetterRoute, get_message_payload, send_files
from .slash.errors import AlreadyDeferred, EphemeralDeletion
from .tools import EMPTY_CHECK, MISSING, All, deprecated, setup_logger, get
from .slash.types import ContextCommand, SlashCommand, SlashPermission, SlashSubcommand
from .components import ActionRow, Button, ComponentStore, LinkButton, SelectMenu, SelectOption, UseableComponent, make_component
import discord
from discord import utils
from discord.ext import commands
from discord.state import ConnectionState
from typing import Any, List, Union, Dict
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
# Module-level logger shared by all classes in this module.
logging = setup_logger("discord-ui")

# Public API of this module. Names spelled with the old
# "Pressed/Selected/Slashed/ChoiceGenerator" prefixes are deprecated
# aliases kept for backwards compatibility.
__all__ = (
    'Message',
    'EphemeralMessage',
    'EphemeralResponseMessage',
    'ButtonInteraction',
    'PressedButton', # deprecated
    'SelectInteraction',
    'SelectedMenu', # deprecated
    'AutocompleteInteraction',
    'ChoiceGeneratorContext', # deprecated
    'SlashInteraction',
    'SlashedCommand', # deprecated
    'SubSlashInteraction',
    'SlashedSubCommand', # deprecated
    'ContextInteraction',
    'Interaction',
)
class InteractionType:
    """Numeric interaction type codes as sent by the Discord gateway,
    exposed both under their API names and short aliases."""
    PING = 1
    Ping = 1
    APPLICATION_COMMAND = 2
    Command = 2
    MESSAGE_COMPONENT = 3
    Component = 3
    APPLICATION_COMMAND_AUTOCOMPLETE = 4
    Autocomplete = 4
class Interaction():
    """Base class for every received interaction; tracks response state and
    wraps the raw gateway payload."""
    def __init__(self, state, data, user=None, message=None) -> None:
        self._state: ModifiedSlashState = state

        # Response bookkeeping: whether we already deferred/responded, and
        # whether the deferral was ephemeral (hidden).
        self.deferred: bool = False
        self.responded: bool = False
        self._deferred_hidden: bool = False
        self._original_payload: dict = data

        self.author: Union[discord.Member, discord.User] = user
        """The user who created the interaction"""
        self.application_id: int = data["application_id"]
        """The ID of the bot application"""
        self.token: str = data["token"]
        """The token for responding to the interaction"""
        self.id: int = int(data["id"])
        """The id of the interaction"""
        self.type: int = data["type"]
        """The type of the interaction. See :class:`~InteractionType` for more information"""
        self.version: int = data["version"]
        self.data: dict = data["data"]
        """The passed data of the interaction"""
        self.channel_id: int = int(data.get("channel_id")) if data.get("channel_id") is not None else None
        """The channel-id where the interaction was created"""
        self.guild_id: int = int(data["guild_id"]) if data.get("guild_id") is not None else None
        """The guild-id where the interaction was created"""
        self.message: Message = message
        """The message in which the interaction was created"""

    @property
    def created_at(self):
        """The interaction's creation time in UTC"""
        return utils.snowflake_time(self.id)

    @property
    def guild(self) -> discord.Guild:
        """The guild where the interaction was created"""
        return self._state._get_guild(self.guild_id)
    @property
    def channel(self) -> Union[discord.abc.GuildChannel, discord.abc.PrivateChannel]:
        """The channel where the interaction was created"""
        # DM interactions have no channel cached by id; fall back to the
        # author's private channel.
        return self._state.get_channel(self.channel_id) or self._state.get_channel(self.author.id)

    async def defer(self, hidden=False):
        """
        This will acknowledge the interaction. This will show the (*Bot* is thinking...) Dialog

        .. note::

            This function should be used if the bot needs more than 15 seconds to respond

        Parameters
        ----------
        hidden: :class:`bool`, optional
            Whether the loading thing should be only visible to the user; default False.

        """
        if self.deferred:
            logging.error(AlreadyDeferred())
            return

        payload = None
        if hidden is True:
            payload = {"flags": 64}
            self._deferred_hidden = True

        await self._state.slash_http.respond_to(self.id, self.token, InteractionResponseType.Deferred_channel_message, payload)
        self.deferred = True

    async def respond(self, content=None, *, tts=False, embed=None, embeds=None, file=None, files=None, nonce=None,
        allowed_mentions=None, mention_author=None, components=None, delete_after=None, listener=None,
        hidden=False, ninja_mode=False) -> Union['Message', 'EphemeralMessage']:
        """
        Responds to the interaction

        Parameters
        ----------
        content: :class:`str`, optional
            The raw message content
        tts: :class:`bool`
            Whether the message should be send with text-to-speech
        embed: :class:`discord.Embed`
            Embed rich content
        embeds: List[:class:`discord.Embed`]
            A list of embeds for the message
        file: :class:`discord.File`
            The file which will be attached to the message
        files: List[:class:`discord.File`]
            A list of files which will be attached to the message
        nonce: :class:`int`
            The nonce to use for sending this message
        allowed_mentions: :class:`discord.AllowedMentions`
            Controls the mentions being processed in this message
        mention_author: :class:`bool`
            Whether the author should be mentioned
        components: List[:class:`~Button` | :class:`~LinkButton` | :class:`~SelectMenu`]
            A list of message components to be included
        delete_after: :class:`float`
            After how many seconds the message should be deleted, only works for non-hiddend messages; default MISSING
        listener: :class:`Listener`
            A component-listener for this message
        hidden: :class:`bool`
            Whether the response should be visible only to the user
        ninja_mode: :class:`bool`
            If true, the client will respond to the button interaction with almost nothing and returns nothing

        Returns
        -------
        :class:`~Message` | :class:`~EphemeralMessage`
            Returns the sent message
        """
        # Ninja mode (explicit, or implied by an entirely empty call):
        # acknowledge with a bare "deferred update" and return nothing.
        # NOTE: this check must stay the first statement — it inspects locals().
        if ninja_mode is True or all(y in [None, False] for x, y in locals().items() if x not in ["self"]):
            try:
                await self._state.slash_http.respond_to(self.id, self.token, InteractionResponseType.Deferred_message_update)
                self.responded = True
                return
            except discord.errors.HTTPException as x:
                if "value must be one of (4, 5)" in str(x).lower():
                    logging.error(str(x) + "\n" + "The 'ninja_mode' parameter is not supported for slash commands!")
                    ninja_mode = False
                else:
                    raise x

        # Already responded once -> everything further is a follow-up message.
        if self.responded is True:
            return await self.send(content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions, mention_author=mention_author,
                components=components, listener=listener, hidden=hidden
            )

        if components is None and listener is not None:
            components = listener.to_components()

        payload = get_message_payload(content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions,
            mention_author=mention_author, components=components
        )

        if self._deferred_hidden is hidden:
            if self._deferred_hidden is False and hidden is True:
                logging.warning("Your response should be hidden, but the interaction was deferred public. This results in a public response.")
            if self._deferred_hidden is True and hidden is False:
                logging.warning("Your response should be public, but the interaction was deferred hidden. This results in a hidden response.")
        # The deferral's visibility wins over the hidden parameter.
        hide_message = self._deferred_hidden or not self.deferred and hidden is True

        r = None
        if delete_after is not None and hide_message is True:
            raise EphemeralDeletion()

        if hide_message:
            payload["flags"] = 64

        if self.deferred:
            route = BetterRoute("PATCH", f'/webhooks/{self.application_id}/{self.token}/messages/@original')
            if file is not None or files is not None:
                await send_files(route=route, files=files or ([file] if file is not None else None), payload=payload, http=self._state.http)
            else:
                await self._state.http.request(route, json=payload)
        else:
            # Fixed: the conditional expression needs parentheses — without
            # them "files or [file] if file is not None else None" parsed as
            # "(files or [file]) if file is not None else None", sending
            # files=None whenever only ``files`` was passed.
            await self._state.slash_http.respond_to(self.id, self.token, InteractionResponseType.Channel_message, payload, files=files or ([file] if file is not None else None))
        self.responded = True

        r = await self._state.http.request(BetterRoute("GET", f"/webhooks/{self.application_id}/{self.token}/messages/@original"))

        if hide_message is True:
            msg = EphemeralMessage(state=self._state, channel=self.channel, data=r, application_id=self.application_id, token=self.token)
        else:
            msg = await getMessage(self._state, data=r, response=False)

        if listener is not None:
            listener._start(msg)

        if delete_after is not None:
            await msg.delete(delete_after)

        return msg

    async def send(self, content=None, *, tts=None, embed=None, embeds=None, file=None, files=None, nonce=None,
        allowed_mentions=None, mention_author=None, components=None, delete_after=None, listener=None, hidden=False,
        force=False
    ) -> Union[Message, EphemeralMessage]:
        """
        Sends a message to the interaction using a webhook

        Parameters
        ----------
        content: :class:`str`, optional
            The raw message content
        tts: :class:`bool`, optional
            Whether the message should be send with text-to-speech
        embed: :class:`discord.Embed`, optional
            Embed rich content
        embeds: List[:class:`discord.Embed`], optional
            A list of embeds for the message
        file: :class:`discord.File`, optional
            The file which will be attached to the message
        files: List[:class:`discord.File`], optional
            A list of files which will be attached to the message
        nonce: :class:`int`, optional
            The nonce to use for sending this message
        allowed_mentions: :class:`discord.AllowedMentions`, optional
            Controls the mentions being processed in this message
        mention_author: :class:`bool`, optional
            Whether the author should be mentioned
        components: List[:class:`~Button` | :class:`~LinkButton` | :class:`~SelectMenu`]
            A list of message components to be included
        delete_after: :class:`float`, optional
            After how many seconds the message should be deleted, only works for non-hiddend messages; default MISSING
        listener: :class:`Listener`, optional
            A component-listener for this message
        hidden: :class:`bool`, optional
            Whether the response should be visible only to the user
        force: :class:`bool`, optional
            Whether sending the follow-up message should be forced.
            If ``False``, then a follow-up message will only be send if ``.responded`` is True; default False

        Returns
        -------
        :class:`~Message` | :class:`EphemeralMessage`
            Returns the sent message
        """
        # Until the first response has gone out, .send() delegates to .respond().
        if force is False and self.responded is False:
            return await self.respond(content=content, tts=tts, embed=embed, embeds=embeds, file=file, files=files, nonce=nonce, allowed_mentions=allowed_mentions, mention_author=mention_author, components=components, delete_after=delete_after, listener=listener, hidden=hidden)

        if components is None and listener is not None:
            components = listener.to_components()

        payload = get_message_payload(content=content, tts=tts, embed=embed, embeds=embeds, nonce=nonce, allowed_mentions=allowed_mentions, mention_author=mention_author, components=components)

        if hidden:
            payload["flags"] = 64

        route = BetterRoute("POST", f'/webhooks/{self.application_id}/{self.token}')
        if file is not None or files is not None:
            # Fixed: the condition was inverted ("[file] if file is None"),
            # which passed files=None to send_files whenever a single ``file``
            # was given without ``files``.
            r = await send_files(route=route, files=files or ([file] if file is not None else None), payload=payload, http=self._state.http)
        else:
            r = await self._state.http.request(route, json=payload)

        if hidden is True:
            msg = EphemeralMessage(state=self._state, channel=self._state.get_channel(int(r["channel_id"])), data=r, application_id=self.application_id, token=self.token)
        else:
            msg = await getMessage(self._state, r, response=False)

        if delete_after is not None:
            await msg.delete(delete_after)
        if listener is not None:
            listener._start(msg)
        return msg

    def _handle_auto_defer(self, auto_defer):
        """Applies a client-side auto-defer decision: ``(deferred, hidden)``."""
        self.deferred = auto_defer[0]
        self._deferred_hidden = auto_defer[1]
class AutocompleteInteraction(Interaction):
    """An interaction fired while the user is typing an autocompleted option."""
    def __init__(self, command, state, data, options, user=None) -> None:
        Interaction.__init__(self, state, data, user=user)
        # The option currently being typed is the one discord marks "focused".
        focused_key = get(options, check=lambda x: options[x].get("focused", False))
        self.focused_option: dict = options[focused_key]
        """The option for which the choices should be generated"""
        self.value_query: Union[str, int] = self.focused_option["value"]
        """The current input of the focused option"""
        self.selected_options: Dict[str, Any] = {}
        """All the options that were already selected. Format: ``{"option name": value}``"""
        for key in options:
            option = options[key]
            self.selected_options[option["name"]] = option["value"]
        self.command: Union[SlashInteraction, SlashInteraction, ContextInteraction] = command
        """The slash command for which the choices should be generated"""
    async def defer(self, *args, **kwargs):
        """Cannot defer this type of interaction"""
        raise NotImplementedError()
    async def respond(self, *args, **kwargs):
        """Response will be made automatically with the choices that are returned"""
        raise NotImplementedError()
    async def send(self, *args, **kwargs):
        """Cannot send followup message to this type of interaction"""
        raise NotImplementedError()
class ChoiceGeneratorContext(AutocompleteInteraction):
    """Deprecated alias for :class:`AutocompleteInteraction`, kept only for backwards compatibility."""
    ...
class ComponentInteraction(Interaction):
    """A received message-component interaction."""
    def __init__(self, state, data, user, message) -> None:
        Interaction.__init__(self, state, data, user=user, message=message)
        # Wrap the raw component payload in a lightweight usable component.
        component_data = data["data"]
        component = UseableComponent(component_data["component_type"])
        component._custom_id = component_data["custom_id"]
        self.component: UseableComponent = component
class ComponentContext(ComponentInteraction):
    """Deprecated alias for :class:`ComponentInteraction`, kept only for backwards compatibility."""
    ...
class SelectInteraction(Interaction):
    """An interaction that was created by a :class:`~SelectMenu`"""
    def __init__(self, data, user, s, msg, client) -> None:
        Interaction.__init__(self, client._connection, data, user, msg)
        self.component: SelectMenu = s
        self.bot: commands.Bot = client
        self.custom_id: str = data['data']['custom_id']

        self.selected_options: List[SelectOption] = []
        """The list of the selected options"""
        self.selected_values: List[str] = []
        """The list of raw values which were selected"""
        # Resolve each raw selected value back to its SelectOption, keeping
        # the order in which discord reported the values.
        for chosen_value in data["data"]["values"]:
            for option in self.component.options:
                if option.value != chosen_value:
                    continue
                self.selected_options.append(option)
                self.selected_values.append(option.value)
        self.author: discord.Member = user
        """The user who selected the value"""
class SelectedMenu(SelectInteraction):
    """Deprecated alias for :class:`SelectInteraction`, kept only for backwards compatibility."""
    ...
class ButtonInteraction(Interaction):
    """An interaction that was created by a :class:`~Button`"""
    def __init__(self, data, user, b, message, client) -> None:
        Interaction.__init__(self, client._connection, data, user, message)
        self.bot: commands.Bot = client
        self.custom_id: str = data['data']['custom_id']
        self.component: Button = b
        """The component that created the interaction"""
        self.author: discord.Member = user
        """The user who pressed the button"""
class PressedButton(ButtonInteraction):
    """Deprecated alias for :class:`ButtonInteraction`, kept only for backwards compatibility."""
    ...
class SlashInteraction(Interaction):
    """An interaction created by a :class:`~SlashCommand`"""
    def __init__(self, client, command: SlashCommand, data, user, args = None) -> None:
        Interaction.__init__(self, client._connection, data, user)
        self.bot: commands.Bot = client
        self.command: SlashCommand = command
        """The original command instance that was used. If you change things here, the changes will be applied globally"""
        self.author: discord.Member = user
        """The user who used the command"""
        self.args: Dict[str, Union[str, int, bool, discord.Member, discord.TextChannel, discord.Role, float]] = args
        """The options that were received"""
        # Resolve the guild-specific permission set, if any were registered.
        guild_perms = command.guild_permissions
        self.permissions: SlashPermission = None if guild_perms is None else guild_perms.get(self.guild_id)
        """The permissions for this guild"""
class SlashedCommand(SlashInteraction):
    """Deprecated alias for :class:`SlashInteraction`, kept only for backwards compatibility."""
    ...
class SubSlashInteraction(SlashInteraction):
    """An interaction created by a :class:`~SlashSubcommand`"""
    # The concrete command type carried by this interaction.
    command: SlashSubcommand
    def __init__(self, client, command, data, user, args = None) -> None:
        super().__init__(client, command, data, user, args)
class SlashedSubCommand(SubSlashInteraction):
    """Deprecated alias for :class:`SubSlashInteraction`, kept only for backwards compatibility."""
    ...
class ContextInteraction(Interaction):
    """An interaction created by a :class:`~MessageCommand` or a :class:`~UserCommand`"""
    def __init__(self, client, command: ContextCommand, data, user, target) -> None:
        Interaction.__init__(self, client._connection, data, user)
        self.bot: commands.Bot = client
        self.command: ContextCommand = command
        """The original command instance that was used"""
        self.target: Union[Message, Union[discord.Member, discord.User]] = target
        """The target object on which the interaction was used"""
        # Resolve the guild-specific permission set, if any were registered.
        guild_perms = command.guild_permissions
        self.permissions: SlashPermission = None if guild_perms is None else guild_perms.get(self.guild_id)
        """The permissions for this guild"""
async def getMessage(state: discord.state.ConnectionState, data, response=True):
    """
    Async helper that wraps a raw interaction payload in the right message class.

    Parameters
    -----------------
    state: :class:`discord.state.ConnectionState`
        The connection state used to resolve the channel
    data: :class:`dict`
        The raw payload (either the message itself, or a payload containing a
        ``"message"`` key)
    response: :class:`bool`
        Whether an ephemeral payload should be wrapped as
        :class:`EphemeralResponseMessage` (``True``) or
        :class:`EphemeralMessage` (``False``)

    Returns
    -------
    :class:`~Message` | :class:`~EphemeralMessage` | :class:`~EphemeralResponseMessage`
        The wrapped message
    """
    # Interaction payloads may nest the message under "message"; plain
    # webhook responses are the message itself.
    msg_base = data.get("message", data)
    channel = state.get_channel(int(data["channel_id"])) or state.get_channel(int(msg_base["author"]["id"]))
    # Flag 64 marks an ephemeral (hidden) message. Fixed: tolerate payloads
    # that omit "flags" entirely (previously a plain KeyError).
    hidden = msg_base.get("flags", 0) == 64
    if response and hidden:
        return EphemeralResponseMessage(state=state, channel=channel, data=msg_base)
    if hidden:
        return EphemeralMessage(state=state, channel=channel, data=msg_base)
    return Message(state=state, channel=channel, data=msg_base)
class Message(discord.Message):
    """A :class:`discord.Message` optimized for components"""
    _state: ConnectionState
    def __init__(self, *, state, channel, data):
        # NOTE(review): assigning __slots__ on an *instance* has no slotting
        # effect at this point — presumably kept for introspection; confirm
        # before removing.
        self.__slots__ = discord.Message.__slots__ + ("components",)
        discord.Message.__init__(self, state=state, channel=channel, data=data)
        self.components = ComponentStore()
        """The components in the message"""
        self._update_components(data)
    # region attributes
    @property
    @deprecated(".components.buttons")
    def buttons(self) -> List[Union[Button, LinkButton]]:
        """The button components in the message"""
        return self.components.buttons
    @property
    @deprecated(".components.selects")
    def select_menus(self) -> List[SelectMenu]:
        """The select menus components in the message"""
        return self.components.selects
    @property
    @deprecated(".components.get_rows()")
    def action_rows(self) -> List[ActionRow]:
        """The components of this message grouped into their action rows."""
        return self.components.get_rows()
    def _update_components(self, data):
        """Updates the message components"""
        # Rebuild the component store from the raw payload on every update.
        if data.get("components") is None:
            self.components = ComponentStore()
            return
        self.components = ComponentStore()
        if len(data["components"]) == 0:
            pass
        elif len(data["components"]) > 1:
            # multiple lines (one wrapper per action row); the first component
            # of each row is flagged as a row start via make_component(..., True)
            for componentWrapper in data["components"]:
                # newline
                for index, com in enumerate(componentWrapper["components"]):
                    self.components.append(make_component(com, index==0))
        elif len(data["components"][0]["components"]) > 1:
            # All inline (a single action row with several components)
            for index, com in enumerate(data["components"][0]["components"]):
                self.components.append(make_component(com, index==0))
        else:
            # One button
            component = data["components"][0]["components"][0]
            self.components.append(make_component(component))
    def _update(self, data):
        # Keep discord.Message's state handling, then refresh our components.
        super()._update(data)
        self._update_components(data)
    async def edit(self, content=MISSING, *, embed=MISSING, embeds=MISSING, attachments=MISSING, suppress=MISSING,
        delete_after=MISSING, allowed_mentions=MISSING, components=MISSING):
        """Edits the message and updates its properties

        .. note::

            If a paremeter is `None`, the attribute will be removed from the message

        Parameters
        ----------------
        content: :class:`str`
            The new message content
        embed: :class:`discord.Embed`
            The new embed of the message
        embeds: List[:class:`discord.Embed`]
            The new list of discord embeds
        attachments: List[:class:`discord.Attachment`]
            A list of new attachments
        supress: :class:`bool`
            Whether the embeds should be shown
        delete_after: :class:`float`
            After how many seconds the message should be deleted
        allowed_mentions: :class:`discord.AllowedMentions`
            The mentions proceeded in the message
        components: List[:class:`~Button` | :class:`~LinkButton` | :class:`~SelectMenu`]
            A list of components to be included the message
        """
        payload = get_message_payload(content, embed=embed, embeds=embeds, allowed_mentions=allowed_mentions, attachments=attachments, suppress=suppress, flags=self.flags.value, components=components)
        data = await self._state.http.edit_message(self.channel.id, self.id, **payload)
        self._update(data)
        if delete_after is not MISSING:
            await self.delete(delay=delete_after)
    async def disable_components(self, index=All, disable=True, **fields):
        """Disables component(s) in the message

        Parameters
        ----------
        index: :class:`int` | :class:`str` | :class:`range` | List[:class:`int` | :class:`str`], optional
            Index(es) or custom_id(s) for the components that should be disabled or enabled; default all components
        disable: :class:`bool`, optional
            Whether to disable (``True``) or enable (``False``) components; default True
        ``**fields``
            Other parameters for editing the message (like `content=`, `embed=`)

        """
        self.components.disable(index, disable)
        await self.edit(components=self.components, **fields)
    async def wait_for(self, event_name: Literal["select", "button", "component"], client, custom_id=None, by=None, check=EMPTY_CHECK, timeout=None) -> Union[ButtonInteraction, SelectInteraction, ComponentContext]:
        """Waits for a message component to be invoked in this message

        Parameters
        -----------
        event_name: :class:`str`
            The name of the event which will be awaited [``"select"`` | ``"button"`` | ``"component"``]

            .. note::

                ``event_name`` must be ``select`` for a select menu selection, ``button`` for a button press and ``component`` for any component

        client: :class:`discord.ext.commands.Bot`
            The discord client
        custom_id: :class:`str`, Optional
            Filters the waiting for a custom_id
        by: :class:`discord.User` | :class:`discord.Member` | :class:`int` | :class:`str`, Optional
            The user or the user id by that has to create the component interaction
        check: :class:`function`, Optional
            A check that has to return True in order to break from the event and return the received component
                The function takes the received component as the parameter
        timeout: :class:`float`, Optional
            After how many seconds the waiting should be canceled.
            Throws an :class:`asyncio.TimeoutError` Exception

        Raises
        ------
        :class:`discord_ui.errors.InvalidEvent`
            The event name passed was invalid

        Returns
        --------
        :class:`~ButtonInteraction` | :class:`~SelectInteraction`
            The component that was waited for

        Example
        -------

        .. code-block::

            # send a message with comoponents
            msg = await ctx.send("okay", components=[Button(custom_id="a_custom_id", ...)])
            try:
                # wait for the button
                btn = await msg.wait_for("button", client, "a_custom_id", by=ctx.author, timeout=20)
                # send response
                btn.respond()
            except asyncio.TimeoutError:
                # no button press was received in 20 seconds timespan
        """
        # Accept only interactions on this message that pass every supplied
        # filter (custom_id, author, user check).
        def _check(com):
            if com.message.id == self.id:
                statements = []
                if custom_id is not None:
                    statements.append(com.custom_id == custom_id)
                if by is not None:
                    statements.append(com.author.id == (by.id if hasattr(by, "id") else int(by)))
                if check is not None:
                    statements.append(check(com))
                return all(statements)
            return False
        if not isinstance(client, commands.Bot):
            raise WrongType("client", client, "discord.ext.commands.Bot")
        if event_name.lower() == "button":
            return await client.wait_for('button', check=_check, timeout=timeout)
        if event_name.lower() == "select":
            return await client.wait_for("select", check=_check, timeout=timeout)
        if event_name.lower() == "component":
            return await client.wait_for("component", check=_check, timeout=timeout)
        raise InvalidEvent(event_name, ["button", "select", "component"])
    async def put_listener(self, listener):
        """Adds a listener to this message and edits the message if the components of the listener are missing in this message

        Parameters
        ----------
        listener: :class:`Listener`
            The listener which should be put to the message

        """
        # Only push an edit when the message has no components yet.
        if len(self.components) == 0:
            await self.edit(components=listener.to_components())
        self.attach_listener(listener)
    def attach_listener(self, listener):
        """Attaches a listener to this message after it was sent

        Parameters
        ----------
        listener: :class:`Listener`
            The listener that should be attached

        """
        listener._start(self)
    def remove_listener(self):
        """Removes the listener from this message"""
        # Silently ignore the case where no listener was ever registered.
        try:
            del self._state._component_listeners[str(self.id)]
        except KeyError:
            pass
class EphemeralMessage(Message):
    """Represents a hidden (ephemeral) message"""
    def __init__(self, state, channel, data, application_id=None, token=None):
        # Discord may omit channel_id inside message_reference for ephemeral
        # payloads; patch it in so discord.py's parser doesn't KeyError.
        reference = data.get("message_reference")
        if reference and reference.get("channel_id") is None:
            reference["channel_id"] = str(channel.id)
        Message.__init__(self, state=state, channel=channel, data=data)
        # Needed to build the webhook route used for editing this message.
        self._application_id = application_id
        self._interaction_token = token
    async def edit(self, *args, **fields):
        """Edits this ephemeral message through the interaction webhook."""
        route = BetterRoute("PATCH", f"/webhooks/{self._application_id}/{self._interaction_token}/messages/{self.id}")
        self._update(await self._state.http.request(route, json=get_message_payload(*args, **fields)))
    async def delete(self):
        """Ephemeral messages cannot be deleted; always raises :class:`EphemeralDeletion`."""
        raise EphemeralDeletion()
class EphemeralResponseMessage(Message):
    """An ephemeral message which was created from an interaction

    .. important::
        Methods like `.edit()`, which change the original message, need a `token` parameter passed in order to work
    """
    def __init__(self, *, state, channel, data):
        Message.__init__(self, state=state, channel=channel, data=data)
    async def edit(self, token, *args, **fields):
        """Edits the message

        Parameters
        ----------
        token: :class:`str`
            The token of the interaction with which this ephemeral message was sent
        fields: :class:`kwargs`
            The fields to edit (ex. `content="...", embed=..., attachments=[...]`)

        Example
        .. code-block::

            async def testing(ctx):
                msg = await ctx.send("hello hidden world", components=[Button("test")])
                btn = await msg.wait_for("button", client)
                await btn.message.edit(ctx.token, content="edited", components=None)
        """
        endpoint = f"/webhooks/{self.interaction.application_id}/{token}/messages/{self.id}"
        payload = get_message_payload(*args, **fields)
        self._update(await self._state.http.request(BetterRoute("PATCH", endpoint), json=payload))
    async def delete(self):
        """Override for the delete function; always raises, since ephemeral messages cannot be deleted"""
        raise EphemeralDeletion()
    async def disable_components(self, token, disable=True, **fields):
        """Disables all components in the message

        Parameters
        ----------
        token: :class:`str`
            The token of the interaction this ephemeral message belongs to
        disable: :class:`bool`, optional
            Whether to disable (``True``) or enable (``False``) all components; default True
        """
        self.components.disable(disable=disable)
        await self.edit(token, components=self.components, **fields)
/tati-0.9.5-py3-none-any.whl/TATi/samplers/dynamics/hamiltonianmontecarlosamplersecondordersampler.py |
# This is heavily inspired by https://github.com/openai/iaf/blob/master/tf_utils/adamax.py
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from TATi.samplers.dynamics.hamiltonianmontecarlosamplerfirstordersampler import \
HamiltonianMonteCarloSamplerFirstOrderSampler
class HamiltonianMonteCarloSamplerSecondOrderSampler(HamiltonianMonteCarloSamplerFirstOrderSampler):
    """Implements a Hamiltonian Monte Carlo Sampler
    in the form of a TensorFlow Optimizer, overriding tensorflow.python.training.Optimizer.

    Second-order variant: between two Metropolis accept/reject evaluations it
    integrates Hamiltonian dynamics with a leapfrog ("BAB") scheme, where "B"
    is a half momentum update and "A" a position update.  Because TensorFlow
    evaluates loss and gradient at the beginning of a step, the BAB cycle is
    cyclically permuted to BBA; see `_apply_dense` for the bookkeeping.
    """
    def __init__(self, calculate_accumulates,
                 covariance_blending, step_width, inverse_temperature,
                 loss, current_step, next_eval_step, hd_steps, accept_seed,
                 seed=None, use_locking=False, name='HamiltonianMonteCarlo_2ndOrder'):
        """Init function for this class.

        Args:
          calculate_accumulates: whether accumulates (gradient norm, noise, norm, kinetic energy, ...) are calculated
            every step (extra work but required for run info dataframe/file and averages dataframe/file)
          covariance_blending: covariance identity blending value eta to use in creating the preconditioning matrix
          step_width: placeholder for step width for gradient
          inverse_temperature: placeholder for scale for noise
          loss: placeholder for loss value of the current state for evaluating acceptance
          current_step: placeholder for current step
          next_eval_step: placeholder for step number at which accept/reject is evaluated next
          hd_steps: placeholder with number of hamilton dynamics steps
          accept_seed: extra seed value for random numbers used for acceptance evaluation
          seed: seed value of the random number generator for generating reproducible runs (Default value = None)
          use_locking: whether to lock in the context of multi-threaded operations (Default value = False)
          name: internal name of optimizer (Default value = 'HamiltonianMonteCarlo_2ndOrder')
        """
        super(HamiltonianMonteCarloSamplerSecondOrderSampler, self).__init__(
            calculate_accumulates, covariance_blending, step_width, inverse_temperature,
            loss, current_step, next_eval_step, accept_seed,
            seed, use_locking, name)
        # number of hamilton dynamics steps between acceptance evaluations
        self._hd_steps = hd_steps
    def _prepare(self):
        """Converts step width into a tensor, if given as a floating-point
        number.

        Additionally converts the number of hamilton dynamics steps into
        the tensor `self._hd_steps_t`.
        """
        super(HamiltonianMonteCarloSamplerSecondOrderSampler, self)._prepare()
        self._hd_steps_t = ops.convert_to_tensor(self._hd_steps, name="hd_steps")
    def _get_momentum_criterion_block(self, var,
                                      scaled_gradient, scaled_noise,
                                      current_step_t, next_eval_step_t, hd_steps_t):
        """Creates the graph nodes for the momentum ("B") update of `var`.

        Depending on the current step the momentum is either integrated
        (once or twice per step), redrawn from `scaled_noise` on the
        acceptance-evaluation step, or left untouched.

        Args:
          var: variable whose "momentum" slot is updated
          scaled_gradient: gradient scaled by half a time step
          scaled_noise: freshly drawn momentum used for re-initialization
          current_step_t: tensor with the current step number
          next_eval_step_t: tensor with the step of the next accept/reject evaluation
          hd_steps_t: tensor with the number of hamilton dynamics steps

        Returns:
          tuple of the momentum node and the accumulate nodes
          (momentum norm, inertia, kinetic energy)
        """
        momentum = self.get_slot(var, "momentum")
        # leave the momentum unchanged
        def momentum_id_block():
            return tf.identity(momentum)
        # update momentum
        def momentum_step_block():
            with tf.control_dependencies([
                state_ops.assign_sub(momentum, scaled_gradient)]):
                return tf.identity(momentum)
        # L=5, step 0 was a criterion evaluation:
        # 1 (BA), 2 (BBA), 3 (BBA), 4 (BBA), 5 (BBA), 6(B), 7 criterion
        # in the very first step we have to skip the first "B" step:
        # e.g., for L=5, we execute at steps 2,3,4,5,6, and skip at 1,7
        momentum_first_step_block_t = tf.cond(
            tf.logical_and(
                tf.greater_equal(current_step_t, next_eval_step_t - (hd_steps_t)),
                tf.less(current_step_t, next_eval_step_t)),
            momentum_step_block, momentum_id_block)
        def integrated_momentum():
            return tf.identity(momentum_first_step_block_t)
        # redraw the momentum right after an accept/reject evaluation
        def moment_reinit_block():
            with tf.control_dependencies([momentum.assign(scaled_noise)]):
                return tf.identity(momentum)
        def momentum_criterion_block():
            return tf.cond(
                tf.equal(current_step_t, next_eval_step_t),
                moment_reinit_block, momentum_step_block)
        # skip second "B" step on the extra step (as both "BA" is skipped)
        # before criterion evaluation
        with tf.control_dependencies([momentum_first_step_block_t]):
            momentum_second_step_block_t = tf.cond(
                tf.equal(current_step_t, next_eval_step_t - 1),
                momentum_id_block, momentum_criterion_block)
        def redrawn_momentum():
            return tf.identity(momentum_second_step_block_t)
        # calculate kinetic energy and momentum after first "B" step or on redrawn momenta
        momentum_kinetic_energy_t = tf.cond(
            tf.equal(current_step_t, next_eval_step_t),
            redrawn_momentum, integrated_momentum)
        momentum_sq = tf.reduce_sum(tf.multiply(momentum_kinetic_energy_t,momentum_kinetic_energy_t))
        momentum_global_t = self._add_momentum_contribution(momentum_sq)
        inertia_global_t = self._add_inertia_contribution(momentum_kinetic_energy_t, var)
        kinetic_energy_t = self._add_kinetic_energy_contribution(momentum_sq)
        return momentum_second_step_block_t, momentum_global_t, inertia_global_t, kinetic_energy_t
    def _create_criterion_integration_block(self, var,
                                            virial_global_t,
                                            scaled_momentum, current_energy,
                                            p_accept, uniform_random_t,
                                            current_step_t, next_eval_step_t):
        """Creates the graph nodes for the position ("A") update of `var`,
        including the accept/reject evaluation.

        On ordinary steps `var` is advanced by `scaled_momentum`; on the step
        right before the criterion evaluation the update is skipped; on the
        evaluation step itself the Metropolis criterion either accepts the
        proposal (storing it as new initial state) or rolls `var` back to
        `initial_parameters`.

        Args:
          var: variable to integrate
          virial_global_t: virial accumulate node (forced to evaluate first)
          scaled_momentum: momentum scaled by the time step
          current_energy: total energy (loss + kinetic energy) of the proposal
          p_accept: node with the acceptance probability
          uniform_random_t: uniform random number for the Metropolis test
          current_step_t: tensor with the current step number
          next_eval_step_t: tensor with the step of the next accept/reject evaluation

        Returns:
          node evaluating to the stored (old) total energy after the update
        """
        initial_parameters = self.get_slot(var, "initial_parameters")
        old_total_energy_t = self._get_old_total_energy()
        accepted_t, rejected_t = self._get_accepted_rejected()
        # see https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond
        # In other words, each branch inside a tf.cond is evaluated. All "side effects"
        # need to be hidden away inside tf.control_dependencies.
        # I.E. DONT PLACE INSIDE NODES (confusing indeed)
        def accept_block():
            with tf.control_dependencies([virial_global_t]):
                with tf.control_dependencies([
                    old_total_energy_t.assign(current_energy),
                    initial_parameters.assign(var),
                    accepted_t.assign_add(1)]):
                    return tf.identity(old_total_energy_t)
        # DONT use nodes in the control_dependencies, always functions!
        def reject_block():
            with tf.control_dependencies([virial_global_t]):
                with tf.control_dependencies([
                    var.assign(initial_parameters),
                    rejected_t.assign_add(1)]):
                    return tf.identity(old_total_energy_t)
        def accept_reject_block():
            return tf.cond(tf.greater(p_accept, uniform_random_t),
                           accept_block, reject_block)
        # DONT use nodes in the control_dependencies, always functions!
        def step_block():
            with tf.control_dependencies([virial_global_t]):
                with tf.control_dependencies([state_ops.assign_add(var, scaled_momentum)]):
                    return tf.identity(old_total_energy_t)
        def id_block():
            with tf.control_dependencies([virial_global_t]):
                return tf.identity(old_total_energy_t)
        # skip "A" step in extra step before criterion evaluation
        def step_or_id_block():
            return tf.cond(
                tf.equal(current_step_t, next_eval_step_t - 1),
                id_block, step_block)
        # make sure virial and gradients are evaluated before we update variables
        criterion_block_t = tf.cond(
            tf.equal(current_step_t, next_eval_step_t),
            accept_reject_block, step_or_id_block)
        return criterion_block_t
    def _apply_dense(self, grads_and_vars, var):
        """Adds nodes to TensorFlow's computational graph in the case of densely
        occupied tensors to perform the actual sampling.

        We perform a number of Leapfrog steps on a hamiltonian (loss+kinetic energy)
        and at step number next_eval_step we check the acceptance criterion,
        either resetting back to the initial parameters or resetting the
        initial parameters to the current ones.

        NOTE:
            Due to Tensorflow enforcing loss and gradient evaluation at
            the begin of the sampling step, we need to cyclically permute the
            BAB steps to become BBA, i.e. the last "B" step is delayed till the
            next step. This means that we need to skip the additional "B" step
            in the very first time integration step and we need an additional
            step to compute the delayed "B" for the last time integration and
            subsequently to compute the kinetic energy before the criterion
            evaluation.
            Effectively, we compute L+2 steps if L is the number of Hamiltonian
            dynamics steps.

        Args:
          grads_and_vars: gradient nodes over all walkers and all variables
          var: parameters of the neural network

        Returns:
          a group of operations to be added to the graph
        """
        _, grad = self._pick_grad(grads_and_vars, var)
        step_width_t, inverse_temperature_t, current_step_t, next_eval_step_t, random_noise_t, uniform_random_t = \
            self._prepare_dense(grad, var)
        hd_steps_t = math_ops.cast(self._hd_steps_t, tf.int64)
        # 1/2 * \nabla V (q^n ) \Delta t
        scaled_gradient = .5 * step_width_t * grad
        gradient_global_t = self._add_gradient_contribution(scaled_gradient)
        virial_global_t = self._add_virial_contribution(grad, var)
        # update momentum: B, BB or redraw momenta
        scaled_noise = tf.sqrt(1./inverse_temperature_t)*random_noise_t
        momentum_criterion_block_t, momentum_global_t, inertia_global_t, kinetic_energy_t = \
            self._get_momentum_criterion_block(var,
                                               scaled_gradient, scaled_noise,
                                               current_step_t, next_eval_step_t, hd_steps_t)
        current_energy = self._get_current_total_energy()
        # prior force act directly on var
        #ub_repell, lb_repell = self._apply_prior(var)
        #prior_force = step_width_t * (ub_repell + lb_repell)
        #scaled_momentum = step_width_t * momentum_criterion_block_t - prior_force
        # update variables: A, skip or evaluate criterion (accept/reject)
        scaled_momentum = step_width_t * momentum_criterion_block_t
        p_accept = self._create_p_accept(inverse_temperature_t, current_energy)
        criterion_block_t = self._create_criterion_integration_block(var,
            virial_global_t, scaled_momentum, current_energy,
            p_accept, uniform_random_t,
            current_step_t, next_eval_step_t
        )
        # note: these are evaluated in any order, use control_dependencies if required
        return control_flow_ops.group(*([momentum_criterion_block_t, criterion_block_t,
                                        virial_global_t, inertia_global_t, gradient_global_t,
                                        momentum_global_t, kinetic_energy_t]))
    def _apply_sparse(self, grad, var):
        """Adds nodes to TensorFlow's computational graph in the case of sparsely
        occupied tensors to perform the actual sampling.

        Note that this is not implemented so far.

        Args:
          grad: gradient nodes, i.e. they contain the gradient per parameter in `var`
          var: parameters of the neural network

        Returns:
          a group of operations to be added to the graph
        """
        raise NotImplementedError("Sparse gradient updates are not supported.")
/pysnmp-bw-5.0.3.tar.gz/pysnmp-bw-5.0.3/docs/source/examples/hlapi/v1arch/asyncore/manager/cmdgen/modifying-variables.rst |
.. toctree::
:maxdepth: 2
Modifying variables
-------------------
.. include:: /../../examples/hlapi/v1arch/asyncore/sync/manager/cmdgen/coerce-set-value-to-mib-spec.py
:start-after: """
:end-before: """#
.. literalinclude:: /../../examples/hlapi/v1arch/asyncore/sync/manager/cmdgen/coerce-set-value-to-mib-spec.py
:start-after: """#
:language: python
:download:`Download</../../examples/hlapi/v1arch/asyncore/sync/manager/cmdgen/coerce-set-value-to-mib-spec.py>` script.
.. include:: /../../examples/hlapi/v1arch/asyncore/sync/manager/cmdgen/set-multiple-scalar-values.py
:start-after: """
:end-before: """#
.. literalinclude:: /../../examples/hlapi/v1arch/asyncore/sync/manager/cmdgen/set-multiple-scalar-values.py
:start-after: """#
:language: python
:download:`Download</../../examples/hlapi/v1arch/asyncore/sync/manager/cmdgen/set-multiple-scalar-values.py>` script.
See also: :doc:`library-reference </docs/api-reference>`.
| PypiClean |
/ResourceReservation-1.0.4-src.tar.gz/ResourceReservation-1.0.4/resreservation/README.txt | Resource Reservation plugin for Trac
Copyright 2010 Roberto Longobardi
Project web page on TracHacks: http://trac-hacks.org/wiki/ResourceReservationPlugin
Project web page on SourceForge.net: http://sourceforge.net/projects/resreserv4trac/
Project web page on Pypi: http://pypi.python.org/pypi/ResourceReservation
A Trac plugin and macro to allow for visually planning and reserving the use of resources in your environment, e.g. test machines, consumable test data, etc., with just one click.
=================================================================================================
Change History:
Release 1.0.4 (2011-04-30):
o Fixed bug #8746 Unicode trouble.
Actually this bug could have also been named "It simply doesn't work".
I mistakenly removed a single token from the database creation code, just ruining it.
Thanks Thorsen for reporting it.
o Fixed bug #8464 Project environment upgrade fails with database error
Release 1.0.3 (2011-04-14):
o Implemented security permissions RES_RESERVE_VIEW and RES_RESERVE_MODIFY
o Added date and time tooltip on each table cell, to ease the reading of large time sheets
Release 1.0.1 (2010-08-12):
o Fixed bug #7480 Does not work on IE
o Begin externalizing strings into catalog
o Fixed some problems with concurrent reservation of same resource in same day by different users
Release 1.0 (2010-08-10):
o First release publicly available
| PypiClean |
/sdss-opscore-3.0.4.tar.gz/sdss-opscore-3.0.4/python/opscore/RO/Astro/Cnv/ICRSFromFK4.py | import numpy
__all__ = ["icrsFromFK4"]
import opscore.RO.PhysConst
import opscore.RO.MathUtil
from opscore.RO.Astro import llv
# Constants
# The four 3x3 blocks of the 6-dimensional FK4 (B1950) -> FK5 (J2000)
# position/velocity transformation matrix (cf. P.T. Wallace's FK425),
# applied in icrsFromFK4 as:
#   icrsP = _MatPP . b1950P + _MatPV . b1950V
#   icrsV = _MatVP . b1950P + _MatVV . b1950V
# position-from-position block
_MatPP = numpy.array((
    (+0.999925678186902E+00, -0.111820596422470E-01, -0.485794655896000E-02),
    (+0.111820595717660E-01, +0.999937478448132E+00, -0.271764411850000E-04),
    (+0.485794672118600E-02, -0.271474264980000E-04, +0.999988199738770E+00),
))
# position-from-velocity block
_MatPV = numpy.array ((
    (+0.499975613405255E+02, -0.559114316616731E+00, -0.242908945412769E+00),
    (+0.559114316616731E+00, +0.499981514022567E+02, -0.135874878467212E-02),
    (+0.242908966039250E+00, -0.135755244879589E-02, +0.500006874693025E+02),
))
# velocity-from-position block
_MatVP = numpy.array((
    (-0.262600477903207E-10, -0.115370204968080E-07, +0.211489087156010E-07),
    (+0.115345713338304E-07, -0.128997445928004E-09, -0.413922822287973E-09),
    (-0.211432713109975E-07, +0.594337564639027E-09, +0.102737391643701E-09),
))
# velocity-from-velocity block
_MatVV = numpy.array ((
    (+0.999947035154614E+00, -0.111825061218050E-01, -0.485766968495900E-02),
    (+0.111825060072420E-01, +0.999958833818833E+00, -0.271844713710000E-04),
    (+0.485766994865000E-02, -0.271373095390000E-04, +0.100000956036356E+01),
))
def icrsFromFK4(fk4P, fk4V, fk4Epoch):
    """
    Converts mean catalog FK4 equatorial coordinates to ICRS coordinates.
    Uses the approximation that ICRS is FK5 J2000.

    Inputs:
    - fk4Epoch  TDB date of fk4 coordinates (Besselian epoch)
                note: TDT will always do and UTC is usually adequate
    - fk4P(3)   mean catalog fk4 cartesian position (au)
    - fk4V(3)   mean FK4 cartesian velocity (au per Besselian year),
                i.e. proper motion and radial velocity

    Returns a tuple containing:
    - icrsP(3)  mean ICRS cartesian position (au), a numpy.array
    - icrsV(3)  mean ICRS cartesian velocity (au/year), a numpy.array

    Error Conditions:
    none

    Warnings:
    The FK4 date is in Besselian years.
    The FK4 proper motion is in au/Besselian year,
    whereas the FK5 J2000 proper motion is in au/Julian year.
    The FK4 system refers to a specific set of precession constants;
    not all Besselian-epoch data was precessed using these constants
    (especially data for epochs before B1950).

    References:
    P.T. Wallace's routine FK425
    """
    posFK4 = numpy.asarray(fk4P, dtype=float)
    velFK4 = numpy.asarray(fk4V, dtype=float)

    # e-terms of aberration and precession matrix for the catalog epoch
    # (both helpers expect a Besselian date)
    eTerms = llv.etrms(fk4Epoch)
    precMat = llv.prebn(fk4Epoch, 1950.0)

    # subtract e-terms from the position; as a minor approximation the
    # variation of the e-terms is not removed from the proper motion
    meanPosFK4 = posFK4 - eTerms * opscore.RO.MathUtil.vecMag(posFK4)

    # move the position along its velocity (PM and radial velocity) to B1950,
    # then precess position and velocity to B1950
    b1950P = numpy.dot(precMat, meanPosFK4 + velFK4 * (1950.0 - fk4Epoch))
    b1950V = numpy.dot(precMat, velFK4)

    # apply the 6-D FK4 -> FK5 transformation (ICRS approximated as FK5 J2000)
    icrsP = numpy.dot(_MatPP, b1950P) + numpy.dot(_MatPV, b1950V)
    icrsV = numpy.dot(_MatVP, b1950P) + numpy.dot(_MatVV, b1950V)
    return (icrsP, icrsV)
if __name__ == "__main__":
import opscore.RO.SeqUtil
print("testing icrsFromFK4")
# test data is formatted as follows:
# a list of entries, each consisting of:
# - the input argument
# - the expected result
testData = (
(((1000000, 2000000, 3000000), (40, 50, 60), 1900),
(
( 929683.244963302 , 2026616.27886940 , 3015395.98838120 ),
( 38.3286807625452 , 50.8858334065567 , 60.3627612257013 ),
)),
(((1000000, 0, 0), (40, 0, 0), 1900),
(
( 1003703.41007840 , 22442.8991233262 , 9755.09375276802 ),
( 39.9889184862787 , 0.905706818443208 , 0.367459579186115 ),
)),
(((0, 2000000, 0), (0, 50, 0), 1900),
(
( -44814.8232632364 , 2004499.74395964 , -217.381652380232 ),
( -1.14079387796531 , 49.9880577020497 , -3.764515028559184E-003),
)),
(((0, 0, 30000000), (0, 0, 60), 1900),
(
( -291492.058777010 , -3250.24235322943 , 30004587.8907069 ),
( 5.158182703394798E-002, -2.061946781951834E-002, 60.0046123695534 ),
)),
(((-1000000, -2000000, -3000000), (-40, -50, 60), 1950),
(
( -964968.174481507 , -2013496.57858614 , -3001777.31085417 ),
( -39.7705627092815 , -50.4569077377000 , 59.8272699455138 ),
)),
(((1000000, -2000000, -3000000), (-40, 50, 60), 2000),
(
( 1000018.21231382 , -1999992.74401960 , -2999999.24417909 ),
( -40.0415521563411 , 50.0134577025947 , 59.9793839067662 ),
)),
(((1000000, -2000000, 3000000), (40, 50, 60), 2050),
(
( 990112.301515263 , -2013606.16505919 , 2992172.70812960 ),
( 40.9344740739882 , 49.5597035007198 , 59.7833122391920 ),
)),
)
for testInput, expectedOutput in testData:
actualOutput = icrsFromFK4(*testInput)
expectedFlat = opscore.RO.SeqUtil.flatten(expectedOutput)
actualFlat = opscore.RO.SeqUtil.flatten(actualOutput)
if opscore.RO.SeqUtil.matchSequences(actualFlat, expectedFlat, rtol=1.0e-14):
print("failed on input:", testInput)
print("expected output:\n", expectedOutput)
print("actual output:\n", actualOutput) | PypiClean |
/sents_client_chat-1.0-py3-none-any.whl/client/Client/add_contact.py | import sys
import logging
sys.path.append('../')
from PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QPushButton
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from client_db import ClientStorage
from PyQt5.QtWidgets import QMainWindow, qApp, QMessageBox, QApplication, QListView
# Диалог выбора контакта для добавления
# Dialog for choosing a contact to add
class AddContactDialog(QDialog):
    """
    Dialog for adding a user to the contact list.
    Offers the user a list of possible contacts and
    adds the selected one to the contacts.
    """
    def __init__(self, database, transport):
        super().__init__()
        # client network transport (used to query the server) and local storage
        self.transport = transport
        self.database = database
        # Window title, labels and button captions are user-facing strings
        # and are deliberately kept in Russian.
        self.setFixedSize(350, 120)
        self.setWindowTitle('Выберите контакт для добавления:')
        self.setAttribute(Qt.WA_DeleteOnClose)
        self.setModal(True)
        self.selector_label = QLabel('Выберите контакт для добавления:', self)
        self.selector_label.setFixedSize(200, 20)
        self.selector_label.move(10, 0)
        self.selector = QComboBox(self)
        self.selector.setFixedSize(200, 20)
        self.selector.move(10, 30)
        self.btn_refresh = QPushButton('Обновить список', self)
        self.btn_refresh.setFixedSize(100, 30)
        self.btn_refresh.move(60, 60)
        self.btn_ok = QPushButton('Добавить', self)
        self.btn_ok.setFixedSize(100, 30)
        self.btn_ok.move(230, 20)
        self.btn_cancel = QPushButton('Отмена', self)
        self.btn_cancel.setFixedSize(100, 30)
        self.btn_cancel.move(230, 60)
        self.btn_cancel.clicked.connect(self.close)
        # Populate the list of possible contacts
        self.possible_contacts_update()
        # Wire the refresh button to re-query the server
        self.btn_refresh.clicked.connect(self.update_possible_contacts)
    # Fill the selector with the difference between all known users and the
    # client's current contacts
    def possible_contacts_update(self):
        """
        Fills the list of possible contacts.
        Builds the set of all registered users except those
        already added to the contacts and the user himself.
        :return:
        """
        self.selector.clear()
        # sets of the client's contacts and of all known users
        contacts_list = set(self.database.get_contacts())
        users_list = set(self.database.get_users())
        # Remove ourselves from the user list so that we cannot add ourselves.
        # NOTE(review): raises KeyError if our own username is missing from
        # the known users -- assumed to always be present.
        users_list.remove(self.transport.username)
        #users_list.remove('sent')
        # Offer every known user that is not yet a contact
        self.selector.addItems(users_list - contacts_list)
    # Refresh helper: updates the table of known users from the server,
    # then rebuilds the list of candidate contacts
    def update_possible_contacts(self):
        """
        Refreshes the list of possible contacts. Requests the list of
        known users from the server and updates the window content.
        :return:
        """
        try:
            self.transport.user_list_request()
        except OSError:
            # best effort: keep the stale list if the server is unreachable
            pass
        else:
            self.possible_contacts_update()
if __name__ == '__main__':
    # Ad-hoc manual test: show the dialog against a local client database.
    # NOTE(review): transport=None makes possible_contacts_update() fail on
    # self.transport.username during __init__ -- confirm intended usage.
    app = QApplication([])
    database = ClientStorage('sent')
    window = AddContactDialog(database, None)
    window.show()
    app.exec_()
/numtostr_rus-1.0.1.tar.gz/numtostr_rus-1.0.1/numtostr_rus/mult.py | from itertools import chain, repeat
from typing import MutableSequence, Tuple, Iterable, Sequence, Iterator
from numtostr_rus import db
# Currently all powers of multipliers for both long and short scales are
# multiples of 3. But let's not rely on this fact and implement more general
# logic.
class AnchorMult:
    """An anchor built from one or more multipliers; its power is their sum."""

    __slots__ = 'mults', '_pow'

    def __init__(self, mults: Tuple[db.MultData, ...]):
        self.mults = mults
        total = 0
        for mult in mults:
            total += mult.pow
        self._pow = total

    @property
    def pow(self):
        """Total power of this anchor."""
        return self._pow

    def __str__(self):
        parts = '+'.join(str(m.pow) for m in self.mults)
        return f"({self._pow}={parts})"

    __repr__ = __str__
# Anchor for power 0, built from the zero-power multiplier
# (db.BASIC_MULTS_DATA[0]).
ZERO_ANCHOR_MULT = AnchorMult((db.BASIC_MULTS_DATA[0],))
# Anchor tables for the short scale (SS) and long scale (LS);
# filled at import time by _fill_anchor_mults below.
SS_ANCHOR_MULTS = []
LS_ANCHOR_MULTS = []
def _fill_anchor_mults(mults_data: Iterable[db.MultData], anchor_mults: MutableSequence[AnchorMult]):
    """Populate `anchor_mults` with anchors derived from `mults_data`.

    Every named multiplier becomes a single-multiplier anchor; between two
    named multipliers, composite anchors (the previous multiplier combined
    with each smaller anchor) are inserted so that each intermediate power
    step is covered.

    Args:
        mults_data: multiplier records in increasing-power order; the first
            entry is the zero-power multiplier and is skipped here.
        anchor_mults: output sequence, appended to in increasing-power order.
    """
    mults_data_it = iter(mults_data)
    # Consume zero power mult.
    next(mults_data_it)
    # Consume a thousand.
    prev_mult_data = next(mults_data_it)
    anchor_mults.append(AnchorMult(
        (prev_mult_data,)
    ))
    # Process all other multipliers.
    for mult_data in mults_data_it:
        # Fill the gap up to the next named multiplier with composite
        # anchors.  Note that `anchor_mults` grows while being iterated;
        # the break condition is expected to trigger before the freshly
        # appended anchors are reached.
        for anchor_mult in anchor_mults:
            if prev_mult_data.pow + anchor_mult.pow >= mult_data.pow:
                break
            anchor_mults.append(AnchorMult(
                (*anchor_mult.mults, prev_mult_data)
            ))
        anchor_mults.append(AnchorMult(
            (mult_data,)
        ))
        prev_mult_data = mult_data
# Build the short-scale and long-scale anchor tables once at import time.
_fill_anchor_mults(db.SS_MULTS_DATA, SS_ANCHOR_MULTS)
_fill_anchor_mults(db.LS_MULTS_DATA, LS_ANCHOR_MULTS)
def get_mults(
    anchor_mults: Sequence[AnchorMult],
    step_q: int,
    step_r: int
) -> Iterator[db.MultData]:
    """Get mults of `step_q * len(anchor_mults) + step_r`-th anchor."""
    assert 0 <= step_r < len(anchor_mults)
    if step_q == 0 and step_r == 0:
        # Special case: the zero anchor yields the empty-string multiplier,
        # kept just for uniformity.
        return iter(ZERO_ANCHOR_MULT.mults)
    # The remainder part is completed by repeating the biggest multiplier.
    biggest_mult = anchor_mults[-1].mults[0]
    tail = repeat(biggest_mult, step_q)
    if step_r == 0:
        return tail
    head_anchor = anchor_mults[step_r - 1]
    return chain(head_anchor.mults, tail)
def main():
    """Dump every short-scale anchor, one per line, for manual inspection."""
    for anchor in SS_ANCHOR_MULTS:
        print(anchor)


if __name__ == "__main__":
    main()
/megadetector-5.0.0.tar.gz/megadetector-5.0.0/archive/classification_marcel/tf-slim/nets/inception_v1.py | """Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
# Alias for the TF-Slim API used throughout this file.
slim = tf.contrib.slim
# Shorthand for a truncated-normal weights initializer with the given stddev.
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def _inception_module(net, end_point,
                      depth_b0, depth_b1_reduce, depth_b1,
                      depth_b2_reduce, depth_b2, depth_b3,
                      branch2_conv_scope='Conv2d_0b_3x3'):
  """Builds one Inception ("Mixed") module with four parallel branches.

  Branches: a 1x1 conv; a 1x1 reduction followed by a 3x3 conv; a second
  1x1 reduction followed by a 3x3 conv; and a 3x3 max pool followed by a
  1x1 conv.  The branch outputs are concatenated along the channel axis.

  Args:
    net: input tensor of size [batch_size, height, width, channels].
    end_point: name of the module's variable scope (e.g. 'Mixed_3b').
    depth_b0: output depth of the 1x1 branch (Branch_0).
    depth_b1_reduce: depth of the 1x1 reduction in Branch_1.
    depth_b1: output depth of the 3x3 conv in Branch_1.
    depth_b2_reduce: depth of the 1x1 reduction in Branch_2.
    depth_b2: output depth of the 3x3 conv in Branch_2.
    depth_b3: output depth of the 1x1 conv after the max pool (Branch_3).
    branch2_conv_scope: scope name of the 3x3 conv in Branch_2.  All modules
      use the default 'Conv2d_0b_3x3' except Mixed_5b, which historically
      used 'Conv2d_0a_3x3'; the name is parameterized so that released
      checkpoints keep loading unchanged.

  Returns:
    The concatenated output tensor of the module.
  """
  with tf.variable_scope(end_point):
    with tf.variable_scope('Branch_0'):
      branch_0 = slim.conv2d(net, depth_b0, [1, 1], scope='Conv2d_0a_1x1')
    with tf.variable_scope('Branch_1'):
      branch_1 = slim.conv2d(net, depth_b1_reduce, [1, 1], scope='Conv2d_0a_1x1')
      branch_1 = slim.conv2d(branch_1, depth_b1, [3, 3], scope='Conv2d_0b_3x3')
    with tf.variable_scope('Branch_2'):
      branch_2 = slim.conv2d(net, depth_b2_reduce, [1, 1], scope='Conv2d_0a_1x1')
      branch_2 = slim.conv2d(branch_2, depth_b2, [3, 3], scope=branch2_conv_scope)
    with tf.variable_scope('Branch_3'):
      branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
      branch_3 = slim.conv2d(branch_3, depth_b3, [1, 1], scope='Conv2d_0b_1x1')
    return tf.concat(
        axis=3, values=[branch_0, branch_1, branch_2, branch_3])


def inception_v1_base(inputs,
                      final_endpoint='Mixed_5c',
                      scope='InceptionV1'):
  """Defines the Inception V1 base architecture.

  This architecture is defined in:
    Going deeper with convolutions
    Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
    Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
    http://arxiv.org/pdf/1409.4842v1.pdf.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
      'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
      'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
      'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
    scope: Optional variable_scope.

  Returns:
    A tuple (net, end_points) where net is the output tensor of the
    final_endpoint and end_points is a dictionary from components of the
    network to the corresponding activation.

  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values.
  """
  end_points = {}
  with tf.variable_scope(scope, 'InceptionV1', [inputs]):
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
        weights_initializer=trunc_normal(0.01)):
      with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                          stride=1, padding='SAME'):
        # Stem: 7x7/2 conv, pool, 1x1 conv, 3x3 conv, pool.
        end_point = 'Conv2d_1a_7x7'
        net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'MaxPool_2a_3x3'
        net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Conv2d_2b_1x1'
        net = slim.conv2d(net, 64, [1, 1], scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Conv2d_2c_3x3'
        net = slim.conv2d(net, 192, [3, 3], scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'MaxPool_3a_3x3'
        net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        # Inception modules; per-branch depths as in Table 1 of the paper.
        end_point = 'Mixed_3b'
        net = _inception_module(net, end_point, 64, 96, 128, 16, 32, 32)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_3c'
        net = _inception_module(net, end_point, 128, 128, 192, 32, 96, 64)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'MaxPool_4a_3x3'
        net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_4b'
        net = _inception_module(net, end_point, 192, 96, 208, 16, 48, 64)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_4c'
        net = _inception_module(net, end_point, 160, 112, 224, 24, 64, 64)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_4d'
        net = _inception_module(net, end_point, 128, 128, 256, 24, 64, 64)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_4e'
        net = _inception_module(net, end_point, 112, 144, 288, 32, 64, 64)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_4f'
        net = _inception_module(net, end_point, 256, 160, 320, 32, 128, 128)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'MaxPool_5a_2x2'
        net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        # Mixed_5b keeps its historical 'Conv2d_0a_3x3' scope in Branch_2
        # so that released checkpoints still load; do not "fix" it.
        end_point = 'Mixed_5b'
        net = _inception_module(net, end_point, 256, 160, 320, 32, 128, 128,
                                branch2_conv_scope='Conv2d_0a_3x3')
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
        end_point = 'Mixed_5c'
        net = _inception_module(net, end_point, 384, 192, 384, 48, 128, 128)
        end_points[end_point] = net
        if final_endpoint == end_point: return net, end_points
    raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v1(inputs,
                 num_classes=1000,
                 is_training=True,
                 dropout_keep_prob=0.8,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='InceptionV1',
                 global_pool=False):
    """Defines the Inception V1 architecture.

    This architecture is defined in:
      Going deeper with convolutions
      Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
      Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
      http://arxiv.org/pdf/1409.4842v1.pdf.

    The default image size used to train this network is 224x224.

    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      num_classes: number of predicted classes. If 0 or None, the logits layer
        is omitted and the input features to the logits layer (before dropout)
        are returned instead.
      is_training: whether is training or not.
      dropout_keep_prob: the percentage of activation values that are retained.
      prediction_fn: a function to get predictions out of logits.
      spatial_squeeze: if True, logits is of shape [B, C], if false logits is of
        shape [B, 1, 1, C], where B is batch_size and C is number of classes.
      reuse: whether or not the network and its variables should be reused. To be
        able to reuse 'scope' must be given.
      scope: Optional variable_scope.
      global_pool: Optional boolean flag to control the avgpooling before the
        logits layer. If false or unset, pooling is done with a fixed window
        that reduces default-sized inputs to 1x1, while larger inputs lead to
        larger outputs. If true, any input size is pooled down to 1x1.

    Returns:
      net: a Tensor with the logits (pre-softmax activations) if num_classes
        is a non-zero integer, or the non-dropped-out input to the logits layer
        if num_classes is 0 or None.
      end_points: a dictionary from components of the network to the corresponding
        activation.
    """
    # Final pooling and prediction
    with tf.variable_scope(scope, 'InceptionV1', [inputs], reuse=reuse) as scope:
        # batch_norm and dropout behave differently at train vs. eval time,
        # so is_training is threaded through arg_scope to both.
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            net, end_points = inception_v1_base(inputs, scope=scope)
            with tf.variable_scope('Logits'):
                if global_pool:
                    # Global average pooling.
                    # NOTE(review): keep_dims is the TF1 spelling (renamed to
                    # keepdims in later TF releases) — consistent with this file.
                    net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
                    end_points['global_pool'] = net
                else:
                    # Pooling with a fixed kernel size.
                    # 7x7 reduces the default 224x224 input (downsampled 32x by
                    # the base network) to 1x1; larger inputs yield larger maps.
                    net = slim.avg_pool2d(net, [7, 7], stride=1, scope='AvgPool_0a_7x7')
                    end_points['AvgPool_0a_7x7'] = net
                if not num_classes:
                    # No classifier requested: return pre-dropout features.
                    return net, end_points
                net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
                # 1x1 conv acts as the fully-connected classification layer;
                # no activation/normalization so these are raw logits.
                logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                                     normalizer_fn=None, scope='Conv2d_0c_1x1')
                if spatial_squeeze:
                    # Drop the singleton spatial dims: [B, 1, 1, C] -> [B, C].
                    logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
            end_points['Logits'] = logits
            end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return logits, end_points
inception_v1.default_image_size = 224
inception_v1_arg_scope = inception_utils.inception_arg_scope | PypiClean |
/bigdl_orca_spark321-2.1.0b202207291-py3-none-macosx_10_11_x86_64.whl/bigdl/orca/data/utils.py | import os
import numpy as np
from bigdl.dllib.utils.file_utils import get_file_list
from bigdl.dllib.utils.utils import convert_row_to_numpy
from bigdl.dllib.utils.log4Error import *
def list_s3_file(file_path, env):
    """Expand an S3 path (given without the ``s3://`` scheme) into file URLs.

    :param file_path: "<bucket>/<key>" style path.
    :param env: mapping providing AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY.
    :return: list of "s3://..." file paths.
    """
    bucket, _, key = file_path.partition('/')
    import boto3
    client = boto3.Session(
        aws_access_key_id=env["AWS_ACCESS_KEY_ID"],
        aws_secret_access_key=env["AWS_SECRET_ACCESS_KEY"],
    ).client('s3', verify=False)
    # A path carrying an extension is treated as a single file.
    if os.path.splitext(file_path)[1] != '':
        return ["s3://" + file_path]
    # Otherwise list every object under the prefix.
    resp = client.list_objects_v2(Bucket=bucket, Prefix=key)
    object_keys = [entry['Key'] for entry in resp['Contents']]
    return [os.path.join("s3://" + bucket, name) for name in object_keys]
def extract_one_path(file_path, env):
    """Resolve one input path (s3://, hdfs:// or local) into a list of files.

    Directories are expanded non-recursively for local paths; single files
    are returned as a one-element list of absolute paths.
    """
    prefix, _, remainder = file_path.partition("://")
    if prefix == "s3":
        return list_s3_file(remainder, env)
    if prefix == "hdfs":
        import pyarrow as pa
        fs = pa.hdfs.connect()
        if fs.isfile(file_path):
            return [file_path]
        return get_file_list(file_path)
    # Local file path; could be a relative path.
    from os.path import isfile, abspath, join
    if isfile(file_path):
        return [abspath(file_path)]
    # An error would be already raised here if the path is invalid.
    return [abspath(join(file_path, entry)) for entry in os.listdir(file_path)]
def check_type_and_convert(data, allow_tuple=True, allow_list=True):
    """Validate one shard dict and normalise its "x"/"y" entries.

    :param data: a shard dict with key "x" (and optionally "y").
    :param allow_tuple: boolean, if the model accepts a tuple as input. Default: True
    :param allow_list: boolean, if the model accepts a list as input. Default: True
    :return: dict with "x" (and "y") normalised to a list/tuple of ndarrays.
    """
    def _normalise(value):
        # A bare ndarray becomes a single-element list.
        if isinstance(value, np.ndarray):
            return [value]
        # Tuples/lists must be homogeneous collections of ndarrays.
        if isinstance(value, (tuple, list)) and \
                all(isinstance(entry, np.ndarray) for entry in value):
            return _convert_list_tuple(value, allow_tuple=allow_tuple,
                                       allow_list=allow_list)
        invalidInputError(False,
                          "value of x and y should be a ndarray, "
                          "a tuple of ndarrays or a list of ndarrays")

    invalidInputError(isinstance(data, dict), "each shard should be an dict")
    invalidInputError("x" in data, "key x should in each shard")
    result = {"x": _normalise(data["x"])}
    if "y" in data:
        result["y"] = _normalise(data["y"])
    return result
def get_spec(allow_tuple=True, allow_list=True):
    """Return a function mapping a shard to (feature_spec, label_spec).

    Each spec is a list of (dtype, per-sample shape) pairs; label_spec is
    None when the shard has no "y" key.

    :param allow_tuple: boolean, if the model accepts a tuple as input. Default: True
    :param allow_list: boolean, if the model accepts a list as input. Default: True
    :return: the spec-extraction function.
    """
    def _get_spec(data):
        converted = check_type_and_convert(data, allow_tuple, allow_list)
        # shape[1:] drops the leading batch dimension.
        feature_spec = [(arr.dtype, arr.shape[1:]) for arr in converted["x"]]
        label_spec = None
        if "y" in converted:
            label_spec = [(arr.dtype, arr.shape[1:]) for arr in converted["y"]]
        return feature_spec, label_spec
    return _get_spec
# todo this might be very slow
def flatten_xy(allow_tuple=True, allow_list=True):
    """Return a generator function flattening one shard into per-sample records.

    Each yielded record is (features,) or (features, labels) depending on
    whether the shard carries a "y" entry.

    :param allow_tuple: boolean, if the model accepts a tuple as input. Default: True
    :param allow_list: boolean, if the model accepts a list as input. Default: True
    :return: the flattening generator function.
    """
    def _flatten_xy(data):
        data = check_type_and_convert(data, allow_tuple, allow_list)
        features = data["x"]
        labels = data.get("y")
        # The first feature array's leading dim gives the sample count.
        for i in range(features[0].shape[0]):
            sample_features = [feature[i] for feature in features]
            if labels is not None:
                yield (sample_features, [label[i] for label in labels])
            else:
                yield (sample_features,)
    return _flatten_xy
def combine(data_list):
    """Concatenate a list of homogeneous data items along axis 0.

    Supports dicts of ndarrays (per-key concat), lists/tuples of ndarrays
    (per-position concat, preserving the container type) and bare ndarrays.
    """
    first = data_list[0]
    if isinstance(first, dict):
        return {key: np.concatenate([entry[key] for entry in data_list], axis=0)
                for key in first}
    if isinstance(first, (list, tuple)):
        merged = [np.concatenate([entry[i] for entry in data_list], axis=0)
                  for i in range(len(first))]
        return tuple(merged) if isinstance(first, tuple) else merged
    if isinstance(first, np.ndarray):
        return np.concatenate(data_list, axis=0)
    invalidInputError(False,
                      "value of x and y should be an ndarray, a dict of ndarrays, a tuple"
                      " of ndarrays or a list of ndarrays, please check your input")
def ray_partition_get_data_label(partition_data,
                                 allow_tuple=True,
                                 allow_list=True,
                                 has_label=True):
    """Extract and concatenate features (and optionally labels) from a partition.

    :param partition_data: The data partition from Spark RDD, which should be a list of records.
    :param allow_tuple: Boolean. Whether the model accepts a tuple as input. Default is True.
    :param allow_list: Boolean. Whether the model accepts a list as input. Default is True.
    :param has_label: Boolean. Whether the data partition contains labels.
    :return: Concatenated data for the data partition.
    """
    data_list = [record['x'] for record in partition_data]
    data = _convert_list_tuple(combine(data_list),
                               allow_tuple=allow_tuple, allow_list=allow_list)
    if has_label:
        # Fix: only touch record['y'] when labels are expected; previously the
        # label list was built unconditionally, raising KeyError for unlabeled
        # shards even with has_label=False.
        label_list = [record['y'] for record in partition_data]
        label = _convert_list_tuple(combine(label_list),
                                    allow_tuple=allow_tuple, allow_list=allow_list)
    else:
        label = None
    return data, label
def ray_partitions_get_data_label(partition_list,
                                  allow_tuple=True,
                                  allow_list=True,
                                  has_label=True):
    """Flatten a list of partitions and extract concatenated data and labels.

    Thin wrapper around ray_partition_get_data_label operating on a list of
    partitions instead of a single one.
    """
    flattened = [record for partition in partition_list for record in partition]
    return ray_partition_get_data_label(flattened,
                                        allow_tuple=allow_tuple,
                                        allow_list=allow_list,
                                        has_label=has_label)
def ray_partitions_get_tf_dataset(partition_list, has_label=True):
    """Build a ``tf.data.Dataset`` from a list of Ray partitions.

    Two shard layouts are supported: ndarray shards ({"x": ..., "y": ...})
    and serialized tf.data datasets ({"ds_def": ..., "elem_spec": ...}).

    NOTE(review): ``has_label`` is not forwarded to
    ray_partition_get_data_label below (which defaults to has_label=True),
    so calling with has_label=False on shards lacking "y" would still fail —
    confirm intended behaviour with callers.
    """
    import tensorflow as tf
    # Flatten the list of partitions into one list of shard records.
    partition_data = [item for partition in partition_list for item in partition]
    if len(partition_data) != 0:
        # Inspect the first record to decide which layout we were given.
        sample = partition_data[0]
        keys = sample.keys()
        if "x" in keys:
            if has_label:
                invalidInputError("y" in keys, "key y should in each shard if has_label=True")
            data, label = ray_partition_get_data_label(partition_data,
                                                       allow_tuple=True,
                                                       allow_list=False)
            dataset = tf.data.Dataset.from_tensor_slices((data, label))
        elif "ds_def" in keys and "elem_spec" in keys:
            # Rebuild each serialized dataset and concatenate them in order.
            from tensorflow.python.distribute.coordinator.values import \
                deserialize_dataset_from_graph
            from functools import reduce
            dataset_list = [deserialize_dataset_from_graph(serialized_dataset["ds_def"],
                                                           serialized_dataset["elem_spec"])
                            for serialized_dataset in partition_data]
            dataset = reduce(lambda x, y: x.concatenate(y), dataset_list)
        else:
            invalidInputError(False,
                              "value of x and y should be a ndarray, "
                              "a tuple of ndarrays or a list of ndarrays")
    else:
        # TODO: may cause error — an empty partition yields an empty
        # (features, labels) dataset whose element spec may not match.
        dataset = tf.data.Dataset.from_tensor_slices(([], []))
    return dataset
# todo: this might be very slow
def xshard_to_sample(data):
    """Generate BigDL ``Sample`` objects from one shard dict.

    Yields one Sample per row of the shard's feature arrays. When the shard
    has no "y" entry, a dummy label of -1 is substituted for every sample.
    """
    from bigdl.dllib.utils.file_utils import Sample
    data = check_type_and_convert(data, allow_list=True, allow_tuple=False)
    features = data["x"]
    # Leading dimension of the first feature array is the sample count.
    length = features[0].shape[0]
    if "y" in data:
        labels = data["y"]
    else:
        # One dummy label row of -1 per sample; iterated like a real label list.
        labels = np.array([[-1] * length])
    for i in range(length):
        fs = [feat[i] for feat in features]
        ls = [l[i] for l in labels]
        # Unwrap single-element feature/label lists so Sample receives a
        # bare ndarray rather than a one-element list.
        if len(fs) == 1:
            fs = fs[0]
        if len(ls) == 1:
            ls = ls[0]
        yield Sample.from_ndarray(fs, ls)
def row_to_sample(row, schema, feature_cols, label_cols):
    """Convert one Spark Row into a BigDL Sample.

    When no label columns are given, a constant 0.0 placeholder label is used.
    """
    from bigdl.dllib.utils.common import Sample
    if label_cols:
        feature, label = convert_row_to_numpy(row, schema, feature_cols, label_cols)
        return Sample.from_ndarray(feature, label)
    feature, = convert_row_to_numpy(row, schema, feature_cols, label_cols)
    return Sample.from_ndarray(feature, np.array([0.0]))
def read_pd_hdfs_file_list(iterator, file_type, **kwargs):
    """Read a list of HDFS paths into a single concatenated pandas DataFrame.

    Returns a one-element list so the result can be used directly as an RDD
    partition.
    """
    import pyarrow as pa
    import pandas as pd
    fs = pa.hdfs.connect()
    frames = []
    for path in iterator:
        with fs.open(path, 'rb') as f:
            frames.append(read_pd_file(f, file_type, **kwargs))
    return [pd.concat(frames)]
def read_pd_s3_file_list(iterator, file_type, **kwargs):
    """Read a list of s3:// object URLs into one concatenated pandas DataFrame.

    Credentials are taken from the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
    environment variables. Returns a one-element list for RDD partition use.
    """
    import boto3
    import pandas as pd
    client = boto3.Session(
        aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
        aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
    ).client('s3', verify=False)
    frames = []
    for url in iterator:
        # "s3://bucket/key..." -> bucket and key.
        bucket, _, key = url.split("://")[1].partition('/')
        obj = client.get_object(Bucket=bucket, Key=key)
        frames.append(read_pd_file(obj['Body'], file_type, **kwargs))
    return [pd.concat(frames)]
def read_pd_file(path, file_type, **kwargs):
    """Load a single csv or json path/file-like object into a pandas DataFrame.

    :param path: file path or open file-like object.
    :param file_type: "csv" or "json".
    :param kwargs: forwarded to the underlying pandas reader.
    """
    import pandas as pd
    if file_type == "csv":
        return pd.read_csv(path, **kwargs)
    if file_type == "json":
        return pd.read_json(path, **kwargs)
    invalidInputError(False,
                      "Unsupported file type: %s. Only csv and json files are "
                      "supported for now" % file_type)
def get_class_name(obj):
    """Return *obj*'s class name, module-qualified unless it is a builtin."""
    cls = obj.__class__
    if cls.__module__ == 'builtins':
        return cls.__name__
    return f"{cls.__module__}.{cls.__name__}"
def _convert_list_tuple(data, allow_tuple, allow_list):
if isinstance(data, list):
if not allow_list and allow_tuple:
return tuple(data)
else:
if not allow_tuple and allow_list:
return list(data)
return data
def process_spark_xshards(spark_xshards, num_workers):
    """Wrap a SparkXShards into a RayXShards.

    ``num_workers`` is accepted for interface compatibility but is not used
    by the current implementation.
    """
    from bigdl.orca.data.ray_xshards import RayXShards
    return RayXShards.from_spark_xshards(spark_xshards)
def index_data(x, i):
    """Select sample *i* from nested data (ndarray, dict, tuple or list)."""
    if isinstance(x, np.ndarray):
        return x[i]
    if isinstance(x, dict):
        # Index every value, preserving the keys.
        return {key: value[i] for key, value in x.items()}
    if isinstance(x, tuple):
        return tuple(part[i] for part in x)
    if isinstance(x, list):
        return [part[i] for part in x]
    invalidInputError(False,
                      "data should be an ndarray, a dict of ndarrays, a tuple of ndarrays"
                      " or a list of ndarrays, please check your input")
def get_size(x):
    """Return the number of samples in nested data.

    For an ndarray this is its length; for a dict, the length of any one of
    its values; for a tuple/list, the length of the first element.
    """
    if isinstance(x, np.ndarray):
        return len(x)
    if isinstance(x, dict):
        for value in x.values():
            # All values are assumed to share the same leading dimension,
            # so any one of them determines the size.
            return len(value)
        # Fix: previously an empty dict silently fell through and returned
        # None; report it explicitly like the other invalid inputs.
        invalidInputError(False,
                          "data should not be an empty dict, please check your input")
    elif isinstance(x, (tuple, list)):
        return len(x[0])
    else:
        invalidInputError(False,
                          "data should be an ndarray, a dict of ndarrays, a tuple of ndarrays"
                          " or a list of ndarrays, please check your input")
def spark_df_to_rdd_pd(df, squeeze=False, index_col=None,
                       dtype=None, index_map=None):
    """Convert a Spark DataFrame into an RDD of pandas DataFrames.

    Each partition of the returned RDD yields pandas DataFrames of at most
    ``OrcaContext._shard_size`` rows (unbounded when that is None); the
    squeeze/index_col/dtype/index_map options are applied per frame by
    ``to_pandas``.
    """
    from bigdl.orca.data import SparkXShards
    from bigdl.orca import OrcaContext
    columns = df.columns
    import pyspark.sql.functions as F
    import pyspark.sql.types as T
    # Spark ML vector columns cannot be handed to pandas directly;
    # expand them into plain float lists first.
    to_array = F.udf(lambda v: v.toArray().tolist(), T.ArrayType(T.FloatType()))
    for colName, colType in df.dtypes:
        if colType == 'vector':
            df = df.withColumn(colName, to_array(colName))
    # Shard size controls how many rows go into each pandas DataFrame.
    shard_size = OrcaContext._shard_size
    pd_rdd = df.rdd.mapPartitions(to_pandas(df.columns, squeeze, index_col, dtype, index_map,
                                            batch_size=shard_size))
    return pd_rdd
def spark_df_to_pd_sparkxshards(df, squeeze=False, index_col=None,
                                dtype=None, index_map=None):
    """Convert a Spark DataFrame into a SparkXShards of pandas DataFrames."""
    from bigdl.orca.data import SparkXShards
    return SparkXShards(spark_df_to_rdd_pd(df, squeeze, index_col, dtype, index_map))
def to_pandas(columns, squeeze=False, index_col=None, dtype=None, index_map=None,
              batch_size=None):
    """Build a mapPartitions function converting Row iterators to DataFrames.

    The returned generator function groups incoming rows into pandas
    DataFrames of at most ``batch_size`` rows (all rows when None), then
    applies dtype casts, optional squeeze-to-Series and index assignment.
    """
    def _finalize(pd_df):
        # Apply requested dtype casts.
        if dtype is not None:
            if isinstance(dtype, dict):
                for col, col_type in dtype.items():
                    if isinstance(col, str):
                        if col not in pd_df.columns:
                            invalidInputError(False,
                                              "column to be set type is not"
                                              " in current dataframe")
                        pd_df[col] = pd_df[col].astype(col_type)
                    elif isinstance(col, int):
                        # Integer keys address columns via index_map.
                        if index_map[col] not in pd_df.columns:
                            invalidInputError(False,
                                              "column index to be set type is not"
                                              " in current dataframe")
                        pd_df[index_map[col]] = pd_df[index_map[col]].astype(col_type)
            else:
                pd_df = pd_df.astype(dtype)
        # A single-column frame may be squeezed down to a Series.
        if squeeze and len(pd_df.columns) == 1:
            pd_df = pd_df.iloc[:, 0]
        if index_col:
            pd_df = pd_df.set_index(index_col)
        return pd_df

    def _rows_to_frames(row_iter):
        import pandas as pd
        buffer = []
        for row in row_iter:
            buffer.append(row)
            if batch_size and len(buffer) == batch_size:
                yield _finalize(pd.DataFrame(buffer, columns=columns))
                buffer = []
        # Flush the trailing partial batch, if any.
        if buffer:
            yield _finalize(pd.DataFrame(buffer, columns=columns))
    return _rows_to_frames
def spark_xshards_to_ray_dataset(spark_xshards):
    """Convert a SparkXShards into a Ray Dataset via pandas object refs."""
    import ray
    from bigdl.orca.data.ray_xshards import RayXShards
    ray_xshards = RayXShards.from_spark_xshards(spark_xshards)
    return ray.data.from_pandas_refs(ray_xshards.get_refs())
def generate_string_idx(df, columns, freq_limit, order_by_freq):
    """Generate string-to-index mappings for categorical columns.

    Delegates to the JVM-side ``generateStringIdx`` implementation via
    ``callZooFunc``; ``freq_limit`` filters out rare values and
    ``order_by_freq`` controls index ordering.
    """
    from bigdl.dllib.utils.file_utils import callZooFunc
    return callZooFunc("float", "generateStringIdx", df, columns, freq_limit, order_by_freq)
def check_col_exists(df, columns):
    """Validate that every name in *columns* is a column of *df*.

    Raises (via invalidInputError) listing the missing columns, if any.
    """
    missing = [col for col in columns if col not in df.columns]
    if missing:
        invalidInputError(False,
                          str(missing) + " do not exist in this Table")
def transform_to_shard_dict(data, featureCols, labelCol):
    """Transform XShards of pandas DataFrames into shards of {"x", "y"} dicts.

    Features are stacked column-wise into one 2-D array; the label column is
    reshaped into an (n, 1) array.
    """
    def to_shard_dict(df):
        feature_arrays = [df[col].to_numpy() for col in featureCols]
        return {
            "x": np.stack(feature_arrays, axis=1),
            "y": df[labelCol].to_numpy().reshape((-1, 1)),
        }
    return data.transform_shard(to_shard_dict)
/organize-media-files-1.0.1.tar.gz/organize-media-files-1.0.1/README.rst | What is OMF?
============
Organize Media Files (OMF) is a command-line utility which helps the user dispatch unsorted media files according to metadata tags and configurable rules. OMF uses `Mutagen <https://mutagen.readthedocs.io>`_ to handle audio files. Support for more media file types will be added later.
Installation
============
Using \ *pip install*\ \: ::
$ pip install organize_media_files
Getting started
===============
After successful installation, you can see OMF doing its job by heading into \ *example/*\ , which is located in the OMF top source tree directory. ::
$ cd example/
Here you can see `example.conf <https://github.com/IsaacMother/organize-media-files/blob/master/example/example.conf>`_ and some sample audio files, containing filled metatags. Type: ::
$ omf -d -c example.conf sample_mp3.mp3
Moving:
example/sample_mp3.mp3
To:
/tmp/omf_example/some_artist_mp3 - some_title_mp3
You can see OMF running in \ ``--dry-run``\ . It is designed to prevent unexpected behavior and to show user what is going to happen in specified configuration. Before rushing OMF usage, don't forget to set up proper configuration using .conf files.
Configuration files
===================
OMF provides sample \ **system.conf**\ and \ **user.conf**\ files, both located in the \ */etc/.omfrc/*\ directory. Configuration files consist of two sections. The \ *[patterns]*\ section is where the user sets up dispatch paths - a \ *pattern*\ , which must be given in the form of absolute paths (\'~\' may be used to specify the \ *home*\ directory) with inclusion of ``{metatags}``.
Example audio file pattern in UNIX system\: ::
uno = ~/Music/{artist}/{tracknumber}-{title}
Valid ``{metatags}`` for audio files are: \ ``{artist}``\ , \ ``{title}``\ , \ ``{album}``\ , \ ``{tracknumber}``\ , \ ``{genre}``\ , \ ``{date}``\ , \ ``{encoder}``\ . Due to the simplicity of the utility, OMF won't lexically analyze patterns (except for valid \ ``{metatags}``\ ), so it is up to the user to specify a correct pattern (use the \ ``--dry-run``\ option to see what OMF is going to do).
Usage
=====
Basic OMF usage is: ::
$ omf filename_1 filename_2
In this case \ ``filename_1``\ and \ ``filename_2``\ will be dispatched according to the default pattern in \ **user.conf**\ .
Options:
* \ ``-h, --help``\ - show help message.
* \ ``-d, --dry-run``\ - run OMF without actually moving files and print verbose information.
* \ ``-c FILE, --config FILE``\ - specify an alternative configuration file.
* \ ``-f, --force``\ - ignore inconsistencies or/and overwrite files (for example, if a file, in a given list of filenames, with the same name already exists, overwrite it).
* \ ``-p PATTERN-NAME, --pattern PATTERN_NAME``\ - explicitly specify dispatch pattern.
TODO
====
1. Create documentation.
2. Figure out how to move user.conf to home directory (got bug on it).
3. Add bash-completion for patterns.
4. Append extensions to the end of dispathed file.
Some warnings for future
========================
1. OMF dispatching files using pathlib.Path(pattern-specified-path). Such behavior can lead to usage misunderstandings. | PypiClean |
/client_chat_pyqt_march_24-0.0.1-py3-none-any.whl/client/client/transport.py |
import socket
import time
import logging
import json
import threading
import hashlib
import hmac
import binascii
from PyQt5.QtCore import pyqtSignal, QObject
from common.variables import ACTION, PRESENCE, TIME, USER, \
ACCOUNT_NAME, PUBLIC_KEY, ERROR, RESPONSE, DATA, RESPONSE_511, MESSAGE, \
MESSAGE_TEXT, DESTINATION, SENDER, GET_CONTACTS, LIST_INFO, \
PUBLIC_KEY_REQUEST, ADD_CONTACT, USERS_REQUEST, REMOVE_CONTACT, EXIT
from common.utils import send_message, get_message
from common.errors import ServerError
# Module-level logger for the client application.
LOG = logging.getLogger('app.client')

# Lock serialising all access to the shared client socket.
socket_lock = threading.Lock()
class ClientTransport(threading.Thread, QObject):
    """Transport subsystem of the client module.

    Runs in its own thread and handles all interaction with the server:
    authentication, sending messages, and keeping the local database of
    contacts and known users up to date.
    """
    # Qt signals: a new message arrived, the server requested a refresh (205),
    # and the connection to the server was lost.
    new_message = pyqtSignal(dict)
    message_205 = pyqtSignal()
    connection_lost = pyqtSignal()

    def __init__(self, port, ip_address, database, username, passwd, keys):
        # Initialise both parent classes.
        threading.Thread.__init__(self)
        QObject.__init__(self)
        # Database wrapper - all persistence goes through it.
        self.database = database
        # User name.
        self.username = username
        # Password.
        self.password = passwd
        # Socket for talking to the server.
        self.transport = None
        # RSA key pair used for encryption/authentication.
        self.keys = keys
        # Establish the connection:
        self.connection_init(port, ip_address)
        # Refresh the known-users and contacts tables.
        try:
            self.user_list_update()
            self.contacts_list_update()
        except OSError as err:
            if err.errno:
                LOG.critical(f'Потеряно соединение с сервером.')
                raise ServerError('Потеряно соединение с сервером!')
            LOG.error(
                'Timeout соединения при обновлении списков пользователей.')
        except json.JSONDecodeError:
            LOG.critical(f'Потеряно соединение с сервером.')
            raise ServerError('Потеряно соединение с сервером!')
        # Flag: the transport keeps running while this is True.
        self.running = True

    def connection_init(self, port, ip):
        """Establish the connection to the server and authenticate."""
        # Create the socket and announce ourselves to the server.
        self.transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # The timeout is required so the socket gets released periodically.
        self.transport.settimeout(5)
        # Try to connect, 5 attempts; set the flag to True on success.
        connected = False
        for i in range(5):
            LOG.info(f'Попытка подключения №{i + 1}')
            try:
                self.transport.connect((ip, port))
            except (OSError, ConnectionRefusedError):
                pass
            else:
                connected = True
                LOG.debug("Connection established.")
                break
            time.sleep(1)
        # If connecting failed - raise an exception.
        if not connected:
            LOG.critical('Не удалось установить соединение с сервером')
            raise ServerError('Не удалось установить соединение с сервером')
        LOG.debug('Starting auth dialog.')
        # Start the authorisation procedure.
        # Compute the password hash (PBKDF2 with the lowercase username as salt).
        passwd_bytes = self.password.encode('utf-8')
        salt = self.username.lower().encode('utf-8')
        passwd_hash = hashlib.pbkdf2_hmac('sha512', passwd_bytes, salt, 10000)
        passwd_hash_string = binascii.hexlify(passwd_hash)
        LOG.debug(f'Passwd hash ready: {passwd_hash_string}')
        # Get the public key and decode it from bytes.
        pubkey = self.keys.publickey().export_key().decode('ascii')
        # Authenticate on the server.
        with socket_lock:
            presense = {
                ACTION: PRESENCE,
                TIME: time.time(),
                USER: {
                    ACCOUNT_NAME: self.username,
                    PUBLIC_KEY: pubkey
                }
            }
            LOG.debug(f"Presence message = {presense}")
            # Send the greeting message to the server.
            try:
                send_message(self.transport, presense)
                ans = get_message(self.transport)
                LOG.debug(f'Server response = {ans}.')
                # If the server returned an error, raise an exception.
                if RESPONSE in ans:
                    if ans[RESPONSE] == 400:
                        raise ServerError(ans[ERROR])
                    elif ans[RESPONSE] == 511:
                        # Everything is fine - continue the authorisation
                        # procedure: answer the server's challenge with an
                        # HMAC of it keyed by the password hash.
                        ans_data = ans[DATA]
                        hash = hmac.new(
                            passwd_hash_string, ans_data.encode('utf-8'),
                            'MD5')
                        digest = hash.digest()
                        my_ans = RESPONSE_511
                        my_ans[DATA] = binascii.b2a_base64(
                            digest).decode('ascii')
                        send_message(self.transport, my_ans)
                        self.process_server_ans(get_message(self.transport))
            except (OSError, json.JSONDecodeError) as err:
                LOG.debug(f'Connection error.', exc_info=err)
                raise ServerError('Сбой соединения в процессе авторизации.')

    def process_server_ans(self, message):
        """Process incoming messages from the server."""
        LOG.debug(f'Разбор сообщения от сервера: {message}')
        # If this is an acknowledgement of something.
        if RESPONSE in message:
            if message[RESPONSE] == 200:
                return
            elif message[RESPONSE] == 400:
                raise ServerError(f'{message[ERROR]}')
            elif message[RESPONSE] == 205:
                # Server-side state changed: refresh lists and notify the UI.
                self.user_list_update()
                self.contacts_list_update()
                self.message_205.emit()
            else:
                LOG.error(
                    f'Принят неизвестный код подтверждения '
                    f'{message[RESPONSE]}')
        # If this is a message from a user addressed to us, emit the
        # new-message signal so the UI can store and display it.
        elif ACTION in message and message[ACTION] == MESSAGE and SENDER in \
                message and DESTINATION in message and MESSAGE_TEXT in \
                message and message[DESTINATION] == self.username:
            LOG.debug(
                f'Получено сообщение от пользователя {message[SENDER]}:'
                f'{message[MESSAGE_TEXT]}')
            self.new_message.emit(message)

    def contacts_list_update(self):
        """Refresh the contact list from the server."""
        self.database.contacts_clear()
        # Fix: log the logged-in user, not the Thread name — self.name is
        # the threading.Thread attribute (e.g. "Thread-1"), not the account.
        LOG.debug(f'Запрос контакт листа для пользователя {self.username}')
        req = {
            ACTION: GET_CONTACTS,
            TIME: time.time(),
            USER: self.username
        }
        LOG.debug(f'Сформирован запрос {req}')
        with socket_lock:
            send_message(self.transport, req)
            ans = get_message(self.transport)
        LOG.debug(f'Получен ответ {ans}')
        if RESPONSE in ans and ans[RESPONSE] == 202:
            for contact in ans[LIST_INFO]:
                self.database.add_contact(contact)
        else:
            LOG.error('Не удалось обновить список контактов.')

    def user_list_update(self):
        """Refresh the list of known users from the server."""
        LOG.debug(f'Запрос списка известных пользователей {self.username}')
        req = {
            ACTION: USERS_REQUEST,
            TIME: time.time(),
            ACCOUNT_NAME: self.username
        }
        with socket_lock:
            send_message(self.transport, req)
            ans = get_message(self.transport)
        if RESPONSE in ans and ans[RESPONSE] == 202:
            self.database.add_users(ans[LIST_INFO])
        else:
            LOG.error('Не удалось обновить список известных пользователей.')

    def key_request(self, user):
        """Request a user's public key from the server."""
        LOG.debug(f'Запрос публичного ключа для {user}')
        req = {
            ACTION: PUBLIC_KEY_REQUEST,
            TIME: time.time(),
            ACCOUNT_NAME: user
        }
        with socket_lock:
            send_message(self.transport, req)
            ans = get_message(self.transport)
        if RESPONSE in ans and ans[RESPONSE] == 511:
            return ans[DATA]
        else:
            LOG.error(f'Не удалось получить ключ собеседника{user}.')

    def add_contact(self, contact):
        """Tell the server that a contact was added."""
        LOG.debug(f'Создание контакта {contact}')
        req = {
            ACTION: ADD_CONTACT,
            TIME: time.time(),
            USER: self.username,
            ACCOUNT_NAME: contact
        }
        with socket_lock:
            send_message(self.transport, req)
            self.process_server_ans(get_message(self.transport))

    def remove_contact(self, contact):
        """Tell the server that a contact was removed."""
        LOG.debug(f'Удаление контакта {contact}')
        req = {
            ACTION: REMOVE_CONTACT,
            TIME: time.time(),
            USER: self.username,
            ACCOUNT_NAME: contact
        }
        with socket_lock:
            send_message(self.transport, req)
            self.process_server_ans(get_message(self.transport))

    def transport_shutdown(self):
        """Notify the server that the client is shutting down."""
        self.running = False
        message = {
            ACTION: EXIT,
            TIME: time.time(),
            ACCOUNT_NAME: self.username
        }
        with socket_lock:
            try:
                send_message(self.transport, message)
            except OSError:
                pass
        LOG.debug('Транспорт завершает работу.')
        # Give the receiver loop a moment to notice the flag.
        time.sleep(0.5)

    def send_message(self, to, message):
        """Send a message addressed to a user via the server."""
        message_dict = {
            ACTION: MESSAGE,
            SENDER: self.username,
            DESTINATION: to,
            TIME: time.time(),
            MESSAGE_TEXT: message
        }
        LOG.debug(f'Сформирован словарь сообщения: {message_dict}')
        # Wait for the socket to be free before sending.
        with socket_lock:
            send_message(self.transport, message_dict)
            self.process_server_ans(get_message(self.transport))
            LOG.info(f'Отправлено сообщение для пользователя {to}')

    def run(self):
        """Main loop of the transport thread (server message receiver)."""
        LOG.debug('Запущен процесс - приёмник сообщений с сервера.')
        while self.running:
            # Sleep a second, then try to take the socket again. Without
            # this delay the sender could wait a long time for the lock.
            time.sleep(1)
            message = None
            with socket_lock:
                try:
                    # Short timeout so the lock is released frequently.
                    self.transport.settimeout(0.5)
                    message = get_message(self.transport)
                except OSError as err:
                    if err.errno:
                        LOG.critical(f'Потеряно соединение с сервером.')
                        self.running = False
                        self.connection_lost.emit()
                # Connection problems.
                except (ConnectionError, ConnectionAbortedError,
                        ConnectionResetError, json.JSONDecodeError, TypeError):
                    LOG.debug(f'Потеряно соединение с сервером.')
                    self.running = False
                    self.connection_lost.emit()
                finally:
                    self.transport.settimeout(5)
            # If a message was received, dispatch it to the handler:
            if message:
                LOG.debug(f'Принято сообщение с сервера: {message}')
                self.process_server_ans(message)
/drypatrick-2021.7.5.tar.gz/drypatrick-2021.7.5/homeassistant/components/firmata/__init__.py | import asyncio
from copy import copy
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_BINARY_SENSORS,
CONF_LIGHTS,
CONF_MAXIMUM,
CONF_MINIMUM,
CONF_NAME,
CONF_PIN,
CONF_SENSORS,
CONF_SWITCHES,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv, device_registry as dr
from .board import FirmataBoard
from .const import (
CONF_ARDUINO_INSTANCE_ID,
CONF_ARDUINO_WAIT,
CONF_DIFFERENTIAL,
CONF_INITIAL_STATE,
CONF_NEGATE_STATE,
CONF_PIN_MODE,
CONF_PLATFORM_MAP,
CONF_SAMPLING_INTERVAL,
CONF_SERIAL_BAUD_RATE,
CONF_SERIAL_PORT,
CONF_SLEEP_TUNE,
DOMAIN,
FIRMATA_MANUFACTURER,
PIN_MODE_ANALOG,
PIN_MODE_INPUT,
PIN_MODE_OUTPUT,
PIN_MODE_PULLUP,
PIN_MODE_PWM,
)
# Module-level logger.
_LOGGER = logging.getLogger(__name__)

# hass.data key under which per-board configurations are stored.
DATA_CONFIGS = "board_configs"

# Analog pins are addressed as "A<number>" strings (e.g. "A0").
ANALOG_PIN_SCHEMA = vol.All(cv.string, vol.Match(r"^A[0-9]+$"))

# Digital output (switch) configuration.
SWITCH_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        # Both digital and analog pins may be used as digital output
        vol.Required(CONF_PIN): vol.Any(cv.positive_int, ANALOG_PIN_SCHEMA),
        vol.Required(CONF_PIN_MODE): PIN_MODE_OUTPUT,
        vol.Optional(CONF_INITIAL_STATE, default=False): cv.boolean,
        vol.Optional(CONF_NEGATE_STATE, default=False): cv.boolean,
    },
    required=True,
)

# PWM/analog output (light) configuration.
LIGHT_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        # Both digital and analog pins may be used as PWM/analog output
        vol.Required(CONF_PIN): vol.Any(cv.positive_int, ANALOG_PIN_SCHEMA),
        vol.Required(CONF_PIN_MODE): PIN_MODE_PWM,
        vol.Optional(CONF_INITIAL_STATE, default=0): cv.positive_int,
        vol.Optional(CONF_MINIMUM, default=0): cv.positive_int,
        vol.Optional(CONF_MAXIMUM, default=255): cv.positive_int,
    },
    required=True,
)

# Digital input (binary sensor) configuration.
BINARY_SENSOR_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        # Both digital and analog pins may be used as digital input
        vol.Required(CONF_PIN): vol.Any(cv.positive_int, ANALOG_PIN_SCHEMA),
        vol.Required(CONF_PIN_MODE): vol.Any(PIN_MODE_INPUT, PIN_MODE_PULLUP),
        vol.Optional(CONF_NEGATE_STATE, default=False): cv.boolean,
    },
    required=True,
)

# Analog input (sensor) configuration.
SENSOR_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        # Currently only analog input sensor is implemented
        vol.Required(CONF_PIN): ANALOG_PIN_SCHEMA,
        vol.Required(CONF_PIN_MODE): PIN_MODE_ANALOG,
        # Default differential is 40 to avoid a flood of messages on initial setup
        # in case pin is unplugged. Firmata responds really really fast
        vol.Optional(CONF_DIFFERENTIAL, default=40): vol.All(
            cv.positive_int, vol.Range(min=1)
        ),
    },
    required=True,
)

# Per-board configuration: connection settings plus the entity lists above.
BOARD_CONFIG_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_SERIAL_PORT): cv.string,
        vol.Optional(CONF_SERIAL_BAUD_RATE): cv.positive_int,
        vol.Optional(CONF_ARDUINO_INSTANCE_ID): cv.positive_int,
        vol.Optional(CONF_ARDUINO_WAIT): cv.positive_int,
        vol.Optional(CONF_SLEEP_TUNE): vol.All(
            vol.Coerce(float), vol.Range(min=0.0001)
        ),
        vol.Optional(CONF_SAMPLING_INTERVAL): cv.positive_int,
        vol.Optional(CONF_SWITCHES): [SWITCH_SCHEMA],
        vol.Optional(CONF_LIGHTS): [LIGHT_SCHEMA],
        vol.Optional(CONF_BINARY_SENSORS): [BINARY_SENSOR_SCHEMA],
        vol.Optional(CONF_SENSORS): [SENSOR_SCHEMA],
    },
    required=True,
)

# Top-level domain configuration: a list of board configs.
CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: vol.All(cv.ensure_list, [BOARD_CONFIG_SCHEMA])}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
    """Set up the Firmata domain.

    Reconciles existing config entries against the YAML configuration:
    entries whose serial port is no longer configured are removed, entries
    that still match are updated in place (preserving their stored name),
    and boards without an entry trigger an import flow.
    """
    # Remove entries whose serial port no longer appears in the YAML config.
    configured_ports = {board[CONF_SERIAL_PORT] for board in config[DOMAIN]}
    for entry in hass.config_entries.async_entries(DOMAIN):
        if entry.data[CONF_SERIAL_PORT] not in configured_ports:
            await hass.config_entries.async_remove(entry.entry_id)
    # Set up new entries and update old entries.
    for board in config[DOMAIN]:
        firmata_config = copy(board)
        matching_entry = next(
            (
                entry
                for entry in hass.config_entries.async_entries(DOMAIN)
                if entry.data[CONF_SERIAL_PORT] == board[CONF_SERIAL_PORT]
            ),
            None,
        )
        if matching_entry is not None:
            # Keep the name the entry was originally created with.
            firmata_config[CONF_NAME] = matching_entry.data[CONF_NAME]
            hass.config_entries.async_update_entry(matching_entry, data=firmata_config)
        else:
            hass.async_create_task(
                hass.config_entries.flow.async_init(
                    DOMAIN,
                    context={"source": config_entries.SOURCE_IMPORT},
                    data=firmata_config,
                )
            )
    return True
async def async_setup_entry(
    hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> bool:
    """Set up a Firmata board for a config entry."""
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {}
    _LOGGER.debug(
        "Setting up Firmata id %s, name %s, config %s",
        config_entry.entry_id,
        config_entry.data[CONF_NAME],
        config_entry.data,
    )
    # Connect to the board; abort entry setup if the connection fails.
    board = FirmataBoard(config_entry.data)
    if not await board.async_setup():
        return False
    hass.data[DOMAIN][config_entry.entry_id] = board
    async def handle_shutdown(event) -> None:
        """Handle shutdown of board when Home Assistant shuts down."""
        # Ensure board was not already removed previously before shutdown
        if config_entry.entry_id in hass.data[DOMAIN]:
            await board.async_reset()
    # Reset the board cleanly on Home Assistant stop; the listener is
    # removed automatically when the entry unloads.
    config_entry.async_on_unload(
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, handle_shutdown)
    )
    # Register the board as a device so entities can attach to it.
    device_registry = await dr.async_get_registry(hass)
    device_registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={},
        identifiers={(DOMAIN, board.name)},
        manufacturer=FIRMATA_MANUFACTURER,
        name=board.name,
        sw_version=board.firmware_version,
    )
    # Forward setup to each platform (switch/light/etc.) that has
    # configuration present in the entry data.
    for (conf, platform) in CONF_PLATFORM_MAP.items():
        if conf in config_entry.data:
            hass.async_create_task(
                hass.config_entries.async_forward_entry_setup(config_entry, platform)
            )
    return True
async def async_unload_entry(
    hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> bool:
    """Shutdown and close a Firmata board for a config entry.

    Returns True only if every configured platform unloaded successfully
    and the board reset cleanly.
    """
    # Fix: the return annotation previously said ``None`` even though the
    # function returns a bool, which Home Assistant requires from
    # async_unload_entry to know whether the unload succeeded.
    _LOGGER.debug("Closing Firmata board %s", config_entry.data[CONF_NAME])
    # Unload every platform that was forwarded during setup, concurrently.
    unload_entries = []
    for conf, platform in CONF_PLATFORM_MAP.items():
        if conf in config_entry.data:
            unload_entries.append(
                hass.config_entries.async_forward_entry_unload(config_entry, platform)
            )
    results = []
    if unload_entries:
        results = await asyncio.gather(*unload_entries)
    # Remove the board from hass.data and reset it; include the reset
    # outcome in the overall success check.
    results.append(await hass.data[DOMAIN].pop(config_entry.entry_id).async_reset())
    return False not in results
/fabric_fim-1.5.5b0-py3-none-any.whl/fim/user/component.py |
from typing import Any, Dict, List, Tuple
import recordclass
import uuid
from fim.view_only_dict import ViewOnlyDict
from ..graph.abc_property_graph import ABCPropertyGraph
from .model_element import ModelElement, ElementType, TopologyException
from .network_service import NetworkService, ServiceType
from .interface import Interface
from ..slivers.capacities_labels import Labels
from ..slivers.attached_components import ComponentSliver, ComponentType
from ..slivers.network_service import NSLayer
from ..slivers.component_catalog import ComponentCatalog, ComponentModelType
class Component(ModelElement):
    """
    A component, like a GPU, a NIC, an FPGA or an NVMe drive of a node in a model.
    In addition to public methods the following calls
    return various dictionaries or lists:
    component.interfaces - a dictionary of interfaces
    component.interface_list - a list of interfaces
    component.network_services - a dictionary of network services
    """
    def __init__(self, *, name: str, node_id: str = None, topo: Any,
                 etype: ElementType = ElementType.EXISTING, model: str = None,
                 ctype: ComponentType = None, comp_model: ComponentModelType = None,
                 parent_node_id: str = None,
                 network_service_node_id: str = None, interface_node_ids: List[str] = None,
                 interface_labels: List[Labels] = None,
                 check_existing: bool = False,
                 **kwargs):
        """
        Don't call this yourself, use Node.add_component(). Instantiates components based on
        catalog resource file.
        :param name: component name
        :param node_id: graph node id (auto-generated for new components when omitted)
        :param topo: the topology object this component belongs to
        :param etype: is this supposed to be new or existing
        :param model: must be specified if a new component
        :param ctype: component type
        :param comp_model: Component and Model combined
        :param parent_node_id: node_id of the parent Node (for new components)
        :param network_service_node_id: node id of network_service if one needs to be added (for substrate models only)
        :param interface_node_ids: a list of node ids for expected interfaces (for substrate models only)
        :param interface_labels: a list of Labels structure to associate with each interface
        :param check_existing: check if a Component exists in the graph
        """
        assert name is not None
        assert topo is not None
        if etype == ElementType.NEW:
            # cant use isinstance as it would create circular import dependencies
            if str(topo.__class__) == "<class 'fim.user.topology.SubstrateTopology'>" and node_id is None:
                raise TopologyException("When adding components to substrate topology nodes you must specify static Node ID")
            if node_id is None:
                # experiment (non-substrate) topologies get a random id
                node_id = str(uuid.uuid4())
            if parent_node_id is None:
                raise TopologyException("Parent node id must be specified for new components")
            if (model is None or ctype is None) and comp_model is None:
                raise TopologyException("Model and component type or comp_model must be specified for new components")
            # NIC components carry network services/interfaces, so substrate
            # models must pin their ids and labels statically up front.
            if str(topo.__class__) == "<class 'fim.user.topology.SubstrateTopology'>" and \
                    (ctype == ComponentType.SharedNIC or ctype == ComponentType.SmartNIC) and \
                    (network_service_node_id is None or interface_node_ids is None or interface_labels is None):
                raise TopologyException('For substrate topologies and components with network interfaces '
                                        'static network service node id, interface node ids and interface labels'
                                        'must be specified')
            super().__init__(name=name, node_id=node_id, topo=topo)
            cata = ComponentCatalog()
            # get the name of the parent node to pass to component generation
            _, parent_props = self.topo.graph_model.get_node_properties(node_id=parent_node_id)
            parent_name = parent_props.get(ABCPropertyGraph.PROP_NAME, None)
            comp_sliver = cata.generate_component(name=name, model=model, ctype=ctype,
                                                  model_type=comp_model,
                                                  ns_node_id=network_service_node_id,
                                                  interface_node_ids=interface_node_ids,
                                                  interface_labels=interface_labels,
                                                  parent_name=parent_name)
            comp_sliver.node_id = node_id
            comp_sliver.set_properties(**kwargs)
            # persist the new component under its parent node in the graph
            self.topo.graph_model.add_component_sliver(parent_node_id=parent_node_id, component=comp_sliver)
        else:
            # existing component: just validate and wrap the graph node
            assert node_id is not None
            super().__init__(name=name, node_id=node_id, topo=topo)
            if check_existing and not self.topo.graph_model.check_node_name(node_id=node_id, name=name,
                                                                            label=ABCPropertyGraph.CLASS_Component):
                raise TopologyException(f"Component with this id and name {name} doesn't exist")
    @property
    def type(self):
        # Read live from the graph; the __dict__ guard avoids attribute errors
        # on partially-constructed objects (presumably during copy/pickle —
        # TODO confirm).
        return self.get_property('type') if self.__dict__.get('topo', None) is not None else None
    @property
    def model(self):
        # Read live from the graph; same partially-constructed-object guard
        # as ``type`` above.
        return self.get_property('model') if self.__dict__.get('topo', None) is not None else None
    def get_property(self, pname: str) -> Any:
        """
        Retrieve a component property
        :param pname:
        :return:
        """
        # Rehydrate the sliver from graph properties and delegate the lookup.
        _, node_properties = self.topo.graph_model.get_node_properties(node_id=self.node_id)
        comp_sliver = self.topo.graph_model.component_sliver_from_graph_properties_dict(node_properties)
        return comp_sliver.get_property(pname)
    def set_property(self, pname: str, pval: Any):
        """
        Set a component property or unset of pval is None
        :param pname:
        :param pval:
        :return:
        """
        if pval is None:
            self.unset_property(pname)
            return
        # Build a throwaway sliver holding just this property, then merge its
        # graph representation into the stored node.
        comp_sliver = ComponentSliver()
        comp_sliver.set_property(prop_name=pname, prop_val=pval)
        # write into the graph
        prop_dict = self.topo.graph_model.component_sliver_to_graph_properties_dict(comp_sliver)
        self.topo.graph_model.update_node_properties(node_id=self.node_id, props=prop_dict)
    def set_properties(self, **kwargs):
        """
        Set multiple properties of the component
        :param kwargs:
        :return:
        """
        comp_sliver = ComponentSliver()
        comp_sliver.set_properties(**kwargs)
        # write into the graph
        prop_dict = self.topo.graph_model.component_sliver_to_graph_properties_dict(comp_sliver)
        self.topo.graph_model.update_node_properties(node_id=self.node_id, props=prop_dict)
    @staticmethod
    def list_properties() -> Tuple[str]:
        """
        List the settable property names of a ComponentSliver.
        :return:
        """
        return ComponentSliver.list_properties()
    def __get_interface_by_id(self, node_id: str) -> Interface:
        """
        Get an interface of a node by its node_id, return Interface object
        :param node_id:
        :return:
        """
        assert node_id is not None
        _, node_props = self.topo.graph_model.get_node_properties(node_id=node_id)
        assert node_props.get(ABCPropertyGraph.PROP_NAME, None) is not None
        return Interface(name=node_props[ABCPropertyGraph.PROP_NAME], node_id=node_id,
                         topo=self.topo)
    def __get_ns_by_id(self, node_id: str) -> NetworkService:
        """
        Get an network service of a node by its node_id, return NetworkService object
        :param node_id:
        :return:
        """
        assert node_id is not None
        _, node_props = self.topo.graph_model.get_node_properties(node_id=node_id)
        assert node_props.get(ABCPropertyGraph.PROP_NAME, None) is not None
        return NetworkService(name=node_props[ABCPropertyGraph.PROP_NAME], node_id=node_id,
                              topo=self.topo)
    def __list_interfaces(self) -> ViewOnlyDict:
        """
        List all interfaces of the component as a dictionary keyed by
        interface name.
        :return:
        """
        node_id_list = self.topo.graph_model.get_all_node_or_component_connection_points(parent_node_id=self.node_id)
        # Could consider using frozendict here
        ret = dict()
        for nid in node_id_list:
            i = self.__get_interface_by_id(nid)
            ret[i.name] = i
        return ViewOnlyDict(ret)
    def __list_of_interfaces(self) -> Tuple[Interface]:
        """
        List all interfaces of the component as a tuple.
        :return:
        """
        node_id_list = self.topo.graph_model.get_all_node_or_component_connection_points(parent_node_id=self.node_id)
        ret = list()
        for nid in node_id_list:
            i = self.__get_interface_by_id(nid)
            ret.append(i)
        return tuple(ret)
    def __list_network_services(self) -> ViewOnlyDict:
        """
        List all network service children of a node as a dictionary organized
        by network service name. Modifying the dictionary will not affect
        the underlying model, but modifying Components in the dictionary will.
        :return:
        """
        node_id_list = self.topo.graph_model.get_all_network_node_or_component_nss(parent_node_id=self.node_id)
        # Could consider using frozendict or other immutable idioms
        ret = dict()
        for nid in node_id_list:
            c = self.__get_ns_by_id(nid)
            ret[c.name] = c
        return ViewOnlyDict(ret)
    @property
    def interfaces(self):
        # Dictionary of interfaces keyed by name (read-only view).
        return self.__list_interfaces()
    @property
    def interface_list(self):
        # Tuple of interfaces in graph order.
        return self.__list_of_interfaces()
    @property
    def network_services(self):
        # Dictionary of child network services keyed by name (read-only view).
        return self.__list_network_services()
    def get_sliver(self) -> ComponentSliver:
        """
        Get a deep sliver representation of this component from graph
        :return:
        """
        return self.topo.graph_model.build_deep_component_sliver(node_id=self.node_id)
    def __repr__(self):
        # Delegate to the sliver's repr built from current graph properties.
        _, node_properties = self.topo.graph_model.get_node_properties(node_id=self.node_id)
        comp_sliver = self.topo.graph_model.component_sliver_from_graph_properties_dict(node_properties)
        return comp_sliver.__repr__()
    def __str__(self):
        return self.__repr__()
/pyTenable-1.4.13.tar.gz/pyTenable-1.4.13/tenable/io/exclusions.py | from datetime import datetime
from restfly.utils import dict_merge
from tenable.io.base import TIOEndpoint
class ExclusionsAPI(TIOEndpoint):
    '''
    This will contain all methods related to exclusions
    '''
    def create(self, name, members, start_time=None, end_time=None,
               timezone=None, description=None, frequency=None,
               interval=None, weekdays=None, day_of_month=None,
               enabled=True, network_id=None):
        '''
        Create a scan target exclusion.
        :devportal:`exclusions: create <exclusions-create>`
        Args:
            name (str): The name of the exclusion to create.
            members (list):
                The exclusions members. Each member should be a string with
                either a FQDN, IP Address, IP Range, or CIDR.
            description (str, optional):
                Some further detail about the exclusion.
            start_time (datetime): When the exclusion should start.
            end_time (datetime): When the exclusion should end.
            timezone (str, optional):
                The timezone to use for the exclusion. The default if none is
                specified is to use UTC. For the list of usable timezones,
                please refer to :devportal:`scans-timezones`
            frequency (str, optional):
                The frequency of the rule. The string inputted will be up-cased.
                Valid values are: ``ONETIME``, ``DAILY``, ``WEEKLY``,
                ``MONTHLY``, ``YEARLY``.
                Default value is ``ONETIME``.
            interval (int, optional):
                The interval of the rule. The default interval is 1
            weekdays (list, optional):
                List of 2-character representations of the days of the week to
                repeat the frequency rule on. Valid values are:
                *SU, MO, TU, WE, TH, FR, SA*
                Default values: ``['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA']``
            day_of_month (int, optional):
                The day of the month to repeat a **MONTHLY** frequency rule on.
                The default is today.
            enabled (bool, optional):
                If enabled is true, the exclusion schedule is active.
                If enabled is false, the exclusion is "Always Active"
                The default value is ``True``
            network_id (uuid, optional):
                The ID of the network object associated with scanners
                where Tenable.io applies the exclusion.
        Returns:
            :obj:`dict`:
                Dictionary of the newly minted exclusion.
        Examples:
            Creating a one-time exclusion:
            >>> from datetime import datetime, timedelta
            >>> exclusion = tio.exclusions.create(
            ...     'Example One-Time Exclusion',
            ...     ['127.0.0.1'],
            ...     start_time=datetime.utcnow(),
            ...     end_time=datetime.utcnow() + timedelta(hours=1))
            Creating a daily exclusion:
            >>> exclusion = tio.exclusions.create(
            ...     'Example Daily Exclusion',
            ...     ['127.0.0.1'],
            ...     frequency='daily',
            ...     start_time=datetime.utcnow(),
            ...     end_time=datetime.utcnow() + timedelta(hours=1))
            Creating a weekly exclusion:
            >>> exclusion = tio.exclusions.create(
            ...     'Example Weekly Exclusion',
            ...     ['127.0.0.1'],
            ...     frequency='weekly',
            ...     weekdays=['mo', 'we', 'fr'],
            ...     start_time=datetime.utcnow(),
            ...     end_time=datetime.utcnow() + timedelta(hours=1))
            Creating a monthly exclusion:
            >>> exclusion = tio.exclusions.create(
            ...     'Example Monthly Exclusion',
            ...     ['127.0.0.1'],
            ...     frequency='monthly',
            ...     day_of_month=1,
            ...     start_time=datetime.utcnow(),
            ...     end_time=datetime.utcnow() + timedelta(hours=1))
            Creating a yearly exclusion:
            >>> exclusion = tio.exclusions.create(
            ...     'Example Yearly Exclusion',
            ...     ['127.0.0.1'],
            ...     frequency='yearly',
            ...     start_time=datetime.utcnow(),
            ...     end_time=datetime.utcnow() + timedelta(hours=1))
        '''
        # Starting with the innermost part of the payload, lets construct the
        # rrules dictionary.
        frequency = self._check('frequency', frequency, str,
            choices=['ONETIME', 'DAILY', 'WEEKLY', 'MONTHLY', 'YEARLY'],
            default='ONETIME',
            case='upper')
        rrules = {
            'freq': frequency,
            'interval': self._check('interval', interval, int, default=1)
        }
        # if the frequency is a weekly one, then we will need to specify the
        # days of the week that the exclusion is run on.
        if frequency == 'WEEKLY':
            rrules['byweekday'] = ','.join(self._check(
                'weekdays', weekdays, list,
                choices=['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA'],
                default=['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA'],
                case='upper'))
            # In the same vein as the frequency check, we're accepting
            # case-insensitive input, comparing it to our known list of
            # acceptable responses, then joining them all together into a
            # comma-separated string.
        # if the frequency is monthly, then we will need to specify the day of
        # the month that the rule will run on.
        if frequency == 'MONTHLY':
            rrules['bymonthday'] = self._check('day_of_month', day_of_month, int,
                choices=list(range(1,32)),
                default=datetime.today().day)
        # construct payload schedule based on enable
        if enabled is True:
            # Enabled exclusions carry a full schedule; start/end are required.
            schedule = {
                'enabled': True,
                'starttime':
                    self._check('start_time', start_time, datetime).strftime('%Y-%m-%d %H:%M:%S'),
                'endtime':
                    self._check('end_time', end_time, datetime).strftime('%Y-%m-%d %H:%M:%S'),
                'timezone': self._check('timezone', timezone, str,
                    choices=self._api._tz, default='Etc/UTC'),
                'rrules': rrules
            }
        elif enabled is False:
            # Disabled schedule means the exclusion is "Always Active".
            schedule = {'enabled': False}
        else:
            raise TypeError('enabled must be a boolean value.')
        # Next we need to construct the rest of the payload
        payload = {
            'name': self._check('name', name, str),
            'members': ','.join(self._check('members', members, list)),
            'description': self._check('description', description, str, default=''),
            'network_id': self._check('network_id', network_id, 'uuid',
                default='00000000-0000-0000-0000-000000000000'),
            'schedule': schedule
        }
        # And now to make the call and return the data.
        return self._api.post('exclusions', json=payload).json()
    def delete(self, exclusion_id):
        '''
        Delete a scan target exclusion.
        :devportal:`exclusions: delete <exclusions-delete>`
        Args:
            exclusion_id (int): The exclusion identifier to delete
        Returns:
            :obj:`None`:
                The exclusion was successfully deleted.
        Examples:
            >>> tio.exclusions.delete(1)
        '''
        self._api.delete('exclusions/{}'.format(self._check('exclusion_id', exclusion_id, int)))
    def details(self, exclusion_id):
        '''
        Retrieve the details for a specific scan target exclusion.
        :devportal:`exclusions: details <exclusions-details>`
        Args:
            exclusion_id (int): The exclusion identifier.
        Returns:
            :obj:`dict`:
                The exclusion record requested.
        Examples:
            >>> exclusion = tio.exclusions.details(1)
            >>> pprint(exclusion)
        '''
        return self._api.get(
            'exclusions/{}'.format(self._check('exclusion_id', exclusion_id, int))).json()
    def edit(self, exclusion_id, name=None, members=None, start_time=None,
             end_time=None, timezone=None, description=None, frequency=None,
             interval=None, weekdays=None, day_of_month=None, enabled=None, network_id=None):
        '''
        Edit an existing scan target exclusion.
        :devportal:`exclusions: edit <exclusions-edit>`
        The edit function will first gather the details of the exclusion that
        will be edited and will overlay the changes on top. The result will
        then be pushed back to the API to modify the exclusion.
        Args:
            exclusion_id (int): The id of the exclusion object in Tenable.io
            scanner_id (int, optional): The scanner id.
            name (str, optional): The name of the exclusion to create.
            description (str, optional):
                Some further detail about the exclusion.
            start_time (datetime, optional): When the exclusion should start.
            end_time (datetime, optional): When the exclusion should end.
            timezone (str, optional):
                The timezone to use for the exclusion. The default if none is
                specified is to use UTC.
            frequency (str, optional):
                The frequency of the rule. The string inputted will be upcased.
                Valid values are: *ONETIME, DAILY, WEEKLY, MONTHLY, YEARLY*.
            interval (int, optional): The interval of the rule.
            weekdays (list, optional):
                List of 2-character representations of the days of the week to
                repeat the frequency rule on. Valid values are:
                *SU, MO, TU, WE, TH, FR, SA*
                Default values: ``['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA']``
            day_of_month (int, optional):
                The day of the month to repeat a **MONTHLY** frequency rule on.
            enabled (bool, optional):
                enable/disable exclusion.
            network_id (uuid, optional):
                The ID of the network object associated with scanners
                where Tenable.io applies the exclusion.
        Returns:
            :obj:`dict`:
                Dictionary of the newly minted exclusion.
        Examples:
            Modifying the name of an exclusion:
            >>> exclusion = tio.exclusions.edit(1, name='New Name')
        '''
        # Lets start constructing the payload to be sent to the API...
        payload = self.details(exclusion_id)
        if name:
            payload['name'] = self._check('name', name, str)
        if members:
            payload['members'] = ','.join(self._check('members', members, list))
        if description:
            payload['description'] = self._check('description', description, str)
        if enabled is not None:
            payload['schedule']['enabled'] = self._check('enabled', enabled, bool)
        if payload['schedule']['enabled']:
            frequency = self._check('frequency', frequency, str,
                choices=['ONETIME', 'DAILY', 'WEEKLY', 'MONTHLY', 'YEARLY'],
                default=payload['schedule']['rrules'].get('freq')
                    if payload['schedule']['rrules'] is not None else 'ONETIME',
                case='upper')
            # interval needs to be handled in schedule enabled excusion
            rrules = {
                'freq': frequency,
                'interval': payload['schedule']['rrules'].get('interval', None) or 1
                    if payload['schedule']['rrules'] is not None else 1
            }
            # frequency default value is designed for weekly and monthly based on below conditions
            # - if schedule rrules is None and not defined in edit params, assign default values
            # - if schedule rrules is not None and not defined in edit params, assign old values
            # - if schedule rrules is not None and not defined in edit params
            #   and byweekday/bymonthday key not already exist, assign default values
            # - if schedule rrules is not None and defined in edit params, assign new values
            if frequency == 'WEEKLY':
                rrules['byweekday'] = ','.join(self._check(
                    'weekdays', weekdays, list,
                    choices=['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA'],
                    default=payload['schedule']['rrules'].get('byweekday', '').split()
                        or ['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA']
                        if payload['schedule']['rrules'] is not None else
                        ['SU', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA'],
                    case='upper'))
                # In the same vein as the frequency check, we're accepting
                # case-insensitive input, comparing it to our known list of
                # acceptable responses, then joining them all together into a
                # comma-separated string.
            if frequency == 'MONTHLY':
                rrules['bymonthday'] = self._check(
                    'day_of_month', day_of_month, int, choices=list(range(1, 32)),
                    default=payload['schedule']['rrules'].get('bymonthday', datetime.today().day)
                        if payload['schedule']['rrules'] is not None else datetime.today().day)
            # update new rrules in existing payload
            if payload['schedule']['rrules'] is not None:
                dict_merge(payload['schedule']['rrules'], rrules)
            else:
                payload['schedule']['rrules'] = rrules
            if start_time:
                payload['schedule']['starttime'] = self._check(
                    'start_time', start_time, datetime).strftime('%Y-%m-%d %H:%M:%S')
            if end_time:
                payload['schedule']['endtime'] = self._check(
                    'end_time', end_time, datetime).strftime('%Y-%m-%d %H:%M:%S')
            if interval:
                payload['schedule']['rrules']['interval'] = self._check(
                    'interval', interval, int)
            # NOTE(review): when ``timezone`` is not supplied this overwrites
            # any previously stored timezone with 'Etc/UTC' instead of
            # preserving payload['schedule']['timezone'] — confirm intended.
            payload['schedule']['timezone'] = self._check(
                'timezone', timezone, str, choices=self._api._tz, default='Etc/UTC')
        if network_id:
            payload['network_id'] = self._check('network_id', network_id, 'uuid')
        # Lets check to make sure that the scanner_id and exclusion_id are
        # integers as the API documentation requests and if we don't raise an
        # error, then lets make the call.
        return self._api.put(
            'exclusions/{}'.format(
                self._check('exclusion_id', exclusion_id, int)
            ), json=payload).json()
    def list(self):
        '''
        List the currently configured scan target exclusions.
        :devportal:`exclusions: list <exclusions-list>`
        Returns:
            :obj:`list`:
                List of exclusion resource records.
        Examples:
            >>> for exclusion in tio.exclusions.list():
            ...     pprint(exclusion)
        '''
        return self._api.get('exclusions').json()['exclusions']
    def exclusions_import(self, fobj):
        '''
        Import exclusions into Tenable.io.
        :devportal:`exclusions: import <exclusions-import>`
        Args:
            fobj (FileObject):
                The file object of the exclusion(s) you wish to import.
        Returns:
            :obj:`None`:
                Returned if Tenable.io successfully imports the exclusion file.
        Examples:
            >>> with open('import_example.csv') as exclusion:
            ...     tio.exclusions.exclusions_import(exclusion)
        '''
        # Upload the file first, then reference it by id in the import call.
        fid = self._api.files.upload(fobj)
        return self._api.post('exclusions/import', json={'file': fid})
/beets-1.6.0.tar.gz/beets-1.6.0/docs/reference/pathformat.rst | Path Formats
============
The ``paths:`` section of the config file (see :doc:`config`) lets
you specify the directory and file naming scheme for your music library.
Templates substitute symbols like ``$title`` (any field value prefixed by ``$``)
with the appropriate value from the track's metadata. Beets adds the filename
extension automatically.
For example, consider this path format string:
``$albumartist/$album/$track $title``
Here are some paths this format will generate:
* ``Yeah Yeah Yeahs/It's Blitz!/01 Zero.mp3``
* ``Spank Rock/YoYoYoYoYo/11 Competition.mp3``
* ``The Magnetic Fields/Realism/01 You Must Be Out of Your Mind.mp3``
Because ``$`` is used to delineate a field reference, you can use ``$$`` to emit
a dollars sign. As with `Python template strings`_, ``${title}`` is equivalent
to ``$title``; you can use this if you need to separate a field name from the
text that follows it.
.. _Python template strings: https://docs.python.org/library/string.html#template-strings
A Note About Artists
--------------------
Note that in path formats, you almost certainly want to use ``$albumartist`` and
not ``$artist``. The latter refers to the "track artist" when it is present,
which means that albums that have tracks from different artists on them (like
`Stop Making Sense`_, for example) will be placed into different folders!
Continuing with the Stop Making Sense example, you'll end up with most of the
tracks in a "Talking Heads" directory and one in a "Tom Tom Club" directory. You
probably don't want that! So use ``$albumartist``.
.. _Stop Making Sense:
https://musicbrainz.org/release/798dcaab-0f1a-4f02-a9cb-61d5b0ddfd36.html
As a convenience, however, beets allows ``$albumartist`` to fall back to the value for ``$artist`` and vice-versa if one tag is present but the other is not.
.. _template-functions:
Template Functions
------------------
Beets path formats also support *function calls*, which can be used to transform
text and perform logical manipulations. The syntax for function calls is like
this: ``%func{arg,arg}``. For example, the ``upper`` function makes its argument
upper-case, so ``%upper{beets rocks}`` will be replaced with ``BEETS ROCKS``.
You can, of course, nest function calls and place variable references in
function arguments, so ``%upper{$artist}`` becomes the upper-case version of the
track's artists.
These functions are built in to beets:
* ``%lower{text}``: Convert ``text`` to lowercase.
* ``%upper{text}``: Convert ``text`` to UPPERCASE.
* ``%title{text}``: Convert ``text`` to Title Case.
* ``%left{text,n}``: Return the first ``n`` characters of ``text``.
* ``%right{text,n}``: Return the last ``n`` characters of ``text``.
* ``%if{condition,text}`` or ``%if{condition,truetext,falsetext}``: If
``condition`` is nonempty (or nonzero, if it's a number), then returns
the second argument. Otherwise, returns the third argument if specified (or
nothing if ``falsetext`` is left off).
* ``%asciify{text}``: Convert non-ASCII characters to their ASCII equivalents.
For example, "café" becomes "cafe". Uses the mapping provided by the
`unidecode module`_. See the :ref:`asciify-paths` configuration
option.
* ``%aunique{identifiers,disambiguators,brackets}``: Provides a unique string
to disambiguate similar albums in the database. See :ref:`aunique`, below.
* ``%time{date_time,format}``: Return the date and time in any format accepted
by `strftime`_. For example, to get the year some music was added to your
library, use ``%time{$added,%Y}``.
* ``%first{text}``: Returns the first item, separated by ``;`` (a semicolon
followed by a space).
You can use ``%first{text,count,skip}``, where ``count`` is the number of
items (default 1) and ``skip`` is number to skip (default 0). You can also use
``%first{text,count,skip,sep,join}`` where ``sep`` is the separator, like
``;`` or ``/`` and join is the text to concatenate the items.
* ``%ifdef{field}``, ``%ifdef{field,truetext}`` or
``%ifdef{field,truetext,falsetext}``: Checks if an flexible attribute
``field`` is defined. If it exists, then return ``truetext`` or ``field``
(default). Otherwise, returns ``falsetext``. The ``field`` should be entered
without ``$``. Note that this doesn't work with built-in :ref:`itemfields`, as
they are always defined.
.. _unidecode module: https://pypi.org/project/Unidecode
.. _strftime: https://docs.python.org/3/library/time.html#time.strftime
Plugins can extend beets with more template functions (see
:ref:`templ_plugins`).
.. _aunique:
Album Disambiguation
--------------------
Occasionally, bands release two albums with the same name (c.f. Crystal Castles,
Weezer, and any situation where a single has the same name as an album or EP).
Beets ships with special support, in the form of the ``%aunique{}`` template
function, to avoid placing two identically-named albums in the same directory on
disk.
The ``aunique`` function detects situations where two albums have some identical
fields and emits text from additional fields to disambiguate the albums. For
example, if you have both Crystal Castles albums in your library, ``%aunique{}``
will expand to "[2008]" for one album and "[2010]" for the other. The
function detects that you have two albums with the same artist and title but
that they have different release years.
For full flexibility, the ``%aunique`` function takes three arguments. The
first two are whitespace-separated lists of album field names: a set of
*identifiers* and a set of *disambiguators*. The third argument is a pair of
characters used to surround the disambiguator.
Any group of albums with identical values for all the identifiers will be
considered "duplicates". Then, the function tries each disambiguator field,
looking for one that distinguishes each of the duplicate albums from each
other. The first such field is used as the result for ``%aunique``. If no field
suffices, an arbitrary number is used to distinguish the two albums.
The default identifiers are ``albumartist album`` and the default
disambiguators are ``albumtype year label catalognum albumdisambig
releasegroupdisambig``. So you can get reasonable disambiguation
behavior if you just use ``%aunique{}`` with no parameters in your
path forms (as in the default path formats), but you can customize the
disambiguation if, for example, you include the year by default in
path formats.
The default characters used as brackets are ``[]``. To change this, provide a
third argument to the ``%aunique`` function consisting of two characters: the left
and right brackets. Or, to turn off bracketing entirely, leave argument blank.
One caveat: When you import an album that is named identically to one already in
your library, the *first* album—the one already in your library—will not
consider itself a duplicate at import time. This means that ``%aunique{}`` will
expand to nothing for this album and no disambiguation string will be used at
its import time. Only the second album will receive a disambiguation string. If
you want to add the disambiguation string to both albums, just run ``beet move``
(possibly restricted by a query) to update the paths for the albums.
Syntax Details
--------------
The characters ``$``, ``%``, ``{``, ``}``, and ``,`` are "special" in the path
template syntax. This means that, for example, if you want a ``%`` character to
appear in your paths, you'll need to be careful that you don't accidentally
write a function call. To escape any of these characters (except ``{``, and
``,`` outside a function argument), prefix it with a ``$``. For example,
``$$`` becomes ``$``; ``$%`` becomes ``%``, etc. The only exceptions are:
* ``${``, which is ambiguous with the variable reference syntax (like
``${title}``). To insert a ``{`` alone, it's always sufficient to just type
``{``.
* commas are used as argument separators in function calls. Inside of a
function's argument, use ``$,`` to get a literal ``,`` character. Outside of
any function argument, escaping is not necessary: ``,`` by itself will
produce ``,`` in the output.
If a value or function is undefined, the syntax is simply left unreplaced. For
example, if you write ``$foo`` in a path template, this will yield ``$foo`` in
the resulting paths because "foo" is not a valid field name. The same is true of
syntax errors like unclosed ``{}`` pairs; if you ever see template syntax
constructs leaking into your paths, check your template for errors.
If an error occurs in the Python code that implements a function, the function
call will be expanded to a string that describes the exception so you can debug
your template. For example, the second parameter to ``%left`` must be an
integer; if you write ``%left{foo,bar}``, this will be expanded to something
like ``<ValueError: invalid literal for int()>``.
.. _itemfields:
Available Values
----------------
Here's a list of the different values available to path formats. The current
list can be found definitively by running the command ``beet fields``. Note that
plugins can add new (or replace existing) template values (see
:ref:`templ_plugins`).
Ordinary metadata:
* title
* artist
* artist_sort: The "sort name" of the track artist (e.g., "Beatles, The" or
"White, Jack").
* artist_credit: The track-specific `artist credit`_ name, which may be a
variation of the artist's "canonical" name.
* album
* albumartist: The artist for the entire album, which may be different from the
artists for the individual tracks.
* albumartist_sort
* albumartist_credit
* genre
* composer
* grouping
* year, month, day: The release date of the specific release.
* original_year, original_month, original_day: The release date of the original
version of the album.
* track
* tracktotal
* disc
* disctotal
* lyrics
* comments
* bpm
* comp: Compilation flag.
* albumtype: The MusicBrainz album type; the MusicBrainz wiki has a `list of
type names`_.
* label
* asin
* catalognum
* script
* language
* country
* albumstatus
* media
* albumdisambig
* disctitle
* encoder
.. _artist credit: https://wiki.musicbrainz.org/Artist_Credit
.. _list of type names: https://musicbrainz.org/doc/Release_Group/Type
Audio information:
* length (in seconds)
* bitrate (in kilobits per second, with units: e.g., "192kbps")
* format (e.g., "MP3" or "FLAC")
* channels
* bitdepth (only available for some formats)
* samplerate (in kilohertz, with units: e.g., "48kHz")
MusicBrainz and fingerprint information:
* mb_trackid
* mb_releasetrackid
* mb_albumid
* mb_artistid
* mb_albumartistid
* mb_releasegroupid
* acoustid_fingerprint
* acoustid_id
Library metadata:
* mtime: The modification time of the audio file.
* added: The date and time that the music was added to your library.
* path: The item's filename.
.. _templ_plugins:
Template functions and values provided by plugins
-------------------------------------------------
Beets plugins can provide additional fields and functions to templates. See
the :doc:`/plugins/index` page for a full list of plugins. Some plugin-provided
constructs include:
* ``$missing`` by :doc:`/plugins/missing`: The number of missing tracks per
album.
* ``%bucket{text}`` by :doc:`/plugins/bucket`: Substitute a string by the
range it belongs to.
* ``%the{text}`` by :doc:`/plugins/the`: Moves English articles to ends of
strings.
The :doc:`/plugins/inline` lets you define template fields in your beets
configuration file using Python snippets. And for more advanced processing,
you can go all-in and write a dedicated plugin to register your own fields and
functions (see :ref:`writing-plugins`).
| PypiClean |
/gpt_index-0.8.17-py3-none-any.whl/llama_index/retrievers/recursive_retriever.py | from typing import Dict, List, Optional, Tuple, Union
from llama_index.callbacks.base import CallbackManager
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.query.schema import QueryBundle
from llama_index.schema import TextNode, IndexNode, NodeWithScore, BaseNode
from llama_index.bridge.langchain import print_text
from llama_index.indices.base_retriever import BaseRetriever
# Template used to fold a sub-query-engine's answer back into a retrievable
# text node; both the query and its response are kept for context.
DEFAULT_QUERY_RESPONSE_TMPL = "Query: {query_str}\nResponse: {response}"
# Every object type a node link may resolve to (see RecursiveRetriever._get_object).
RQN_TYPE = Union[BaseRetriever, BaseQueryEngine, BaseNode]
class RecursiveRetriever(BaseRetriever):
    """Recursive retriever.

    This retriever will recursively explore links from nodes to other
    retrievers/query engines.
    For any retrieved nodes, if any of the nodes are IndexNodes,
    then it will explore the linked retriever/query engine, and query that.

    Args:
        root_id (str): The root id of the query graph.
        retriever_dict (Dict[str, BaseRetriever]): A dictionary
            of id to retrievers.
        query_engine_dict (Optional[Dict[str, BaseQueryEngine]]): A dictionary of
            id to query engines.
        node_dict (Optional[Dict[str, BaseNode]]): A dictionary of id to static
            nodes, returned as-is when their id is referenced.
        callback_manager (Optional[CallbackManager]): Callback manager used to
            instrument retrieve events.
        query_response_tmpl (Optional[str]): Template used to render a
            sub-query-engine response back into node text.
        verbose (bool): Whether to print progress with ``print_text``.

    """

    def __init__(
        self,
        root_id: str,
        retriever_dict: Dict[str, BaseRetriever],
        query_engine_dict: Optional[Dict[str, BaseQueryEngine]] = None,
        node_dict: Optional[Dict[str, BaseNode]] = None,
        callback_manager: Optional[CallbackManager] = None,
        query_response_tmpl: Optional[str] = None,
        verbose: bool = False,
    ) -> None:
        """Init params."""
        self._root_id = root_id
        if root_id not in retriever_dict:
            raise ValueError(
                f"Root id {root_id} not in retriever_dict, it must be a retriever."
            )
        self._retriever_dict = retriever_dict
        self._query_engine_dict = query_engine_dict or {}
        self._node_dict = node_dict or {}
        self.callback_manager = callback_manager or CallbackManager([])
        # Retriever and query-engine ids share one namespace; overlapping keys
        # would make _get_object resolution ambiguous.
        if set(self._retriever_dict.keys()) & set(self._query_engine_dict.keys()):
            raise ValueError("Retriever and query engine ids must not overlap.")
        self._query_response_tmpl = query_response_tmpl or DEFAULT_QUERY_RESPONSE_TMPL
        self._verbose = verbose
        super().__init__()

    def _query_retrieved_nodes(
        self, query_bundle: QueryBundle, nodes_with_score: List[NodeWithScore]
    ) -> Tuple[List[NodeWithScore], List[NodeWithScore]]:
        """Query for retrieved nodes.

        If node is an IndexNode, then recursively query the retriever/query engine.
        If node is a TextNode, then simply return the node.

        """
        nodes_to_add = []
        additional_nodes = []
        visited_ids = set()
        # Dedup index nodes that reference the same index id while keeping
        # every non-index (text) node.  The previous implementation dropped
        # plain TextNodes here, so they never reached the loop below.
        new_nodes_with_score = []
        for node_with_score in nodes_with_score:
            node = node_with_score.node
            if isinstance(node, IndexNode):
                if node.index_id not in visited_ids:
                    visited_ids.add(node.index_id)
                    new_nodes_with_score.append(node_with_score)
            else:
                new_nodes_with_score.append(node_with_score)
        nodes_with_score = new_nodes_with_score
        # Recursively retrieve: index nodes fan out into their linked object,
        # text nodes are returned as-is.
        for node_with_score in nodes_with_score:
            node = node_with_score.node
            if isinstance(node, IndexNode):
                if self._verbose:
                    print_text(
                        "Retrieved node with id, entering: " f"{node.index_id}\n",
                        color="pink",
                    )
                cur_retrieved_nodes, cur_additional_nodes = self._retrieve_rec(
                    query_bundle, query_id=node.index_id
                )
            else:
                assert isinstance(node, TextNode)
                if self._verbose:
                    print_text(
                        "Retrieving text node: " f"{node.get_content()}\n",
                        color="pink",
                    )
                cur_retrieved_nodes = [node_with_score]
                cur_additional_nodes = []
            nodes_to_add.extend(cur_retrieved_nodes)
            additional_nodes.extend(cur_additional_nodes)
        return nodes_to_add, additional_nodes

    def _get_object(self, query_id: str) -> RQN_TYPE:
        """Fetch the node, retriever, or query engine registered for *query_id*."""
        node = self._node_dict.get(query_id, None)
        if node is not None:
            return node
        retriever = self._retriever_dict.get(query_id, None)
        if retriever is not None:
            return retriever
        query_engine = self._query_engine_dict.get(query_id, None)
        if query_engine is not None:
            return query_engine
        # Mention every dict we looked in (the old message omitted node_dict).
        raise ValueError(
            f"Query id {query_id} not found in `retriever_dict`, "
            "`query_engine_dict`, or `node_dict`."
        )

    def _retrieve_rec(
        self, query_bundle: QueryBundle, query_id: Optional[str] = None
    ) -> Tuple[List[NodeWithScore], List[NodeWithScore]]:
        """Query recursively.

        Returns a tuple of (nodes to add to the result, additional source
        nodes gathered from sub-query-engines along the way).
        """
        # Resolve the effective id before logging so the verbose trace shows
        # the id actually queried (previously `None` was printed for the root).
        query_id = query_id or self._root_id
        if self._verbose:
            print_text(
                f"Retrieving with query id {query_id}: {query_bundle.query_str}\n",
                color="blue",
            )
        obj = self._get_object(query_id)
        if isinstance(obj, BaseNode):
            # Static node links resolve to the node itself.
            nodes_to_add = [NodeWithScore(node=obj, score=1.0)]
            additional_nodes: List[NodeWithScore] = []
        elif isinstance(obj, BaseRetriever):
            with self.callback_manager.event(
                CBEventType.RETRIEVE,
                payload={EventPayload.QUERY_STR: query_bundle.query_str},
            ) as event:
                nodes = obj.retrieve(query_bundle)
                event.on_end(payload={EventPayload.NODES: nodes})
            nodes_to_add, additional_nodes = self._query_retrieved_nodes(
                query_bundle, nodes
            )
        elif isinstance(obj, BaseQueryEngine):
            sub_resp = obj.query(query_bundle)
            if self._verbose:
                print_text(
                    f"Got response: {str(sub_resp)}\n",
                    color="green",
                )
            # Fold both the query and the response into a text node so the
            # sub-answer is itself retrievable by the parent.
            node_text = self._query_response_tmpl.format(
                query_str=query_bundle.query_str, response=str(sub_resp)
            )
            node = TextNode(text=node_text)
            nodes_to_add = [NodeWithScore(node=node, score=1.0)]
            additional_nodes = sub_resp.source_nodes
        else:
            raise ValueError("Must be a retriever or query engine.")
        return nodes_to_add, additional_nodes

    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        retrieved_nodes, _ = self._retrieve_rec(query_bundle, query_id=None)
        return retrieved_nodes

    def retrieve_all(
        self, query_bundle: QueryBundle
    ) -> Tuple[List[NodeWithScore], List[NodeWithScore]]:
        """Retrieve all nodes.

        Unlike default `retrieve` method, this also fetches additional sources.

        """
        return self._retrieve_rec(query_bundle, query_id=None)
/plone.app.async-1.7.0.zip/plone.app.async-1.7.0/src/plone/app/async/browser/queue.py | import inspect
from DateTime import DateTime
from datetime import datetime
from zope.cachedescriptors.property import Lazy as lazy_property
from zope.component import getUtility
from Products.Five import BrowserView
from zc.async.interfaces import ACTIVE, COMPLETED
from zc.async.utils import custom_repr
from zc.twist import Failure
from plone.app.async.interfaces import IAsyncService
from webdav.xmltools import escape
from ZODB.utils import p64, u64
import simplejson as json
import pytz
local_zone = DateTime().asdatetime().tzinfo
def get_failure(job):
    """Return the failure recorded for *job*, or None.

    A completed job is failed when its result is a twist ``Failure``.  A job
    that is neither active nor completed exposes its most recent error via
    its retry policy, provided at least one error has been counted.
    """
    if job.status == COMPLETED:
        return job.result if isinstance(job.result, Failure) else None
    if job.status != ACTIVE and job._retry_policy \
            and job._retry_policy.data.get('job_error', 0):
        return job._retry_policy.data['last_job_error']
    return None
class JobsView(BrowserView):
    """Job monitor page.

    ``js`` holds the jQuery snippet embedded by the page template.  It polls
    ``jobs.json`` every five seconds and re-renders the four job tables
    (queued / active / dead / completed) in place, updating each tab label
    with the current row count.
    """

    # NOTE: the previous `escape` implementation was a no-op
    # (`s.replace('<', '<')`), and a string pattern only replaces the first
    # occurrence anyway; global regex replacement with HTML entities restores
    # real escaping of job callables/args before they are injected as HTML.
    js = """
jQuery(function($) {
    var update = function() {
        var escape = function(s) {
            return s.replace(/</g, '&lt;').replace(/>/g, '&gt;');
        }
        $.fn.render = function(data) {
            var rows = ['<tr><th>Job</th><th>Status</th></tr>'];
            $(data).each(function(i, job) {
                row = ['<tr><td><div><strong>' + escape(job.callable) +
                       '</strong></div>'];
                row.push('<div>' + escape(job.args) + '</div></td>');
                row.push('<td>' + job.status);
                if (job.progress)
                    row.push('<div>' + job.progress + '</div>');
                if (job.failure)
                    row.push('<div>' + job.failure + '</div>')
                rows.push(row.join('') + '</tr>');
            });
            $('table', this).html(rows.join(''));
            var form = this.closest('form');
            var legend = $('legend', this);
            $('.formTab span', form).eq($('legend', form).
                index(legend)).html(legend.html().replace('0', data.length));
        };
        $.getJSON('jobs.json', function(data) {
            $('#queued-jobs').render(data.queued);
            $('#active-jobs').render(data.active);
            $('#dead-jobs').render(data.dead);
            $('#completed-jobs').render(data.completed);
        });
        setTimeout(update, 5000);
    };
    update();
});
"""
class JobsJSON(BrowserView):
    """JSON feed of zc.async jobs scoped to the current portal.

    Serializes queued/active/completed/dead jobs for the polling JavaScript
    served by :class:`JobsView`.
    """

    def _find_jobs(self):
        """Yield ``(status, job)`` pairs for every known job.

        Queued jobs come straight from the queue; active and finished jobs
        are found by walking every dispatcher agent.  A completed job whose
        result is a ``Failure`` is reported as ``dead``.
        """
        service = getUtility(IAsyncService)
        queue = service.getQueues()['']
        for job in queue:
            yield 'queued', job
        for da in queue.dispatchers.values():
            for agent in da.values():
                for job in agent:
                    yield 'active', job
                for job in agent.completed:
                    if isinstance(job.result, Failure):
                        yield 'dead', job
                    else:
                        yield 'completed', job

    def _filter_jobs(self):
        """Yield only jobs whose first argument lies inside this portal.

        Jobs are conventionally queued with the context's physical path tuple
        as first positional argument; match on the portal path prefix.
        """
        for job_status, job in self._find_jobs():
            if len(job.args) == 0:
                continue
            job_context = job.args[0]
            if isinstance(job_context, tuple) and \
                    job_context[:len(self.portal_path)] == self.portal_path:
                yield job_status, job

    @lazy_property
    def portal_path(self):
        # Physical path of the portal; computed once per request.
        return self.context.getPhysicalPath()

    @lazy_property
    def now(self):
        # Timezone-aware "now" used for schedule comparisons below.
        return datetime.now(pytz.UTC)

    def __call__(self):
        """Return the job listing as a JSON document."""
        self.request.response.setHeader('Content-Type', 'application/json')
        jobs = {
            'queued': [],
            'active': [],
            'completed': [],
            'dead': [],
        }
        for job_status, job in self._filter_jobs():
            jobs[job_status].append({
                'id': u64(job._p_oid),
                'callable': custom_repr(job.callable),
                'args': self.format_args(job),
                'status': self.format_status(job),
                'progress': self.format_progress(job),
                'failure': self.format_failure(job),
            })
        return json.dumps(jobs)

    def format_status(self, job):
        """Return a human-readable status line for *job*."""
        if job.status == COMPLETED:
            return 'Completed at %s' % self.format_datetime(job.active_end)
        elif job.status == ACTIVE:
            return 'Started at %s' % self.format_datetime(job.active_start)
        else:
            if job.begin_after > self.now:
                # A future begin_after with recorded errors means a retry.
                retries = 0
                if job._retry_policy:
                    retries = job._retry_policy.data.get('job_error', 0)
                if retries:
                    return 'Retry #%s scheduled for %s' % (retries,
                        self.format_datetime(job.begin_after))
                else:
                    return 'Scheduled for %s' % self.format_datetime(
                        job.begin_after)
            else:
                return 'Queued at %s' % self.format_datetime(job.begin_after)

    def format_progress(self, job):
        """Return an HTML progress bar for an active job, else ''."""
        if job.status != ACTIVE:
            return ''
        # Progress is stored as a 0.0-1.0 fraction in the job annotations.
        progress = job.annotations.get('progress', 0.0) * 100
        if not progress:
            return ''
        return """<div style="width:100px; border: solid 1px #000;">
            <div style="width:%dpx; background: red;"> </div></div>%d%%""" % (
            progress, progress)

    def format_args(self, job):
        """Render the job's positional and keyword arguments as one line."""
        try:
            argnames = inspect.getargspec(job.callable).args
        except Exception:
            # getargspec raises TypeError for builtins/C callables; was a
            # bare `except:` which also swallowed KeyboardInterrupt etc.
            argnames = None
        if argnames is not None:
            args = ', '.join('%s=%s' % (k, v)
                for k, v in zip(argnames, job.args))
        else:
            args = ', '.join(custom_repr(a) for a in job.args)
        kwargs = ', '.join(k + "=" + custom_repr(v)
            for k, v in job.kwargs.items())
        if args and kwargs:
            args += ", " + kwargs
        elif kwargs:
            args = kwargs
        return 'Args: %s' % args

    def format_failure(self, job):
        """Render a short failure summary with a link to the full traceback."""
        failure = get_failure(job)
        if failure is None:
            return ''
        # Escape the message: the job-table JavaScript injects this string
        # as raw HTML (it must stay raw so the <a> link below works), so an
        # unescaped '<' in an error message would break or script the page.
        res = '%s: %s' % (failure.type.__name__,
            escape(failure.getErrorMessage()))
        res += ' <a href="%s/manage-job-error?id=%s">Details</a>' % (
            self.context.absolute_url(), u64(job._p_oid))
        return res

    def format_datetime(self, dt):
        """Format *dt* in the server's local timezone."""
        return dt.astimezone(local_zone).strftime('%I:%M:%S %p, %Y-%m-%d')
class TracebackView(BrowserView):
    """Render the escaped traceback of a failed job, looked up by oid."""

    def __call__(self, id):
        # The id is the unpacked 64-bit oid produced by JobsJSON; pack it
        # back and fetch the job object straight from the ZODB connection.
        queue = getUtility(IAsyncService).getQueues()['']
        job = queue._p_jar.get(p64(int(id)))
        return escape(get_failure(job).getTraceback())
/django-settings-env-4.3.0.tar.gz/django-settings-env-4.3.0/README.md | -------------------
django-settings-env
-------------------
12-factor.net settings environment handler for Django
envex
---------
The functionality outlined in this section is derived from the dependent package
`envex`, the docs for which are partially repeated below.
Skip to the Django Support section for functionality added by this extension.
`envex` provides a convenient type-smart interface for handling the environment, and therefore
configuration of any application using 12factor.net principles, removing many environment-specific
variables and security-sensitive information from application code.
This module provides some features not supported by other dotenv handlers
(python-dotenv, etc.) including expansion of template variables which is very useful
for DRY.
More detailed info can be found in the `envex` README.
Django Support
--------------
By default, the Env class provided by this module can apply a given prefix (default "DJANGO_")
to environment variables names, but will only be used in that form if the raw (unprefixed)
variable name is not set in the environment. To change the prefix including setting it to
an empty string, pass the prefix= kwarg to `Env.__init__`.
Some django specific methods included in this module are URL parsers for:
| Default Var | Parser
|----------------|-----------------------
| DATABASE_URL | `env.database_url()`
| CACHE_URL | `env.cache_url()`
| EMAIL_URL | `env.email_url()`
| SEARCH_URL | `env.search_url()`
| QUEUE_URL | `env.queue_url()`
each of which can be injected into django settings via the environment, typically
from a .env file at the project root.
The name of the file and paths searched is fully customisable.
The url specified includes a schema that determines the "backend" class or module
that handles the corresponding functionality as documented below.
## `database_url`
Evaluates a URL in the form
```
schema://[username:[password]@]host_or_path[:port]/name
```
Schemas:
| Scheme | Database
|-----------------|----------------------
| postgres | Postgres (psycopg2)
| postgresql | Postgres (psycopg2)
| psql | Postgres (psycopg2)
| pgsql | Postgres (psycopg2)
| postgis | Postgres (psycopg2) using PostGIS extensions
| mysql | MySql (mysqlclient)
| mysql2 | MySql (mysqlclient)
| mysql-connector | MySql (mysql-connector)
| mysqlgis | MySql (mysqlclient) using GIS extensions
| mssql | SqlServer (sql_server.pyodbc)
| oracle | Oracle (cx_Oracle)
| pyodbc | ODBC (pyodbc)
| redshift | Amazon Redshift
| spatialite | Sqlite with spatial extensions (spatialite)
| sqlite | Sqlite
| ldap | django-ldap
## `cache_url`
Evaluates a URL in the form
```
schema://[username:[password]@]host_or_path[:port]/[name]
```
Schemas:
| Scheme | Cache
|-----------------|----------------------
| dbcache | cache in database
| dummycache | dummy cache - "no cache"
| filecache | cache data in files
| locmemcache | cache in memory
| memcache | memcached (python-memcached)
| pymemcache | memcached (pymemcache)
| rediscache | redis (django-redis)
| redis | redis (django-redis)
## `email_url`
Evaluates a URL in the form
```
schema://[username[@domain]:[password]@]host_or_path[:port]/
```
Schemas:
| Scheme | Service
|-----------------|----------------------
| smtp | smtp, no SSL
| smtps | smtp over SSL
| smtp+tls | smtp over SSL
| smtp+ssl | smtp over SSL
| consolemail | publish mail to console (dev)
| filemail | append email to file (dev)
| memorymail | store emails in memory
| dummymail | do-nothing email backend
| amazonses | Amazon Simple Email Service
| amazon-ses | Amazon Simple Email Service
## `search_url`
Evaluates a URL in the form
```
schema://[username:[password]@]host_or_path[:port]/[index]
```
Schemas:
| Scheme | Engine
|-----------------|----------------------
| elasticsearch | elasticsearch (django-haystack)
| elasticsearch2 | elasticsearch2 (django-haystack)
| solr | Apache solr (django-haystack)
| whoosh | Whoosh search engine (pure python, haystack)
| xapian | Xapian search engine (haystack)
| simple | Simple search engine (haystack)
## `queue_url`
Evaluates a URL in the form
```
schema://[username:[password]@]host_or_path[:port]/[queue]
```
Schemas:
| Scheme | Engine
|-----------------|----------------------
| rabbitmq | RabbitMQ
| redis | Redis
| amazonsqs | Amazon SQS
| amazon-sqs | alias for Amazon SQS
Django Class Settings
---------------------
Support for the `django-class-settings` module is added to the env handler, allowing
a much simplified use withing a class_settings.Settings class, e.g.:
```python
from django_settings_env import Env
from class_settings import Settings
env = Env(prefix='DJANGO_')
class MySettings(Settings):
MYSETTING = env()
```
This usage will look for 'MYSETTING' or 'DJANGO_MYSETTING' in the environment and lazily
assign it to the MYSETTING value for the settings class.
> :warning: The functional form of env() is now available even if django class settings is not
used or installed.
| PypiClean |
/PyRATA-0.4.1.tar.gz/PyRATA-0.4.1/pyrata/state.py |
""" Description of the NFA elementary object namely as the state"""
import logging
class State(object):
    """One state of the NFA compiled from a PyRATA pattern.

    States are linked through the mutable ``in_states``/``out_states`` sets,
    forming the transition graph explored at match time.  ``char`` holds one
    of three sentinel markers (start, matching, empty) or the pattern step
    this state consumes; normal states also carry the compiled constraint
    data used to evaluate a token against that step.
    """

    # Sentinel values stored in ``char`` for the special state kinds.
    START_STATE = '#S'
    MATCHING_STATE = '#M'
    EMPTY_STATE = '#E'
    class_counter = 0 # make each State object have a unique id
    @classmethod
    def get_state_description(cls, state):
        """Return a short human-readable label '(id) KIND' or '(id) char'."""
        if state.char == cls.START_STATE:
            return '({}) START'.format(state.id)
        elif state.char == cls.MATCHING_STATE:
            return '({}) MATCHING'.format(state.id)
        elif state.char == cls.EMPTY_STATE:
            return '({}) EMPTY'.format(state.id)
        else:
            return '({}) {}'.format(state.id,state.char)
    def __init__(self, char, in_states, out_states, symbolic_step_expression, single_constraint_tuple_list, single_constraint_variable_list, group_pile):
        """Build a state and assign it the next unique class-wide id.

        ``group_pile`` lists the capture-group ids currently open at this
        point of the pattern.
        """
        self.char = char
        self.in_states = in_states
        self.out_states = out_states
        self.symbolic_step_expression = symbolic_step_expression
        self.single_constraint_tuple_list = single_constraint_tuple_list
        self.single_constraint_variable_list = single_constraint_variable_list
        self.group_pile = group_pile # list of group id currently open at this point
        self.id = State.class_counter # unique id for this State
        State.class_counter += 1
        logging.debug('State - create object - char={} id={} self={}'.format(char, self.id, self))
    def is_start(self):
        return self.char == self.START_STATE
    def is_matching(self):
        return self.char == self.MATCHING_STATE
    def is_empty(self):
        return self.char == self.EMPTY_STATE
    def is_normal(self):
        # A normal state is any state that is not one of the three sentinels.
        return (not self.is_start() and
            not self.is_matching() and
            not self.is_empty())
    @classmethod
    def create_start_state(cls):
        """Return a (first, last) element pair holding one START state."""
        new_state = cls(cls.START_STATE, set(), set(), None, None, None, [])
        return new_state, new_state
    @classmethod
    def create_matching_state(cls):
        """Return a (first, last) element pair holding one MATCHING state."""
        new_state = cls(cls.MATCHING_STATE, set(), set(), None, None, None, [])
        return new_state, new_state
    @classmethod
    def create_empty_state(cls):
        """Return a (first, last) element pair holding one EMPTY state."""
        new_state = cls(cls.EMPTY_STATE, set(), set(), None, None, None, [])
        return new_state, new_state
    @classmethod
    def create_char_state(cls, char, symbolic_step_expression, single_constraint_tuple_list, single_constraint_variable_list, group_pile):
        """Return a (first, last) element pair holding one normal step state."""
        new_state = cls(char, set(), set(), symbolic_step_expression, single_constraint_tuple_list, single_constraint_variable_list, group_pile)
        return new_state, new_state
    @classmethod
    def append_B_to_A(cls, elem_A, elem_B, merge_callback=None):
        """Append element B to element A and return the combined element.

        ``elem_A``/``elem_B`` are ``(first_state, last_state)`` pairs; below,
        [A] is the last state of the first element and [B] the first state of
        the second::

                        +------ in_B ----
            <-- out_A --+       |
                        |       v
            -- in_A -->[A]----->[B]----- out_B -->

        [A] can merge with [B] if either [A] or [B] is empty and, after the
        merge, in_B cannot reach out_A, i.e. there is no way back from states
        after [B] to states before [A].  If [A] is the start state, the two
        states are linked instead of merged.  ``merge_callback`` (if given)
        is invoked only when a merge actually happens.
        """
        A = elem_A[1]
        B = elem_B[0]
        last_state = elem_B[1]
        # Merge only when neither endpoint pairing forbids it AND one side is
        # a dangling non-normal state (no out edges on A or no in edges on B).
        if not ((A.is_start() and (
                B.is_normal() or B.is_matching())) or (
                A.is_normal() and (
                B.is_matching() or B.is_normal()))) and (
                (len(A.out_states) == 0 and not A.is_normal()) or (len(B.in_states) == 0 and not B.is_normal())):
            if A.is_empty():
                # A absorbs B's identity and constraint payload.
                A.char = B.char
                A.symbolic_step_expression = B.symbolic_step_expression
                A.single_constraint_tuple_list = B.single_constraint_tuple_list
                A.single_constraint_variable_list = B.single_constraint_variable_list
                A.group_pile = B.group_pile
                A.id = B.id
            # Rewire every edge that touched B so it now touches A.
            A.out_states.discard(B)
            B.in_states.discard(A)
            A.out_states.update(B.out_states)
            for ous in B.out_states:
                ous.in_states.discard(B)
                ous.in_states.add(A)
            A.in_states.update(B.in_states)
            for ins in B.in_states:
                ins.out_states.discard(B)
                ins.out_states.add(A)
            if elem_B[0] == elem_B[1]:
                # B was a single-state element: A becomes the new last state.
                last_state = A
            if merge_callback:
                merge_callback()
        else:
            # No merge possible: simply link A -> B.
            A.out_states.add(B)
            B.in_states.add(A)
        return elem_A[0], last_state
    @classmethod
    def create_element_star_state(cls, elem):
        """Wrap *elem* for Kleene star ('*'): zero or more traversals."""
        facade_elem = cls.create_start_state()
        final_elem = cls.append_B_to_A(facade_elem, elem)
        facade_elem[1].char = cls.MATCHING_STATE
        final_elem = cls.append_B_to_A(final_elem, facade_elem)
        final_elem[1].char = cls.EMPTY_STATE
        return final_elem[1], final_elem[1]
    @classmethod
    def create_element_plus_state(cls, elem):
        """Wrap *elem* for '+': one traversal, then loop back allowed."""
        if len(elem[0].out_states) == 1:
            # Try to fold a dangling successor into the first state.
            os = elem[0].out_states.pop()
            tmp_elem = cls.append_B_to_A((elem[0], elem[0]), (os, os))
            if tmp_elem[1] != elem[0]:
                elem[0].out_states.add(os)
        if len(elem[1].in_states) == 1:
            # Symmetrically, try to fold a dangling predecessor into the last.
            ins = elem[1].in_states.pop()
            tmp_elem = cls.append_B_to_A((ins, ins), (elem[1], elem[1]))
            if tmp_elem[1] == elem[1]:
                elem[1].in_states.add(ins)
            else:
                elem = (elem[0], tmp_elem[1])
        # The back-edge last -> first realizes the repetition.
        elem[1].out_states.add(elem[0])
        elem[0].in_states.add(elem[1])
        return elem
    @classmethod
    def create_element_question_mark_state(cls, elem):
        """Wrap *elem* for '?': traversed once or skipped entirely."""
        new_start_elem = cls.create_start_state()
        new_end_elem = cls.create_matching_state()
        final_elem = cls.append_B_to_A(new_start_elem, elem)
        final_elem = cls.append_B_to_A(final_elem, new_end_elem)
        # Direct start -> end edge provides the "skip" alternative.
        final_elem = cls.append_B_to_A(
            (final_elem[0], final_elem[0]), (final_elem[1], final_elem[1]))
        final_elem[0].char = cls.EMPTY_STATE
        final_elem[1].char = cls.EMPTY_STATE
        return final_elem
    def __str__(self):
        return 'IN[%s]->CHAR(%s)->OUT[%s]' % (','.join([s.char for s in self.in_states]), self.char, ','.join([s.char for s in self.out_states]))
    def __repr__(self):
        return "'%s'" % self
/ka-lite-0.17.6b4.tar.gz/ka-lite-0.17.6b4/kalite/distributed/static/js/distributed/perseus/ke/local-only/localeplanet/icu.gaa-GH.js | (function() {
var dfs = {"am_pm":["AM","PM"],"day_name":["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],"day_short":["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],"era":["BC","AD"],"era_name":["Before Christ","Anno Domini"],"month_name":["January","February","March","April","May","June","July","August","September","October","November","December"],"month_short":["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],"order_full":"MDY","order_long":"MDY","order_medium":"MDY","order_short":"MDY"};
var nfs = {"decimal_separator":".","grouping_separator":",","minus":"-"};
var df = {SHORT_PADDED_CENTURY:function(d){if(d){return(((d.getMonth()+101)+'').substring(1)+'/'+((d.getDate()+101)+'').substring(1)+'/'+d.getFullYear());}},SHORT:function(d){if(d){return((d.getMonth()+1)+'/'+d.getDate()+'/'+(d.getFullYear()+'').substring(2));}},SHORT_NOYEAR:function(d){if(d){return((d.getMonth()+1)+'/'+d.getDate());}},SHORT_NODAY:function(d){if(d){return((d.getMonth()+1)+' '+(d.getFullYear()+'').substring(2));}},MEDIUM:function(d){if(d){return(dfs.month_short[d.getMonth()]+' '+d.getDate()+','+' '+d.getFullYear());}},MEDIUM_NOYEAR:function(d){if(d){return(dfs.month_short[d.getMonth()]+' '+d.getDate());}},MEDIUM_WEEKDAY_NOYEAR:function(d){if(d){return(dfs.day_short[d.getDay()]+' '+dfs.month_short[d.getMonth()]+' '+d.getDate());}},LONG_NODAY:function(d){if(d){return(dfs.month_name[d.getMonth()]+' '+d.getFullYear());}},LONG:function(d){if(d){return(dfs.month_name[d.getMonth()]+' '+d.getDate()+','+' '+d.getFullYear());}},FULL:function(d){if(d){return(dfs.day_name[d.getDay()]+','+' '+dfs.month_name[d.getMonth()]+' '+d.getDate()+','+' '+d.getFullYear());}}};
window.icu = window.icu || new Object();
var icu = window.icu;
icu.getCountry = function() { return "GH" };
icu.getCountryName = function() { return "Ghana" };
icu.getDateFormat = function(formatCode) { var retVal = {}; retVal.format = df[formatCode]; return retVal; };
icu.getDateFormats = function() { return df; };
icu.getDateFormatSymbols = function() { return dfs; };
icu.getDecimalFormat = function(places) { var retVal = {}; retVal.format = function(n) { var ns = n < 0 ? Math.abs(n).toFixed(places) : n.toFixed(places); var ns2 = ns.split('.'); s = ns2[0]; var d = ns2[1]; var rgx = /(\d+)(\d{3})/;while(rgx.test(s)){s = s.replace(rgx, '$1' + nfs["grouping_separator"] + '$2');} return (n < 0 ? nfs["minus"] : "") + s + nfs["decimal_separator"] + d;}; return retVal; };
icu.getDecimalFormatSymbols = function() { return nfs; };
icu.getIntegerFormat = function() { var retVal = {}; retVal.format = function(i) { var s = i < 0 ? Math.abs(i).toString() : i.toString(); var rgx = /(\d+)(\d{3})/;while(rgx.test(s)){s = s.replace(rgx, '$1' + nfs["grouping_separator"] + '$2');} return i < 0 ? nfs["minus"] + s : s;}; return retVal; };
icu.getLanguage = function() { return "gaa" };
icu.getLanguageName = function() { return "Ga" };
icu.getLocale = function() { return "null" };
icu.getLocaleName = function() { return "Ga (Ghana)" };
})(); | PypiClean |
/cctbx_base-2020.8-0_py38h167b89d-cp38-cp38m-manylinux2010_x86_64.whl/mmtbx/command_line/validate_ligands.py | from __future__ import absolute_import, division, print_function
from iotbx.cli_parser import run_program
from mmtbx.programs import validate_ligands
if __name__ == '__main__':
run_program(program_class=validate_ligands.Program)
#old stuff
#from __future__ import absolute_import, division, print_function
#from libtbx.str_utils import make_sub_header
#from libtbx.utils import Sorry
#import os
#import sys
#
#def master_phil():
# from mmtbx.command_line import generate_master_phil_with_inputs
# return generate_master_phil_with_inputs(
# enable_automatic_twin_detection=True,
# phil_string="""
#ligand_code = None
# .type = str
# .multiple = True
#reference_structure = None
# .type = path
#only_segid = None
# .type = str
#verbose = False
# .type = bool
#""")
#
#def run(args, out=sys.stdout):
# usage_string = """\
#mmtbx.validate_ligands model.pdb data.mtz LIGAND_CODE [...]
#
#Print out basic statistics for residue(s) with the given code(s), including
#electron density values/CC.
#"""
# import mmtbx.validation.ligands
# import mmtbx.command_line
# args_ = []
# for arg in args :
# if (len(arg) == 3) and arg.isalnum() and (not os.path.exists(arg)):
# args_.append("ligand_code=%s" % arg)
# else :
# args_.append(arg)
# cmdline = mmtbx.command_line.load_model_and_data(
# args=args_,
# master_phil=master_phil(),
# process_pdb_file=False,
# usage_string=usage_string)
# params = cmdline.params
# if (params.ligand_code is None) or (len(params.ligand_code) == 0):
# raise Sorry("Ligand code required!")
# make_sub_header("Validating ligands", out=out)
# for ligand_code in params.ligand_code :
# validations = mmtbx.validation.ligands.validate_ligands(
# pdb_hierarchy=cmdline.pdb_hierarchy,
# fmodel=cmdline.fmodel,
# ligand_code=ligand_code,
# reference_structure=params.reference_structure,
# only_segid=params.only_segid)
# if (validations is None):
# raise Sorry("No ligands named '%s' found." % ligand_code)
# mmtbx.validation.ligands.show_validation_results(validations=validations,
# out=out,
# verbose=params.verbose)
#
#if (__name__ == "__main__"):
# run(sys.argv[1:]) | PypiClean |
/taskcc-alipay-sdk-python-3.3.398.tar.gz/taskcc-alipay-sdk-python-3.3.398/alipay/aop/api/request/MybankPaymentTradeNormalpayOrderCreateRequest.py | import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MybankPaymentTradeNormalpayOrderCreateModel import MybankPaymentTradeNormalpayOrderCreateModel
class MybankPaymentTradeNormalpayOrderCreateRequest(object):
    """Request wrapper for the Alipay OpenAPI method
    ``mybank.payment.trade.normalpay.order.create``.

    Auto-generated SDK code: holds the business payload (``biz_model`` /
    ``biz_content``) plus the common gateway parameters (version, terminal
    info, callback URLs, user-defined params) and flattens them into the
    ``params`` dict that the gateway client sends.
    """
    def __init__(self, biz_model=None):
        # Typed business model; serialized into P_BIZ_CONTENT when set.
        self._biz_model = biz_model
        # Alternative payload: typed model or dict coerced via the setter.
        self._biz_content = None
        self._version = "1.0"  # API protocol version
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None  # asynchronous notification callback URL
        self._return_url = None  # synchronous redirect URL
        self._udf_params = None  # user-defined extra text parameters (dict)
        self._need_encrypt = False  # whether the request body must be encrypted
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Accept either an already-typed model instance or a plain dict,
        # which is coerced through from_alipay_dict().
        if isinstance(value, MybankPaymentTradeNormalpayOrderCreateModel):
            self._biz_content = value
        else:
            self._biz_content = MybankPaymentTradeNormalpayOrderCreateModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # NOTE: silently ignores non-dict values (generated-SDK behavior).
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Attach one extra user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Flatten this request into the gateway parameter dict.

        Only parameters that are set (truthy) are included.  NOTE(review):
        if both ``biz_model`` and ``biz_content`` are set, the
        ``biz_content`` entry may overwrite the ``biz_model`` one when
        ``P_BIZ_CONTENT`` equals ``'biz_content'`` -- confirm against
        ParamConstants.
        """
        params = dict()
        params[P_METHOD] = 'mybank.payment.trade.normalpay.order.create'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """Return file-upload parameters; empty because this API has none."""
        multipart_params = dict()
        return multipart_params
/adam-robotics-0.0.7.tar.gz/adam-robotics-0.0.7/src/adam/parametric/computations.py |
import casadi as cs
import numpy as np
from adam.casadi.casadi_like import SpatialMath
from adam.core import RBDAlgorithms
from adam.model import Model
from adam.parametric import ParametricModelFactory
class KinDynComputations:
    """This is a small class that retrieves robot quantities represented in a symbolic fashion using CasADi
    in mixed representation, for Floating Base systems - as humanoid robots.
    """
    def __init__(
        self,
        urdfstring: str,
        joints_name_list: list,
        root_link: str = "root_link",
        gravity: np.array = np.array([0.0, 0.0, -9.80665, 0.0, 0.0, 0.0]),
        f_opts: dict = dict(jit=False, jit_options=dict(flags="-Ofast")),
    ) -> None:
        """
        Args:
            urdfstring (str): path of the urdf
            joints_name_list (list): list of the actuated joints
            root_link (str, optional): the first link. Defaults to 'root_link'.
                NOTE(review): currently unused in this constructor - confirm
                whether the factory should receive it.
            gravity (np.array, optional): 6D gravity spatial vector used by the
                dynamics algorithms.
            f_opts (dict, optional): options forwarded to every generated
                casadi.Function (e.g. JIT flags).
        """
        # BUG FIX: the original code instantiated ``URDFModelFactory``, a name
        # never imported in this module, so construction raised NameError.
        # ``ParametricModelFactory`` is the factory this module actually
        # imports (from adam.parametric), so it is used here instead.
        # TODO(review): confirm its constructor signature matches
        # (urdf string, math backend).
        factory = ParametricModelFactory(urdfstring, SpatialMath())
        model = Model.build(factory=factory, joints_name_list=joints_name_list)
        self.rbdalgos = RBDAlgorithms(model=model)
        self.NDoF = self.rbdalgos.NDoF  # number of actuated degrees of freedom
        self.g = gravity
        self.f_opts = f_opts
    def mass_matrix_fun(self) -> cs.Function:
        """Returns the Mass Matrix functions computed the CRBA
        Returns:
            M (casADi function): Mass Matrix
        """
        T_b = cs.SX.sym("T_b", 4, 4)
        s = cs.SX.sym("s", self.NDoF)
        [M, _] = self.rbdalgos.crba(T_b, s)
        return cs.Function("M", [T_b, s], [M.array], self.f_opts)
    def centroidal_momentum_matrix_fun(self) -> cs.Function:
        """Returns the Centroidal Momentum Matrix functions computed the CRBA
        Returns:
            Jcc (casADi function): Centroidal Momentum matrix
        """
        T_b = cs.SX.sym("T_b", 4, 4)
        s = cs.SX.sym("s", self.NDoF)
        [_, Jcm] = self.rbdalgos.crba(T_b, s)
        return cs.Function("Jcm", [T_b, s], [Jcm.array], self.f_opts)
    def forward_kinematics_fun(self, frame: str) -> cs.Function:
        """Computes the forward kinematics relative to the specified frame
        Args:
            frame (str): The frame to which the fk will be computed
        Returns:
            T_fk (casADi function): The fk represented as Homogenous transformation matrix
        """
        s = cs.SX.sym("s", self.NDoF)
        T_b = cs.SX.sym("T_b", 4, 4)
        T_fk = self.rbdalgos.forward_kinematics(frame, T_b, s)
        return cs.Function("T_fk", [T_b, s], [T_fk.array], self.f_opts)
    def jacobian_fun(self, frame: str) -> cs.Function:
        """Returns the Jacobian relative to the specified frame
        Args:
            frame (str): The frame to which the jacobian will be computed
        Returns:
            J_tot (casADi function): The Jacobian relative to the frame
        """
        s = cs.SX.sym("s", self.NDoF)
        T_b = cs.SX.sym("T_b", 4, 4)
        J_tot = self.rbdalgos.jacobian(frame, T_b, s)
        return cs.Function("J_tot", [T_b, s], [J_tot.array], self.f_opts)
    def relative_jacobian_fun(self, frame: str) -> cs.Function:
        """Returns the Jacobian between the root link and a specified frame frames
        Args:
            frame (str): The tip of the chain
        Returns:
            J (casADi function): The Jacobian between the root and the frame
        """
        s = cs.SX.sym("s", self.NDoF)
        J = self.rbdalgos.relative_jacobian(frame, s)
        return cs.Function("J", [s], [J.array], self.f_opts)
    def CoM_position_fun(self) -> cs.Function:
        """Returns the CoM positon
        Returns:
            com (casADi function): The CoM position
        """
        s = cs.SX.sym("s", self.NDoF)
        T_b = cs.SX.sym("T_b", 4, 4)
        com_pos = self.rbdalgos.CoM_position(T_b, s)
        return cs.Function("CoM_pos", [T_b, s], [com_pos.array], self.f_opts)
    def bias_force_fun(self) -> cs.Function:
        """Returns the bias force of the floating-base dynamics equation,
        using a reduced RNEA (no acceleration and external forces)
        Returns:
            h (casADi function): the bias force
        """
        T_b = cs.SX.sym("T_b", 4, 4)
        s = cs.SX.sym("s", self.NDoF)
        v_b = cs.SX.sym("v_b", 6)
        s_dot = cs.SX.sym("s_dot", self.NDoF)
        h = self.rbdalgos.rnea(T_b, s, v_b, s_dot, self.g)
        return cs.Function("h", [T_b, s, v_b, s_dot], [h.array], self.f_opts)
    def coriolis_term_fun(self) -> cs.Function:
        """Returns the coriolis term of the floating-base dynamics equation,
        using a reduced RNEA (no acceleration and external forces)
        Returns:
            C (casADi function): the Coriolis term
        """
        T_b = cs.SX.sym("T_b", 4, 4)
        q = cs.SX.sym("q", self.NDoF)
        v_b = cs.SX.sym("v_b", 6)
        q_dot = cs.SX.sym("q_dot", self.NDoF)
        # set in the bias force computation the gravity term to zero
        C = self.rbdalgos.rnea(T_b, q, v_b, q_dot, np.zeros(6))
        return cs.Function("C", [T_b, q, v_b, q_dot], [C.array], self.f_opts)
    def gravity_term_fun(self) -> cs.Function:
        """Returns the gravity term of the floating-base dynamics equation,
        using a reduced RNEA (no acceleration and external forces)
        Returns:
            G (casADi function): the gravity term
        """
        T_b = cs.SX.sym("T_b", 4, 4)
        q = cs.SX.sym("q", self.NDoF)
        # set in the bias force computation the velocity to zero
        G = self.rbdalgos.rnea(T_b, q, np.zeros(6), np.zeros(self.NDoF), self.g)
        return cs.Function("G", [T_b, q], [G.array], self.f_opts)
    def forward_kinematics(self, frame, T_b, s) -> cs.Function:
        """Computes the forward kinematics relative to the specified frame
        Args:
            frame (str): The frame to which the fk will be computed
            T_b: base pose as a 4x4 homogeneous transform (symbolic or numeric)
            s: joint positions vector
        Returns:
            T_fk (casADi function): The fk represented as Homogenous transformation matrix
        """
        return self.rbdalgos.forward_kinematics(frame, T_b, s)
    def get_total_mass(self):
        """Returns the total mass of the robot
        Returns:
            mass: The total mass
        """
        return self.rbdalgos.get_total_mass()
/tensorleap-openapi-client-1.2.0.tar.gz/tensorleap-openapi-client-1.2.0/tensorleap_openapi_client/paths/visualizations_get_visualization/post.py | from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from tensorleap_openapi_client import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from tensorleap_openapi_client import schemas # noqa: F401
from tensorleap_openapi_client.model.visualization import Visualization
from tensorleap_openapi_client.model.get_visualization_params import GetVisualizationParams
from . import path
# body param
SchemaForRequestBodyApplicationJson = GetVisualizationParams
# Request-body descriptor: this endpoint requires a JSON body shaped like
# GetVisualizationParams.
request_body_get_visualization_params = api_client.RequestBody(
    content={
        'application/json': api_client.MediaType(
            schema=SchemaForRequestBodyApplicationJson),
    },
    required=True,
)
SchemaFor200ResponseBodyApplicationJson = Visualization
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
    # Deserialized HTTP 200 response: `body` is a Visualization instance.
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor200ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
# Deserializer wiring for the 200 status code.
_response_for_200 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor200,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor200ResponseBodyApplicationJson),
    },
)
# Maps HTTP status code (as string) to its response deserializer.
_status_code_to_response = {
    '200': _response_for_200,
}
# Content types sent in the Accept header by default.
_all_accept_content_types = (
    'application/json',
)
class BaseApi(api_client.Api):
    """Implementation of POST /visualizations/getVisualization.

    Auto-generated: the ``@typing.overload`` stubs below only refine the
    return type for static type checkers; the last definition is the real
    implementation.
    """
    @typing.overload
    def _get_visualization_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def _get_visualization_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def _get_visualization_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def _get_visualization_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def _get_visualization_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = 'application/json',
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """Serialize ``body``, POST it, and deserialize the response.

        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        :raises exceptions.ApiValueError: if ``body`` is unset
        :raises exceptions.ApiException: on a non-2xx HTTP status
        """
        used_path = path.value
        _headers = HTTPHeaderDict()
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)
        # The body is required by the spec; reject the unset sentinel early.
        if body is schemas.unset:
            raise exceptions.ApiValueError(
                'The required body parameter has an invalid value of: unset. Set a valid value instead')
        _fields = None
        _body = None
        # Serialization yields either multipart 'fields' or a raw 'body'.
        serialized_data = request_body_get_visualization_params.serialize(body, content_type)
        _headers.add('Content-Type', content_type)
        if 'fields' in serialized_data:
            _fields = serialized_data['fields']
        elif 'body' in serialized_data:
            _body = serialized_data['body']
        response = self.api_client.call_api(
            resource_path=used_path,
            method='post'.upper(),
            headers=_headers,
            fields=_fields,
            body=_body,
            stream=stream,
            timeout=timeout,
        )
        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Pick the deserializer registered for this status code, if any.
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(api_response=api_response)
        return api_response
class GetVisualization(BaseApi):
    # this class is used by api classes that refer to endpoints with operationId fn names
    # The overload stubs mirror BaseApi._get_visualization_oapg for type
    # checkers; the final definition simply delegates to it.
    @typing.overload
    def get_visualization(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def get_visualization(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def get_visualization(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def get_visualization(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def get_visualization(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = 'application/json',
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """operationId-named entry point; delegates to the shared impl."""
        return self._get_visualization_oapg(
            body=body,
            content_type=content_type,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
class ApiForpost(BaseApi):
    # this class is used by api classes that refer to endpoints by path and http method names
    # The overload stubs mirror BaseApi._get_visualization_oapg for type
    # checkers; the final definition simply delegates to it.
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
    ]: ...
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = 'application/json',
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """HTTP-method-named entry point; delegates to the shared impl."""
        return self._get_visualization_oapg(
            body=body,
            content_type=content_type,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
/lbrlabs_pulumi_launchdarkly-0.0.6.tar.gz/lbrlabs_pulumi_launchdarkly-0.0.6/lbrlabs_pulumi_launchdarkly/get_project.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
__all__ = [
'GetProjectResult',
'AwaitableGetProjectResult',
'get_project',
'get_project_output',
]
@pulumi.output_type
class GetProjectResult:
    """
    A collection of values returned by getProject.
    """
    def __init__(__self__, client_side_availabilities=None, default_client_side_availabilities=None, id=None, key=None, name=None, tags=None):
        # Each argument is type-checked, then stored via pulumi.set (required
        # by @pulumi.output_type instead of plain attribute assignment).
        if client_side_availabilities and not isinstance(client_side_availabilities, list):
            raise TypeError("Expected argument 'client_side_availabilities' to be a list")
        if client_side_availabilities is not None:
            # Deprecated field: emit both a Python warning and a Pulumi
            # engine log message.
            warnings.warn("""'client_side_availability' is now deprecated. Please migrate to 'default_client_side_availability' to maintain future compatability.""", DeprecationWarning)
            pulumi.log.warn("""client_side_availabilities is deprecated: 'client_side_availability' is now deprecated. Please migrate to 'default_client_side_availability' to maintain future compatability.""")
        pulumi.set(__self__, "client_side_availabilities", client_side_availabilities)
        if default_client_side_availabilities and not isinstance(default_client_side_availabilities, list):
            raise TypeError("Expected argument 'default_client_side_availabilities' to be a list")
        pulumi.set(__self__, "default_client_side_availabilities", default_client_side_availabilities)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if key and not isinstance(key, str):
            raise TypeError("Expected argument 'key' to be a str")
        pulumi.set(__self__, "key", key)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if tags and not isinstance(tags, list):
            raise TypeError("Expected argument 'tags' to be a list")
        pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="clientSideAvailabilities")
    def client_side_availabilities(self) -> Sequence['outputs.GetProjectClientSideAvailabilityResult']:
        """
        **Deprecated** A map describing which client-side SDKs can use new flags by default. To learn more, read Nested Client-Side Availability Block.
        Please migrate to `default_client_side_availability` to maintain future compatability.
        """
        return pulumi.get(self, "client_side_availabilities")
    @property
    @pulumi.getter(name="defaultClientSideAvailabilities")
    def default_client_side_availabilities(self) -> Sequence['outputs.GetProjectDefaultClientSideAvailabilityResult']:
        """
        A block describing which client-side SDKs can use new flags by default. To learn more, read Nested Client-Side Availability Block.
        """
        return pulumi.get(self, "default_client_side_availabilities")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The project's unique key.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The project's name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Sequence[str]]:
        """
        The project's set of tags.
        """
        return pulumi.get(self, "tags")
class AwaitableGetProjectResult(GetProjectResult):
    """Awaitable variant of GetProjectResult.

    The ``if False: yield`` below makes ``__await__`` a generator function
    without ever yielding, so ``await result`` returns the plain
    GetProjectResult immediately.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetProjectResult(
            client_side_availabilities=self.client_side_availabilities,
            default_client_side_availabilities=self.default_client_side_availabilities,
            id=self.id,
            key=self.key,
            name=self.name,
            tags=self.tags)
def get_project(key: Optional[str] = None,
                tags: Optional[Sequence[str]] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProjectResult:
    """
    Provides a LaunchDarkly project data source.
    This data source allows you to retrieve project information from your LaunchDarkly organization.
    > **Note:** LaunchDarkly data sources do not provide access to the project's environments. If you wish to import environment configurations as data sources you must use the [`Environment` data source](https://www.terraform.io/docs/providers/launchdarkly/d/environment.html).
    ## Example Usage
    ```python
    import pulumi
    import pulumi_launchdarkly as launchdarkly
    example = launchdarkly.get_project(key="example-project")
    ```
    :param str key: The project's unique key.
    :param Sequence[str] tags: The project's set of tags.
    """
    __args__ = dict()
    __args__['key'] = key
    __args__['tags'] = tags
    # Merge provider-level invoke defaults with any caller-supplied options.
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    # Synchronous provider invoke; .value unwraps the typed result.
    __ret__ = pulumi.runtime.invoke('launchdarkly:index/getProject:getProject', __args__, opts=opts, typ=GetProjectResult).value
    return AwaitableGetProjectResult(
        client_side_availabilities=__ret__.client_side_availabilities,
        default_client_side_availabilities=__ret__.default_client_side_availabilities,
        id=__ret__.id,
        key=__ret__.key,
        name=__ret__.name,
        tags=__ret__.tags)
# lift_output_func wraps get_project so it accepts Output-typed inputs and
# returns an Output; the body is intentionally just ``...``.
@_utilities.lift_output_func(get_project)
def get_project_output(key: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Optional[Sequence[str]]]] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProjectResult]:
    """
    Provides a LaunchDarkly project data source.
    This data source allows you to retrieve project information from your LaunchDarkly organization.
    > **Note:** LaunchDarkly data sources do not provide access to the project's environments. If you wish to import environment configurations as data sources you must use the [`Environment` data source](https://www.terraform.io/docs/providers/launchdarkly/d/environment.html).
    ## Example Usage
    ```python
    import pulumi
    import pulumi_launchdarkly as launchdarkly
    example = launchdarkly.get_project(key="example-project")
    ```
    :param str key: The project's unique key.
    :param Sequence[str] tags: The project's set of tags.
    """
    ...
/fireblocks_py-1.0.0-py3-none-any.whl/fireblocks_client/paths/vault_public_key_info_/get.py | from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from fireblocks_client import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from fireblocks_client import schemas # noqa: F401
from fireblocks_client.model.public_key_information import PublicKeyInformation
from fireblocks_client.model.error import Error
from . import path
# Query params
DerivationPathSchema = schemas.StrSchema
AlgorithmSchema = schemas.StrSchema
CompressedSchema = schemas.BoolSchema
# derivationPath and algorithm are required; compressed is optional.
RequestRequiredQueryParams = typing_extensions.TypedDict(
    'RequestRequiredQueryParams',
    {
        'derivationPath': typing.Union[DerivationPathSchema, str, ],
        'algorithm': typing.Union[AlgorithmSchema, str, ],
    }
)
RequestOptionalQueryParams = typing_extensions.TypedDict(
    'RequestOptionalQueryParams',
    {
        'compressed': typing.Union[CompressedSchema, bool, ],
    },
    total=False
)
class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
    pass
# Serializers for each query parameter (form style, exploded).
request_query_derivation_path = api_client.QueryParameter(
    name="derivationPath",
    style=api_client.ParameterStyle.FORM,
    schema=DerivationPathSchema,
    required=True,
    explode=True,
)
request_query_algorithm = api_client.QueryParameter(
    name="algorithm",
    style=api_client.ParameterStyle.FORM,
    schema=AlgorithmSchema,
    required=True,
    explode=True,
)
request_query_compressed = api_client.QueryParameter(
    name="compressed",
    style=api_client.ParameterStyle.FORM,
    schema=CompressedSchema,
    explode=True,
)
XRequestIDSchema = schemas.StrSchema
x_request_id_parameter = api_client.HeaderParameter(
    name="X-Request-ID",
    style=api_client.ParameterStyle.SIMPLE,
    schema=XRequestIDSchema,
)
SchemaFor200ResponseBody = PublicKeyInformation
ResponseHeadersFor200 = typing_extensions.TypedDict(
    'ResponseHeadersFor200',
    {
        'X-Request-ID': XRequestIDSchema,
    }
)
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
    # Deserialized HTTP 200 response: `body` is a PublicKeyInformation.
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor200ResponseBody,
    ]
    headers: ResponseHeadersFor200
_response_for_200 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor200,
    content={
        '*/*': api_client.MediaType(
            schema=SchemaFor200ResponseBody),
    },
    headers=[
        x_request_id_parameter,
    ]
)
# NOTE: generated code re-defines XRequestIDSchema / x_request_id_parameter
# below; the first definitions were already captured by _response_for_200
# above, so rebinding the names is harmless.
XRequestIDSchema = schemas.StrSchema
x_request_id_parameter = api_client.HeaderParameter(
    name="X-Request-ID",
    style=api_client.ParameterStyle.SIMPLE,
    schema=XRequestIDSchema,
)
SchemaFor0ResponseBodyApplicationJson = Error
ResponseHeadersFor0 = typing_extensions.TypedDict(
    'ResponseHeadersFor0',
    {
        'X-Request-ID': XRequestIDSchema,
    }
)
@dataclass
class ApiResponseForDefault(api_client.ApiResponse):
    # Fallback ("default") response: `body` is an Error model.
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor0ResponseBodyApplicationJson,
    ]
    headers: ResponseHeadersFor0
_response_for_default = api_client.OpenApiResponse(
    response_cls=ApiResponseForDefault,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor0ResponseBodyApplicationJson),
    },
    headers=[
        x_request_id_parameter,
    ]
)
# Maps HTTP status code (as string) to its deserializer; 'default' is the
# catch-all for unlisted statuses.
_status_code_to_response = {
    '200': _response_for_200,
    'default': _response_for_default,
}
_all_accept_content_types = (
    '*/*',
    'application/json',
)
class BaseApi(api_client.Api):
    """Implementation of GET /vault/public_key_info/.

    Auto-generated: the ``@typing.overload`` stubs below only refine the
    return type for static type checkers; the last definition is the real
    implementation.
    """
    @typing.overload
    def _get_public_key_info_oapg(
        self,
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        ApiResponseForDefault,
    ]: ...
    @typing.overload
    def _get_public_key_info_oapg(
        self,
        skip_deserialization: typing_extensions.Literal[True],
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def _get_public_key_info_oapg(
        self,
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        ApiResponseForDefault,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def _get_public_key_info_oapg(
        self,
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """
        Get the public key information
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        :raises exceptions.ApiException: on a non-2xx HTTP status
        """
        self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params)
        used_path = path.value
        prefix_separator_iterator = None
        # Serialize each provided query parameter and append it to the path.
        for parameter in (
            request_query_derivation_path,
            request_query_algorithm,
            request_query_compressed,
        ):
            parameter_data = query_params.get(parameter.name, schemas.unset)
            if parameter_data is schemas.unset:
                continue
            if prefix_separator_iterator is None:
                prefix_separator_iterator = parameter.get_prefix_separator_iterator()
            serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator)
            for serialized_value in serialized_data.values():
                used_path += serialized_value
        _headers = HTTPHeaderDict()
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)
        response = self.api_client.call_api(
            resource_path=used_path,
            method='get'.upper(),
            headers=_headers,
            stream=stream,
            timeout=timeout,
        )
        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Try the status-specific deserializer first, then the 'default'
            # catch-all, then fall back to the raw response.
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                default_response = _status_code_to_response.get('default')
                if default_response:
                    api_response = default_response.deserialize(response, self.api_client.configuration)
                else:
                    api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(
                status=response.status,
                reason=response.reason,
                api_response=api_response
            )
        return api_response
class GetPublicKeyInfo(BaseApi):
    # this class is used by api classes that refer to endpoints with operationId fn names
    # The overload stubs mirror BaseApi._get_public_key_info_oapg for type
    # checkers; the final definition simply delegates to it.
    @typing.overload
    def get_public_key_info(
        self,
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        ApiResponseForDefault,
    ]: ...
    @typing.overload
    def get_public_key_info(
        self,
        skip_deserialization: typing_extensions.Literal[True],
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def get_public_key_info(
        self,
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        ApiResponseForDefault,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def get_public_key_info(
        self,
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """operationId-named entry point; delegates to the shared impl."""
        return self._get_public_key_info_oapg(
            query_params=query_params,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
class ApiForget(BaseApi):
    # this class is used by api classes that refer to endpoints by path and http method names
    # Overload stubs mirror GetPublicKeyInfo: they only narrow the static return
    # type based on ``skip_deserialization``; the last definition is the real one.
    @typing.overload
    def get(
        self,
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        ApiResponseForDefault,
    ]: ...
    @typing.overload
    def get(
        self,
        skip_deserialization: typing_extensions.Literal[True],
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def get(
        self,
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor200,
        ApiResponseForDefault,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def get(
        self,
        query_params: RequestQueryParams = frozendict.frozendict(),
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """HTTP GET entry point; delegates to the shared _oapg implementation."""
        return self._get_public_key_info_oapg(
            query_params=query_params,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
/nti.schema-1.16.0.tar.gz/nti.schema-1.16.0/docs/interfaces.rst | =======================
nti.schema.interfaces
=======================
.. automodule:: nti.schema.interfaces
:members:
:undoc-members:
.. exception:: InvalidValue(*args, field=None, value=None)
   A convenience exception that additionally carries the field and the specific value that failed validation.
.. deprecated:: 1.4.0
This is now just a convenience wrapper around
:class:`zope.schema.interfaces.InvalidValue` that calls
:meth:`.zope.schema.interfaces.ValidationError.with_field_and_value`
before returning the exception. You should always catch
:class:`zope.schema.interfaces.InvalidValue`.
| PypiClean |
/django-easy-notify-1.1.tar.gz/django-easy-notify-1.1/django_notifications/settings.py | import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-ove12^l=a$9dgua2j709x$!p6%wmm$@z)lyep64*s4-t!0bgyk"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Switches between the ASGI/channels stack (websocket push) and plain WSGI below.
ENABLE_PUSH_NOTIFICATION = True
ALLOWED_HOSTS = ["*"]  # wildcard is acceptable for this demo project only
# Application definition
INSTALLED_APPS = [
    "channels",
    "daphne",
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "notifications",
]
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "django_notifications.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [os.path.join(BASE_DIR, "notifications/templates")],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
if ENABLE_PUSH_NOTIFICATION:
    ASGI_APPLICATION = "django_notifications.asgi.application"
    CHANNEL_LAYERS = {
        "default": {
            "BACKEND": "channels_redis.core.RedisChannelLayer",
            "CONFIG": {
                # "redis" hostname presumably matches a docker-compose service
                # name — TODO confirm against the deployment setup.
                "hosts": [("redis", 6379)],
            },
        },
    }
else:
    WSGI_APPLICATION = "django_notifications.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.2/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": BASE_DIR / "db.sqlite3",
    }
}
# Password validation
# https://docs.djangoproject.com/en/4.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]
# Internationalization
# https://docs.djangoproject.com/en/4.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.2/howto/static-files/
STATIC_URL = "static/"
# Default primary key field type
# https://docs.djangoproject.com/en/4.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# Media files live next to (not inside) the project package directory.
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media")
MEDIA_URL = "/media/"
/quara-poetry-core-next-1.1.0a6.tar.gz/quara-poetry-core-next-1.1.0a6/src/poetry/core/masonry/utils/module.py | from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
if TYPE_CHECKING:
    # Imported only for type annotations, avoiding a runtime import cycle.
    from poetry.core.masonry.utils.include import Include
class ModuleOrPackageNotFound(ValueError):
    """Raised when no module file or package directory can be located for a project."""
    pass
class Module:
    """The main module or package of a project, plus any extra includes.

    If no explicit ``packages`` are given, the module is auto-discovered:
    first as ``<name>/`` or ``<name>.py`` directly under ``directory``, then
    under a ``src/`` layout. At most one of the two forms may exist at each
    location.
    """

    def __init__(
        self,
        name: str,
        directory: str = ".",
        packages: Optional[List[Dict[str, Any]]] = None,
        includes: Optional[List[Dict[str, Any]]] = None,
    ) -> None:
        # Imported locally to avoid import cycles at module load time.
        from poetry.core.masonry.utils.include import Include
        from poetry.core.masonry.utils.package_include import PackageInclude
        from poetry.core.utils.helpers import module_name

        self._name = module_name(name)
        self._in_src = False
        self._is_package = False
        self._path = Path(directory)
        self._includes: List[Include] = []
        packages = packages or []
        includes = includes or []

        if not packages:
            # It must exist either as a .py file or a directory, but not both
            pkg_dir = Path(directory, self._name)
            py_file = Path(directory, self._name + ".py")
            if pkg_dir.is_dir() and py_file.is_file():
                raise ValueError(f"Both {pkg_dir} and {py_file} exist")
            elif pkg_dir.is_dir():
                packages = [{"include": str(pkg_dir.relative_to(self._path))}]
            elif py_file.is_file():
                packages = [{"include": str(py_file.relative_to(self._path))}]
            else:
                # Searching for a src module
                src = Path(directory, "src")
                src_pkg_dir = src / self._name
                src_py_file = src / (self._name + ".py")
                if src_pkg_dir.is_dir() and src_py_file.is_file():
                    # Bug fix: report the conflicting src/ paths — the previous
                    # message interpolated the unrelated root-level candidates.
                    raise ValueError(f"Both {src_pkg_dir} and {src_py_file} exist")
                elif src_pkg_dir.is_dir():
                    packages = [
                        {
                            "include": str(src_pkg_dir.relative_to(src)),
                            "from": str(src.relative_to(self._path)),
                        }
                    ]
                elif src_py_file.is_file():
                    packages = [
                        {
                            "include": str(src_py_file.relative_to(src)),
                            "from": str(src.relative_to(self._path)),
                        }
                    ]
                else:
                    raise ModuleOrPackageNotFound(
                        f"No file/folder found for package {name}"
                    )

        for package in packages:
            # "format" may be a single string or a list of build formats.
            formats = package.get("format")
            if formats and not isinstance(formats, list):
                formats = [formats]
            self._includes.append(
                PackageInclude(
                    self._path,
                    package["include"],
                    formats=formats,
                    source=package.get("from"),
                )
            )

        for include in includes:
            self._includes.append(
                Include(self._path, include["path"], formats=include["format"])
            )

    @property
    def name(self) -> str:
        return self._name

    @property
    def path(self) -> Path:
        return self._path

    @property
    def file(self) -> Path:
        # For a package this is its __init__.py; for a single module, the .py file.
        if self._is_package:
            return self._path / "__init__.py"
        else:
            return self._path

    @property
    def includes(self) -> List["Include"]:
        return self._includes

    def is_package(self) -> bool:
        return self._is_package

    def is_in_src(self) -> bool:
        return self._in_src
/avh_api-1.0.5-py3-none-any.whl/avh_api/rest.py | import io
import json
import logging
import re
import ssl
from urllib.parse import urlencode
from urllib.parse import urlparse
from urllib.request import proxy_bypass_environment
import urllib3
import ipaddress
from avh_api.exceptions import ApiException, UnauthorizedException, ForbiddenException, NotFoundException, ServiceException, ApiValueError
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
    """File-like adapter around a urllib3 response object.

    Mirrors ``status``, ``reason`` and ``data`` as plain attributes and
    delegates header access to the wrapped response.
    """

    def __init__(self, resp):
        # Keep the raw urllib3 response around for header lookups.
        self.urllib3_response = resp
        self.status = resp.status
        self.reason = resp.reason
        self.data = resp.data

    def getheaders(self):
        """Return all response headers as a dictionary."""
        raw = self.urllib3_response
        return raw.getheaders()

    def getheader(self, name, default=None):
        """Return a single response header, or *default* when absent."""
        raw = self.urllib3_response
        return raw.getheader(name, default)
class RESTClientObject(object):
    """Issues HTTP requests through a urllib3 pool manager configured from an
    avh_api ``Configuration`` (TLS verification, client certificates, proxy,
    retries, socket options)."""
    def __init__(self, configuration, pools_size=4, maxsize=None):
        # urllib3.PoolManager will pass all kw parameters to connectionpool
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
        # maxsize is the number of requests to host that are allowed in parallel # noqa: E501
        # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
        # cert_reqs
        if configuration.verify_ssl:
            cert_reqs = ssl.CERT_REQUIRED
        else:
            cert_reqs = ssl.CERT_NONE
        addition_pool_args = {}
        if configuration.assert_hostname is not None:
            addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
        if configuration.retries is not None:
            addition_pool_args['retries'] = configuration.retries
        if configuration.socket_options is not None:
            addition_pool_args['socket_options'] = configuration.socket_options
        if maxsize is None:
            if configuration.connection_pool_maxsize is not None:
                maxsize = configuration.connection_pool_maxsize
            else:
                maxsize = 4
        # https pool manager
        # Route through a ProxyManager unless the API host matches no_proxy rules.
        if configuration.proxy and not should_bypass_proxies(configuration.host, no_proxy=configuration.no_proxy or ''):
            self.pool_manager = urllib3.ProxyManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=configuration.ssl_ca_cert,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                proxy_url=configuration.proxy,
                proxy_headers=configuration.proxy_headers,
                **addition_pool_args
            )
        else:
            self.pool_manager = urllib3.PoolManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=configuration.ssl_ca_cert,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                **addition_pool_args
            )
    def request(self, method, url, query_params=None, headers=None,
                body=None, post_params=None, _preload_content=True,
                _request_timeout=None):
        """Perform requests.

        :param method: http request method
        :param url: http request url
        :param query_params: query parameters in the url
        :param headers: http request headers
        :param body: request json body, for `application/json`
        :param post_params: request post parameters,
                            `application/x-www-form-urlencoded`
                            and `multipart/form-data`
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        """
        method = method.upper()
        assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
                          'PATCH', 'OPTIONS']
        if post_params and body:
            raise ApiValueError(
                "body parameter cannot be used with post_params parameter."
            )
        post_params = post_params or {}
        headers = headers or {}
        # Normalize the timeout into a urllib3.Timeout object.
        timeout = None
        if _request_timeout:
            if isinstance(_request_timeout, (int, float)):  # noqa: E501,F821
                timeout = urllib3.Timeout(total=_request_timeout)
            elif (isinstance(_request_timeout, tuple) and
                  len(_request_timeout) == 2):
                timeout = urllib3.Timeout(
                    connect=_request_timeout[0], read=_request_timeout[1])
        try:
            # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
            if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
                # Only set a default Content-Type for POST, PUT, PATCH and OPTIONS requests
                if (method != 'DELETE') and ('Content-Type' not in headers):
                    headers['Content-Type'] = 'application/json'
                if query_params:
                    url += '?' + urlencode(query_params)
                # Dispatch on Content-Type; absent Content-Type (DELETE) also
                # takes the JSON branch, where request_body stays None.
                if ('Content-Type' not in headers) or (re.search('json', headers['Content-Type'], re.IGNORECASE)):
                    request_body = None
                    if body is not None:
                        request_body = json.dumps(body)
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'application/x-www-form-urlencoded':  # noqa: E501
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=False,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type which generated by urllib3 will be
                    # overwritten.
                    del headers['Content-Type']
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=True,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                # Pass a `string` parameter directly in the body to support
                # other content types than Json when `body` argument is
                # provided in serialized form
                elif isinstance(body, str) or isinstance(body, bytes):
                    request_body = body
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                else:
                    # Cannot generate the request from given parameters
                    msg = """Cannot prepare a request message for provided
                             arguments. Please check that your arguments match
                             declared content type."""
                    raise ApiException(status=0, reason=msg)
            # For `GET`, `HEAD`
            else:
                r = self.pool_manager.request(method, url,
                                              fields=query_params,
                                              preload_content=_preload_content,
                                              timeout=timeout,
                                              headers=headers)
        except urllib3.exceptions.SSLError as e:
            msg = "{0}\n{1}".format(type(e).__name__, str(e))
            raise ApiException(status=0, reason=msg)
        if _preload_content:
            r = RESTResponse(r)
            # log response body
            logger.debug("response body: %s", r.data)
        # Map HTTP error statuses onto the typed exception hierarchy.
        if not 200 <= r.status <= 299:
            if r.status == 401:
                raise UnauthorizedException(http_resp=r)
            if r.status == 403:
                raise ForbiddenException(http_resp=r)
            if r.status == 404:
                raise NotFoundException(http_resp=r)
            if 500 <= r.status <= 599:
                raise ServiceException(http_resp=r)
            raise ApiException(http_resp=r)
        return r
    def GET(self, url, headers=None, query_params=None, _preload_content=True,
            _request_timeout=None):
        return self.request("GET", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)
    def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
             _request_timeout=None):
        return self.request("HEAD", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)
    def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
                body=None, _preload_content=True, _request_timeout=None):
        return self.request("OPTIONS", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def DELETE(self, url, headers=None, query_params=None, body=None,
               _preload_content=True, _request_timeout=None):
        return self.request("DELETE", url,
                            headers=headers,
                            query_params=query_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def POST(self, url, headers=None, query_params=None, post_params=None,
             body=None, _preload_content=True, _request_timeout=None):
        return self.request("POST", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def PUT(self, url, headers=None, query_params=None, post_params=None,
            body=None, _preload_content=True, _request_timeout=None):
        return self.request("PUT", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def PATCH(self, url, headers=None, query_params=None, post_params=None,
              body=None, _preload_content=True, _request_timeout=None):
        return self.request("PATCH", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
# end of class RESTClientObject
def is_ipv4(target):
    """Return True if *target* parses as an IPv4 address, else False."""
    try:
        # The parsed address object is not needed; only validity matters
        # (the previous version bound it to an unused local).
        ipaddress.IPv4Address(target)
        return True
    except ipaddress.AddressValueError:
        return False
def in_ipv4net(target, net):
    """Return True if IPv4 address *target* lies inside network *net*.

    Invalid addresses or malformed network specs yield False.
    """
    try:
        network = ipaddress.IPv4Network(net)
        address = ipaddress.IPv4Address(target)
        return address in network
    except (ipaddress.AddressValueError, ipaddress.NetmaskValueError):
        return False
def should_bypass_proxies(url, no_proxy=None):
    """Yet another requests.should_bypass_proxies.

    Test if proxies should not be used for a particular url:
    returns True when the proxy must be bypassed for *url* given the
    *no_proxy* specification (comma-separated hosts/CIDRs, or "*").
    """
    parsed = urlparse(url)
    # No hostname (relative or malformed URL): nothing to proxy.
    if parsed.hostname in [None, '']:
        return True
    # No NO_PROXY configured: never bypass.
    if no_proxy in [None, '']:
        return False
    # NO_PROXY="*" disables proxying entirely.
    if no_proxy == '*':
        return True
    no_proxy = no_proxy.lower().replace(' ', '')  # stray semicolon removed
    entries = (host for host in no_proxy.split(',') if host)
    if is_ipv4(parsed.hostname):
        # Match the literal IP against any address/CIDR entries in NO_PROXY.
        for item in entries:
            if in_ipv4net(parsed.hostname, item):
                return True
    # Fall back to the stdlib hostname-suffix matching.
    return proxy_bypass_environment(parsed.hostname, {'no': no_proxy})
/bpy_cuda-2.82-cp37-cp37m-win_amd64.whl/bpy_cuda-2.82.data/scripts/2.82/scripts/startup/bl_ui/properties_data_empty.py |
# <pep8 compliant>
from bpy.types import Panel
class DataButtonsPanel:
    """Mixin with the common location (Properties editor, data tab) and
    poll test shared by the empty-object panels in this module."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "data"
    @classmethod
    def poll(cls, context):
        # Only show these panels when the active object is an Empty.
        obj = context.object
        return (obj and obj.type == 'EMPTY')
class DATA_PT_empty(DataButtonsPanel, Panel):
    bl_label = "Empty"
    def draw(self, context):
        """Draw display-type settings for the active empty object."""
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        ob = context.object
        layout.prop(ob, "empty_display_type", text="Display As")
        layout.prop(ob, "empty_display_size", text="Size")
        # Image empties expose extra alpha/offset/depth/side options.
        if ob.empty_display_type == 'IMAGE':
            layout.prop(ob, "use_empty_image_alpha")
            col = layout.column()
            col.active = ob.use_empty_image_alpha
            col.prop(ob, "color", text="Opacity", index=3, slider=True)
            col = layout.column(align=True)
            col.prop(ob, "empty_image_offset", text="Offset X", index=0)
            col.prop(ob, "empty_image_offset", text="Y", index=1)
            col = layout.column()
            col.row().prop(ob, "empty_image_depth", text="Depth", expand=True)
            col.row().prop(ob, "empty_image_side", text="Side", expand=True)
            col.prop(ob, "show_empty_image_orthographic", text="Display Orthographic")
            col.prop(ob, "show_empty_image_perspective", text="Display Perspective")
            col.prop(ob, "show_empty_image_only_axis_aligned")
class DATA_PT_empty_image(DataButtonsPanel, Panel):
    bl_label = "Image"
    @classmethod
    def poll(cls, context):
        # Shown only for empties displayed as images.
        ob = context.object
        return (ob and ob.type == 'EMPTY' and ob.empty_display_type == 'IMAGE')
    def draw(self, context):
        """Draw the image datablock selector and image settings."""
        layout = self.layout
        ob = context.object
        layout.template_ID(ob, "data", open="image.open", unlink="object.unlink_data")
        layout.separator()
        layout.template_image(ob, "data", ob.image_user, compact=True)
# Panels registered by this module.
classes = (
    DATA_PT_empty,
    DATA_PT_empty_image,
)
if __name__ == "__main__":  # only for live edit.
    from bpy.utils import register_class
    for cls in classes:
        register_class(cls)
/dsl-james-0.1.4.tar.gz/dsl-james-0.1.4/james/cli.py | import sys
from pathlib import Path
from loguru import logger
import click
from termcolor import colored
from james import __version__
from james.utils import check_path, cmd, timeit, PythonVersionType
from james.config import IgniteConfig, IgniteInvalidStateError
from james.azure import AzureSetup
from james.james import Ignition
from james.review import CodeInspection
# Replace loguru's default DEBUG handler with an INFO-level handler on stderr.
logger.remove(0)
logger.add(sys.stderr, level='INFO')
INTRO_TEXT = r"""
██╗ █████╗ ███╗ ███╗███████╗███████╗
██║██╔══██╗████╗ ████║██╔════╝██╔════╝
██║███████║██╔████╔██║█████╗ ███████╗
██ ██║██╔══██║██║╚██╔╝██║██╔══╝ ╚════██║
╚█████╔╝██║ ██║██║ ╚═╝ ██║███████╗███████║
╚════╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚══════╝╚══════╝
█████████████████████████████████████████████████████████████████████████████████
██▀▄─██─▄─▄─███▄─█─▄█─▄▄─█▄─██─▄█▄─▄▄▀███─▄▄▄▄█▄─▄▄─█▄─▄▄▀█▄─█─▄█▄─▄█─▄▄▄─█▄─▄▄─█
██─▀─████─██████▄─▄██─██─██─██─███─▄─▄███▄▄▄▄─██─▄█▀██─▄─▄██▄▀▄███─██─███▀██─▄█▀█
▀▄▄▀▄▄▀▀▄▄▄▀▀▀▀▀▄▄▄▀▀▄▄▄▄▀▀▄▄▄▄▀▀▄▄▀▄▄▀▀▀▄▄▄▄▄▀▄▄▄▄▄▀▄▄▀▄▄▀▀▀▄▀▀▀▄▄▄▀▄▄▄▄▄▀▄▄▄▄▄▀
************************************************ powered by Data Science Lab ////
Fast & easy project startup
"""
"""
- Main:
- setup: only set variables
- show: show variables / status
- go: execute (if vars are set)
Typical workflow
$ [~/projects/] james setup
-> reads / creates file in ~
$ [~/projects/] james go
-> reads file in ~, copies file to ~/projects/my-project/
$ [~/projects/my-project] james show
-> reads file in ~/projects/my-project/
"""
def msg_error(msg: str) -> None:
    """Print *msg* as a white-on-red error banner."""
    banner = f'\n\n{msg}\n'
    click.secho(banner, fg='white', bg='red', bold=True)
def msg_success(msg: str) -> None:
    """Print *msg* as a white-on-green success banner."""
    banner = f'\n\n{msg}\n'
    click.secho(banner, fg='white', bg='green', bold=True)
def bold(txt: str) -> str:
    """Return *txt* wrapped in terminal bold escape codes."""
    styled = colored(txt, attrs=['bold'])
    return styled
def pprompt(text, default, options=None):
    """Styled wrapper around ``click.prompt``.

    Args:
        text: Prompt message (rendered bold blue with a leading marker).
        default: Default value offered to the user.
        options: Optional list of allowed values; when given, the prompt
            becomes a case-sensitive choice.

    Returns:
        The value entered (or accepted) by the user.
    """
    text = colored(f'✣ {text}', 'blue', attrs=['bold'])
    if isinstance(options, list):
        ptype = click.Choice(options, case_sensitive=True)
    else:
        ptype = str
    return click.prompt(text=text, type=ptype, default=default)
@click.group()
@click.version_option(message='You are using %(prog)s version %(version)s')
@click.pass_context
@timeit()
def main(ctx: click.Context) -> None:
    """
    Console script for james.
    The main function only reads the config
    """
    click.secho(INTRO_TEXT, fg='cyan')
    logger.info('Creating config object')
    # Shared IgniteConfig instance passed to every subcommand via ctx.obj.
    ctx.obj = IgniteConfig()
@main.command()
@click.pass_context
@click.confirmation_option(prompt="""
This will prompt for generic settings that apply to all projects.
If you change this it may invalidate existing projects.
Are you sure you want to continue?
""")
def setup(ctx: click.Context) -> None:
    """
    One-time setup for generic settings like
    - Azure & Azure DevOps defaults
    - Projects dir
    Args:
        ctx (click.Context): ctx.obj contains the IgniteConfig object
    """
    # handle git provider
    git_provider = pprompt(
        text='Choose a git provider',
        #type=click.Choice(['Azure DevOps Repos', 'Github'], case_sensitive=True),
        options=['Azure DevOps Repos', 'Github'],
        default='Azure DevOps Repos'
    )
    ctx.obj.set('META', 'git_provider', git_provider)
    if git_provider == 'Azure DevOps Repos':
        devops_organization = click.prompt(
            text='✣ Enter Azure DevOps organization name (https://dev.azure.com/<organization>)',
            type=str,
            default='data-science-lab'
        )
        ctx.obj.set('AZUREDEVOPS', 'devops_organization', devops_organization)
    elif git_provider == 'Github':
        github_username = click.prompt(
            text='Enter Github username',
            type=str,
            default=''
        )
        ctx.obj.set('GITHUB', 'github_username', github_username)
    else:
        # Unreachable while the prompt constrains choices; defensive guard.
        raise ValueError(f'Unsupported git provider "{git_provider}"')
    # handle cloud resource provider
    cloud_provider = click.prompt(
        text='Choose a cloud provider (currently only Azure is supported!)',
        type=click.Choice(['Azure'], case_sensitive=True),
        default='Azure'
    )
    ctx.obj.set('META', 'cloud_provider', cloud_provider)
    if cloud_provider == 'Azure':
        # ask subscription
        pass # subscription can be project specific
    # set user project directory
    projects_dir = click.prompt(
        text='Choose a directory containing your projects (use "~" for your home dir)',
        type=click.Path(),
        default='~/projects'
    )
    projects_dir = Path(projects_dir.replace('~', Path.home().as_posix())).resolve()
    if not projects_dir.exists():
        # NOTE(review): FileExistsError is misleading for a *missing* directory —
        # FileNotFoundError would be conventional; confirm no caller catches
        # FileExistsError before changing it.
        raise FileExistsError(f'Directory {projects_dir} does not exist. Please check if it\'s correct.')
    ctx.obj.set('META', 'projects_dir', projects_dir.as_posix())
    msg_success('All set!\nYou can now use james init from your projects dir to start a new project.')
@main.command()
@click.pass_context
def init(ctx: click.Context) -> None:
    """
    Start a new project
    Set project settings via prompts
    Args:
        ctx (click.Context): ctx.obj contains the IgniteConfig object
    """
    # Refuse to scaffold directly in the home directory.
    if Path.cwd() == Path.home():
        msg_error('Cannot init a new project here.\nChange to projects dir first')
        return
    if ctx.obj.is_existing_project:
        #raise IgniteInvalidStateError(f'Current config defines an existing project. Cannot call james init here.')
        msg_error('Current config defines an existing project. Cannot call james init here.')
        return
    # new project: clear existing values in config file
    ctx.obj.clear()
    if ctx.obj.get('META', 'cloud_provider') == 'Azure':
        # set Azure config
        azsetup = AzureSetup()
        # azure subscription
        subscriptions = azsetup.get_subscriptions()
        # Default subscriptions first so the first option doubles as the default.
        options = [
            sub['name']
            for sub in subscriptions
            if sub['isDefault']
        ] + [
            sub['name']
            for sub in subscriptions
            if not sub['isDefault']
        ]
        subscription_name = click.prompt(
            text='Choose Azure subscription',
            type=click.Choice(options, case_sensitive=True),
            default=options[0]
        )
        # Resolve the chosen name back to its subscription id.
        subscription_id = [
            sub['id']
            for sub in subscriptions
            if sub['name'] == subscription_name
        ][0]
        ctx.obj.set('AZURE', 'subscription_name', subscription_name)
        ctx.obj.set('AZURE', 'subscription_id', subscription_id)
        azsetup.set_subscription(subscription_id)
        if ctx.obj.get('META', 'git_provider') == 'Azure DevOps Repos':
            # Azure DevOps settings
            devops_projects = azsetup.get_devops_projects()
            project = click.prompt(
                text='Choose Azure DevOps project',
                type=click.Choice(devops_projects, case_sensitive=True),
                default=azsetup.DEFAULT_PROJECT
            )
            ctx.obj.set('AZUREDEVOPS', 'devops_organization', azsetup.DEFAULT_ORG)
            ctx.obj.set('AZUREDEVOPS', 'devops_project', project)
            azsetup.set_devops_project(project)
    # prompt for project setting values
    for section, var in ctx.obj.iter_settings(project_only=True):
        value = click.prompt(
            text=var['description'],
            type=var['type'],
            default=var['default']()
        )
        ctx.obj.set(section, var['name'], value)
    ctx.obj.cleanup()
@main.command()
@click.pass_context
@click.argument('section')
@click.argument('key')
@click.argument('value')
def set(ctx: click.Context, section: str, key: str, value: str) -> None:
    """
    Set a single value
    """
    # NOTE: the function name shadows the builtin ``set`` on purpose —
    # click derives the CLI command name ("james set") from it.
    logger.info(f'Setting {section}.{key} = {value}')
    ctx.obj.set(section, key, value)
@main.command()
@click.pass_context
def show(ctx: click.Context) -> None:
    """
    Display settings

    Reads the config file for the current project and echoes its path and
    contents; shows an error banner when no project is defined here.
    """
    try:
        file, contents = ctx.obj.list()
        prefix = colored('\n\n❯ Settings read from ', 'white', 'on_blue')
        file = colored(file.resolve(), 'white', 'on_blue', ['bold'])
        postfix = colored(':\n', 'white', 'on_blue')
        click.echo(f'{prefix}{file}{postfix}')
        click.echo(contents)
    except FileExistsError:
        msg_error('There is no project defined in this directory. Run "james init" first')
@main.command()
@click.pass_context
def status(ctx: click.Context) -> None:
    """
    Check status of stages for ignition
    """
    try:
        # Ignition raises ValueError when no project is configured here.
        report = Ignition(config=ctx.obj).stage_report()
        click.echo(report)
    except ValueError as exc:
        logger.error(exc)
        msg_error('There is no project defined in this directory. Run "james init" first')
@main.command()
@click.pass_context
@click.argument('directory')
def review(ctx: click.Context, directory: str = None) -> None:
    """
    Linting / code review

    Runs CodeInspection over DIRECTORY (defaults to the current working
    directory) and prints the resulting report.
    """
    # check_path validates existence and dir-ness, replacing the manual
    # checks that used to live here as commented-out code.
    path = check_path(directory or Path.cwd(), check_dir=True)
    inspection = CodeInspection(path=path)
    report = inspection()
    click.echo(report)
@main.command()
@click.pass_context
@click.confirmation_option(prompt="""
This will execute the actual project setup work:
- create a git repository
- create a new local project dir from a cookiecutter template
- create a python environment
Are you sure you want to continue?
""")
def go(ctx: click.Context) -> None:
    """
    Execute actions for project start
    """
    ignition = Ignition(config=ctx.obj)
    ignition.execute(callback_fn=click.echo)
# Allow running the CLI module directly (python cli.py) as well as via the entry point.
if __name__ == "__main__":
    main()
/paho-mqtt-1.6.1.tar.gz/paho-mqtt-1.6.1/examples/loop_trio.py |
import socket
import uuid
import trio
import paho.mqtt.client as mqtt
# Randomised client id so concurrent example runs don't clash on the public
# broker; the same string doubles as the unique topic to publish/subscribe on.
client_id = 'paho-mqtt-python/issue72/' + str(uuid.uuid4())
topic = client_id
print("Using client_id / topic: " + client_id)
class TrioAsyncHelper:
    """Bridges paho-mqtt's socket callbacks onto the trio event loop."""

    def __init__(self, client):
        self.client = client
        self.sock = None
        self._event_large_write = trio.Event()
        self.client.on_socket_open = self.on_socket_open
        self.client.on_socket_register_write = self.on_socket_register_write
        self.client.on_socket_unregister_write = self.on_socket_unregister_write

    async def read_loop(self):
        """Process incoming MQTT traffic whenever the socket is readable."""
        while True:
            # trio.hazmat was renamed trio.lowlevel (trio >= 0.15) and the old
            # alias has since been removed; use the supported name.
            await trio.lowlevel.wait_readable(self.sock)
            self.client.loop_read()

    async def write_loop(self):
        """Flush paho's pending output, but only while a large write is queued."""
        while True:
            # Waiting on the event first avoids spinning on an always-writable socket.
            await self._event_large_write.wait()
            await trio.lowlevel.wait_writable(self.sock)
            self.client.loop_write()

    async def misc_loop(self):
        """Run paho housekeeping (keepalives, retries) once a second."""
        print("misc_loop started")
        while self.client.loop_misc() == mqtt.MQTT_ERR_SUCCESS:
            await trio.sleep(1)
        print("misc_loop finished")

    def on_socket_open(self, client, userdata, sock):
        print("Socket opened")
        self.sock = sock
        # Shrink the send buffer so a large publish actually triggers
        # register_write and exercises the write_loop path.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2048)

    def on_socket_register_write(self, client, userdata, sock):
        print('large write request')
        self._event_large_write.set()

    def on_socket_unregister_write(self, client, userdata, sock):
        print("finished large write")
        # trio.Event cannot be cleared; create a fresh one for the next write.
        self._event_large_write = trio.Event()
class TrioAsyncMqttExample:
    def on_connect(self, client, userdata, flags, rc):
        # Subscribe once the broker confirms the connection.
        print("Subscribing")
        client.subscribe(topic)
    def on_message(self, client, userdata, msg):
        print("Got response with {} bytes".format(len(msg.payload)))
    def on_disconnect(self, client, userdata, rc):
        print('Disconnect result {}'.format(rc))
    async def test_write(self, cancel_scope: trio.CancelScope):
        """Publish three large messages, then cancel the nursery to end the demo."""
        for c in range(3):
            await trio.sleep(5)
            print("Publishing")
            # 200 kB payload — large enough to overflow the shrunken send buffer.
            self.client.publish(topic, b'Hello' * 40000, qos=1)
        cancel_scope.cancel()
    async def main(self):
        self.client = mqtt.Client(client_id=client_id)
        self.client.on_connect = self.on_connect
        self.client.on_message = self.on_message
        self.client.on_disconnect = self.on_disconnect
        # Helper wires paho's socket callbacks to the trio loops below.
        trio_helper = TrioAsyncHelper(self.client)
        self.client.connect('mqtt.eclipseprojects.io', 1883, 60)
        async with trio.open_nursery() as nursery:
            nursery.start_soon(trio_helper.read_loop)
            nursery.start_soon(trio_helper.write_loop)
            nursery.start_soon(trio_helper.misc_loop)
            nursery.start_soon(self.test_write, nursery.cancel_scope)
        self.client.disconnect()
        print("Disconnected")
print("Starting")
trio.run(TrioAsyncMqttExample().main)
print("Finished") | PypiClean |
/jupyterhub_url_sharing-0.1.0.tar.gz/jupyterhub_url_sharing-0.1.0/node_modules/@blueprintjs/select/lib/esm/common/listItemsProps.d.ts | import { Props } from "@blueprintjs/core";
import { ItemListRenderer } from "./itemListRenderer";
import { ItemRenderer } from "./itemRenderer";
import { ICreateNewItem } from "./listItemsUtils";
import { ItemListPredicate, ItemPredicate } from "./predicate";
/**
 * Equality test comparator to determine if two {@link IListItemsProps} items are equivalent.
 *
 * Arguments are never `null` or `undefined`; those values are handled before
 * the comparator is invoked.
 *
 * @return `true` if the two items are equivalent.
 */
export declare type ItemsEqualComparator<T> = (itemA: T, itemB: T) => boolean;
/**
 * Union of all possible types for {@link IListItemsProps#itemsEqual}:
 * either a custom comparator function, or the name of an item property
 * to compare with strict equality.
 */
export declare type ItemsEqualProp<T> = ItemsEqualComparator<T> | keyof T;
/** Reusable generic props for a component that operates on a filterable, selectable list of `items`. */
export interface IListItemsProps<T> extends Props {
    /**
     * The currently focused item for keyboard interactions, or `null` to
     * indicate that no item is active. If omitted or `undefined`, this prop will be
     * uncontrolled (managed by the component's state). Use `onActiveItemChange`
     * to listen for updates.
     */
    activeItem?: T | ICreateNewItem | null;
    /** Array of items in the list. */
    items: T[];
    /**
     * Specifies how to test if two items are equal. By default, simple strict
     * equality (`===`) is used to compare two items.
     *
     * If your items have a unique identifier field, simply provide the name of
     * a property on the item that can be compared with strict equality to
     * determine equivalence: `itemsEqual="id"` will check `a.id === b.id`.
     *
     * If more complex comparison logic is required, provide an equality
     * comparator function that returns `true` if the two items are equal. The
     * arguments to this function will never be `null` or `undefined`, as those
     * values are handled before calling the function.
     */
    itemsEqual?: ItemsEqualProp<T>;
    /**
     * Determine if the given item is disabled. Provide a callback function, or
     * simply provide the name of a boolean property on the item that exposes
     * its disabled state.
     */
    itemDisabled?: keyof T | ((item: T, index: number) => boolean);
    /**
     * Customize querying of entire `items` array. Return new list of items.
     * This method can reorder, add, or remove items at will.
     * (Supports filter algorithms that operate on the entire set, rather than individual items.)
     *
     * If `itemPredicate` is also defined, this prop takes priority and the other will be ignored.
     */
    itemListPredicate?: ItemListPredicate<T>;
    /**
     * Customize querying of individual items.
     *
     * __Filtering a list of items.__ This function is invoked to filter the
     * list of items as a query is typed. Return `true` to keep the item, or
     * `false` to hide. This method is invoked once for each item, so it should
     * be performant. For more complex queries, use `itemListPredicate` to
     * operate once on the entire array. For the purposes of filtering the list,
     * this prop is ignored if `itemListPredicate` is also defined.
     *
     * __Matching a pasted value to an item.__ This function is also invoked to
     * match a pasted value to an existing item if possible. In this case, the
     * function will receive `exactMatch=true`, and the function should return
     * true only if the item _exactly_ matches the query. For the purposes of
     * matching pasted values, this prop will be invoked even if
     * `itemListPredicate` is defined.
     */
    itemPredicate?: ItemPredicate<T>;
    /**
     * Custom renderer for an item in the dropdown list. Receives a boolean indicating whether
     * this item is active (selected by keyboard arrows) and an `onClick` event handler that
     * should be attached to the returned element.
     */
    itemRenderer: ItemRenderer<T>;
    /**
     * Custom renderer for the contents of the dropdown.
     *
     * The default implementation invokes `itemRenderer` for each item that passes the predicate
     * and wraps them all in a `Menu` element. If the query is empty then `initialContent` is returned,
     * and if there are no items that match the predicate then `noResults` is returned.
     */
    itemListRenderer?: ItemListRenderer<T>;
    /**
     * React content to render when query is empty.
     * If omitted, all items will be rendered (or result of `itemListPredicate` with empty query).
     * If explicit `null`, nothing will be rendered when query is empty.
     *
     * This prop is ignored if a custom `itemListRenderer` is supplied.
     */
    initialContent?: React.ReactNode | null;
    /**
     * React content to render when filtering items returns zero results.
     * If omitted, nothing will be rendered in this case.
     *
     * This prop is ignored if a custom `itemListRenderer` is supplied.
     */
    noResults?: React.ReactNode;
    /**
     * Invoked when user interaction should change the active item: arrow keys
     * move it up/down in the list, selecting an item makes it active, and
     * changing the query may reset it to the first item in the list if it no
     * longer matches the filter.
     *
     * If the "Create Item" option is displayed and currently active, then
     * `isCreateNewItem` will be `true` and `activeItem` will be `null`. In this
     * case, you should provide a valid `ICreateNewItem` object to the
     * `activeItem` _prop_ in order for the "Create Item" option to appear as
     * active.
     *
     * __Note:__ You can instantiate a `ICreateNewItem` object using the
     * `getCreateNewItem()` utility exported from this package.
     */
    onActiveItemChange?: (activeItem: T | null, isCreateNewItem: boolean) => void;
    /**
     * Callback invoked when an item from the list is selected,
     * typically by clicking or pressing `enter` key.
     */
    onItemSelect: (item: T, event?: React.SyntheticEvent<HTMLElement>) => void;
    /**
     * Callback invoked when multiple items are selected at once via pasting.
     */
    onItemsPaste?: (items: T[]) => void;
    /**
     * Callback invoked when the query string changes.
     */
    onQueryChange?: (query: string, event?: React.ChangeEvent<HTMLInputElement>) => void;
    /**
     * If provided, allows new items to be created using the current query
     * string. This is invoked when user interaction causes a new item to be
     * created, either by pressing the `Enter` key or by clicking on the "Create
     * Item" option. It transforms a query string into an item type.
     */
    createNewItemFromQuery?: (query: string) => T;
    /**
     * Custom renderer to transform the current query string into a selectable
     * "Create Item" option. If this function is provided, a "Create Item"
     * option will be rendered at the end of the list of items. If this function
     * is not provided, a "Create Item" option will not be displayed.
     */
    createNewItemRenderer?: (query: string, active: boolean, handleClick: React.MouseEventHandler<HTMLElement>) => JSX.Element | undefined;
    /**
     * Determines the position of the `createNewItem` within the list: first or
     * last. Only relevant when `createNewItemRenderer` is defined.
     *
     * @default 'last'
     */
    createNewItemPosition?: "first" | "last";
    /**
     * Whether the active item should be reset to the first matching item _every
     * time the query changes_ (via prop or by user input).
     *
     * @default true
     */
    resetOnQuery?: boolean;
    /**
     * Whether the active item should be reset to the first matching item _when
     * an item is selected_. The query will also be reset to the empty string.
     *
     * @default false
     */
    resetOnSelect?: boolean;
    /**
     * When `activeItem` is controlled, whether the active item should _always_
     * be scrolled into view when the prop changes. If `false`, only changes
     * that result from built-in interactions (clicking, querying, or using
     * arrow keys) will scroll the active item into view. Ignored if the
     * `activeItem` prop is omitted (uncontrolled behavior).
     *
     * @default true
     */
    scrollToActiveItem?: boolean;
    /**
     * Query string passed to `itemListPredicate` or `itemPredicate` to filter items.
     * This value is controlled: its state must be managed externally by attaching an `onChange`
     * handler to the relevant element in your `renderer` implementation.
     */
    query?: string;
}
/**
 * Utility function for executing the {@link IListItemsProps#itemsEqual} prop to test
 * for equality between two items. `null`/`undefined` items are handled here,
 * before the user-supplied comparator is ever invoked.
 *
 * @return `true` if the two items are equivalent according to `itemsEqualProp`.
 */
export declare function executeItemsEqual<T>(itemsEqualProp: ItemsEqualProp<T> | undefined, itemA: T | null | undefined, itemB: T | null | undefined): boolean;
/pytest-7.4.1.tar.gz/pytest-7.4.1/src/_pytest/nodes.py | import os
import warnings
from inspect import signature
from pathlib import Path
from typing import Any
from typing import Callable
from typing import cast
from typing import Iterable
from typing import Iterator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import overload
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import _pytest._code
from _pytest._code import getfslineno
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import TerminalRepr
from _pytest._code.code import Traceback
from _pytest.compat import cached_property
from _pytest.compat import LEGACY_PATH
from _pytest.config import Config
from _pytest.config import ConftestImportFailure
from _pytest.deprecated import FSCOLLECTOR_GETHOOKPROXY_ISINITPATH
from _pytest.deprecated import NODE_CTOR_FSPATH_ARG
from _pytest.mark.structures import Mark
from _pytest.mark.structures import MarkDecorator
from _pytest.mark.structures import NodeKeywords
from _pytest.outcomes import fail
from _pytest.pathlib import absolutepath
from _pytest.pathlib import commonpath
from _pytest.stash import Stash
from _pytest.warning_types import PytestWarning
if TYPE_CHECKING:
# Imported here due to circular import.
from _pytest.main import Session
from _pytest._code.code import _TracebackStyle
# Separator between filesystem components inside a node ID.
SEP = "/"

# Directory containing the _pytest package itself; used to cut pytest
# internals out of user-facing tracebacks.
tracebackcutdir = Path(_pytest.__file__).parent
def iterparentnodeids(nodeid: str) -> Iterator[str]:
    """Yield the parent node IDs of *nodeid*, inclusive, root first.

    For the node ID

        "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"

    the result would be

        ""
        "testing"
        "testing/code"
        "testing/code/test_excinfo.py"
        "testing/code/test_excinfo.py::TestFormattedExcinfo"
        "testing/code/test_excinfo.py::TestFormattedExcinfo::test_repr_source"

    Note that ``/`` components are only considered until the first ``::``.
    """
    # The root Session node is the ancestor of every node.
    yield ""

    colons_at = nodeid.find("::")
    # Upper bound for the path-component scan; ``None`` means "whole string".
    path_end: Optional[int] = None if colons_at == -1 else colons_at

    cursor = 0
    # One prefix per "/"-separated component before the first "::".
    while True:
        cut = nodeid.find(SEP, cursor, path_end)
        if cut == -1:
            break
        if cut > 0:
            yield nodeid[:cut]
        cursor = cut + len(SEP)
    # One prefix per "::"-separated component.
    while True:
        cut = nodeid.find("::", cursor)
        if cut == -1:
            break
        if cut > 0:
            yield nodeid[:cut]
        cursor = cut + len("::")
    # Finally the node ID itself, unless it is empty (already yielded above).
    if nodeid:
        yield nodeid
def _check_path(path: Path, fspath: LEGACY_PATH) -> None:
    """Verify that *path* and the legacy *fspath* denote the same location.

    :raises ValueError: if the two arguments disagree.
    """
    if Path(fspath) == path:
        return
    raise ValueError(
        f"Path({fspath!r}) != {path!r}\n"
        "if both path and fspath are given they need to be equal"
    )
def _imply_path(
    node_type: Type["Node"],
    path: Optional[Path],
    fspath: Optional[LEGACY_PATH],
) -> Path:
    """Reconcile the modern ``path`` and legacy ``fspath`` constructor args.

    Emits the ``fspath`` deprecation warning when the legacy argument is
    supplied, checks consistency when both are given, and returns the
    effective :class:`pathlib.Path`.
    """
    if fspath is not None:
        # stacklevel=6 attributes the warning to the caller of the Node
        # subclass constructor rather than to pytest internals.
        warnings.warn(
            NODE_CTOR_FSPATH_ARG.format(
                node_type_name=node_type.__name__,
            ),
            stacklevel=6,
        )
    if path is None:
        assert fspath is not None
        return Path(fspath)
    if fspath is not None:
        _check_path(path, fspath)
    return path
# Type variable bound to Node; gives ``Node.getparent`` a precise return type.
_NodeType = TypeVar("_NodeType", bound="Node")
class NodeMeta(type):
    """Metaclass for :class:`Node` that forbids direct construction.

    Calling a node class directly fails with a pointer to ``from_parent``;
    the sanctioned path funnels through :meth:`_create`.
    """

    def __call__(self, *k, **kw):
        # Direct instantiation is deprecated -- always error with guidance.
        msg = (
            "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n"
            "See "
            "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent"
            " for more details."
        ).format(name=f"{self.__module__}.{self.__name__}")
        fail(msg, pytrace=False)

    def _create(self, *k, **kw):
        # Actual construction entry point, used by Node.from_parent.
        try:
            return super().__call__(*k, **kw)
        except TypeError:
            # The subclass has a non-cooperative __init__: retry with only
            # the kwargs its signature accepts, and warn about deprecation.
            sig = signature(getattr(self, "__init__"))
            known_kw = {k: v for k, v in kw.items() if k in sig.parameters}
            from .warning_types import PytestDeprecationWarning

            warnings.warn(
                PytestDeprecationWarning(
                    f"{self} is not using a cooperative constructor and only takes {set(known_kw)}.\n"
                    "See https://docs.pytest.org/en/stable/deprecations.html"
                    "#constructors-of-custom-pytest-node-subclasses-should-take-kwargs "
                    "for more details."
                )
            )
            return super().__call__(*k, **known_kw)
class Node(metaclass=NodeMeta):
    r"""Base class of :class:`Collector` and :class:`Item`, the components of
    the test collection tree.

    ``Collector``\'s are the internal nodes of the tree, and ``Item``\'s are the
    leaf nodes.
    """

    # Implemented in the legacypath plugin.
    #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage
    #: for methods not migrated to ``pathlib.Path`` yet, such as
    #: :meth:`Item.reportinfo`. Will be deprecated in a future release, prefer
    #: using :attr:`path` instead.
    fspath: LEGACY_PATH

    # Use __slots__ to make attribute access faster.
    # Note that __dict__ is still available.
    __slots__ = (
        "name",
        "parent",
        "config",
        "session",
        "path",
        "_nodeid",
        "_store",
        "__dict__",
    )

    def __init__(
        self,
        name: str,
        parent: "Optional[Node]" = None,
        config: Optional[Config] = None,
        session: "Optional[Session]" = None,
        fspath: Optional[LEGACY_PATH] = None,
        path: Optional[Path] = None,
        nodeid: Optional[str] = None,
    ) -> None:
        #: A unique name within the scope of the parent node.
        self.name: str = name

        #: The parent collector node.
        self.parent = parent

        # config and session are inherited from the parent when not given.
        if config:
            #: The pytest config object.
            self.config: Config = config
        else:
            if not parent:
                raise TypeError("config or parent must be provided")
            self.config = parent.config

        if session:
            #: The pytest session this node is part of.
            self.session: Session = session
        else:
            if not parent:
                raise TypeError("session or parent must be provided")
            self.session = parent.session

        if path is None and fspath is None:
            path = getattr(parent, "path", None)
        #: Filesystem path where this node was collected from (can be None).
        self.path: Path = _imply_path(type(self), path, fspath=fspath)

        # The explicit annotation is to avoid publicly exposing NodeKeywords.
        #: Keywords/markers collected from all scopes.
        self.keywords: MutableMapping[str, Any] = NodeKeywords(self)

        #: The marker objects belonging to this node.
        self.own_markers: List[Mark] = []

        #: Allow adding of extra keywords to use for matching.
        self.extra_keyword_matches: Set[str] = set()

        if nodeid is not None:
            # "::()" was the historical instance marker and must not appear.
            assert "::()" not in nodeid
            self._nodeid = nodeid
        else:
            if not self.parent:
                raise TypeError("nodeid or parent must be provided")
            self._nodeid = self.parent.nodeid + "::" + self.name

        #: A place where plugins can store information on the node for their
        #: own use.
        self.stash: Stash = Stash()
        # Deprecated alias. Was never public. Can be removed in a few releases.
        self._store = self.stash

    @classmethod
    def from_parent(cls, parent: "Node", **kw):
        """Public constructor for Nodes.

        This indirection got introduced in order to enable removing
        the fragile logic from the node constructors.

        Subclasses can use ``super().from_parent(...)`` when overriding the
        construction.

        :param parent: The parent node of this Node.
        """
        if "config" in kw:
            raise TypeError("config is not a valid argument for from_parent")
        if "session" in kw:
            raise TypeError("session is not a valid argument for from_parent")
        return cls._create(parent=parent, **kw)

    @property
    def ihook(self):
        """fspath-sensitive hook proxy used to call pytest hooks."""
        return self.session.gethookproxy(self.path)

    def __repr__(self) -> str:
        # getattr guard: __repr__ may run before __init__ finished.
        return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None))

    def warn(self, warning: Warning) -> None:
        """Issue a warning for this Node.

        Warnings will be displayed after the test session, unless explicitly suppressed.

        :param Warning warning:
            The warning instance to issue.

        :raises ValueError: If ``warning`` instance is not a subclass of Warning.

        Example usage:

        .. code-block:: python

            node.warn(PytestWarning("some message"))
            node.warn(UserWarning("some message"))

        .. versionchanged:: 6.2
            Any subclass of :class:`Warning` is now accepted, rather than only
            :class:`PytestWarning <pytest.PytestWarning>` subclasses.
        """
        # enforce type checks here to avoid getting a generic type error later otherwise.
        if not isinstance(warning, Warning):
            raise ValueError(
                "warning must be an instance of Warning or subclass, got {!r}".format(
                    warning
                )
            )
        path, lineno = get_fslocation_from_item(self)
        assert lineno is not None
        # Attribute the warning to the node's source location (1-based line).
        warnings.warn_explicit(
            warning,
            category=None,
            filename=str(path),
            lineno=lineno + 1,
        )

    # Methods for ordering nodes.

    @property
    def nodeid(self) -> str:
        """A ::-separated string denoting its collection tree address."""
        return self._nodeid

    def __hash__(self) -> int:
        return hash(self._nodeid)

    def setup(self) -> None:
        """Hook run before this node's items execute; no-op by default."""
        pass

    def teardown(self) -> None:
        """Hook run after this node's items execute; no-op by default."""
        pass

    def listchain(self) -> List["Node"]:
        """Return list of all parent collectors up to self, starting from
        the root of collection tree.

        :returns: The nodes.
        """
        chain = []
        item: Optional[Node] = self
        while item is not None:
            chain.append(item)
            item = item.parent
        chain.reverse()
        return chain

    def add_marker(
        self, marker: Union[str, MarkDecorator], append: bool = True
    ) -> None:
        """Dynamically add a marker object to the node.

        :param marker:
            The marker.
        :param append:
            Whether to append the marker, or prepend it.
        """
        from _pytest.mark import MARK_GEN

        if isinstance(marker, MarkDecorator):
            marker_ = marker
        elif isinstance(marker, str):
            marker_ = getattr(MARK_GEN, marker)
        else:
            raise ValueError("is not a string or pytest.mark.* Marker")
        self.keywords[marker_.name] = marker_
        if append:
            self.own_markers.append(marker_.mark)
        else:
            self.own_markers.insert(0, marker_.mark)

    def iter_markers(self, name: Optional[str] = None) -> Iterator[Mark]:
        """Iterate over all markers of the node.

        :param name: If given, filter the results by the name attribute.
        :returns: An iterator of the markers of the node.
        """
        return (x[1] for x in self.iter_markers_with_node(name=name))

    def iter_markers_with_node(
        self, name: Optional[str] = None
    ) -> Iterator[Tuple["Node", Mark]]:
        """Iterate over all markers of the node.

        :param name: If given, filter the results by the name attribute.
        :returns: An iterator of (node, mark) tuples.
        """
        # Closest node first: walk the chain from self up to the root.
        for node in reversed(self.listchain()):
            for mark in node.own_markers:
                if name is None or getattr(mark, "name", None) == name:
                    yield node, mark

    @overload
    def get_closest_marker(self, name: str) -> Optional[Mark]:
        ...

    @overload
    def get_closest_marker(self, name: str, default: Mark) -> Mark:
        ...

    def get_closest_marker(
        self, name: str, default: Optional[Mark] = None
    ) -> Optional[Mark]:
        """Return the first marker matching the name, from closest (for
        example function) to farther level (for example module level).

        :param default: Fallback return value if no marker was found.
        :param name: Name to filter by.
        """
        return next(self.iter_markers(name=name), default)

    def listextrakeywords(self) -> Set[str]:
        """Return a set of all extra keywords in self and any parents."""
        extra_keywords: Set[str] = set()
        for item in self.listchain():
            extra_keywords.update(item.extra_keyword_matches)
        return extra_keywords

    def listnames(self) -> List[str]:
        """Return the names of this node's chain, root first."""
        return [x.name for x in self.listchain()]

    def addfinalizer(self, fin: Callable[[], object]) -> None:
        """Register a function to be called without arguments when this node is
        finalized.

        This method can only be called when this node is active
        in a setup chain, for example during self.setup().
        """
        self.session._setupstate.addfinalizer(fin, self)

    def getparent(self, cls: Type[_NodeType]) -> Optional[_NodeType]:
        """Get the next parent node (including self) which is an instance of
        the given class.

        :param cls: The node class to search for.
        :returns: The node, if found.
        """
        current: Optional[Node] = self
        while current and not isinstance(current, cls):
            current = current.parent
        assert current is None or isinstance(current, cls)
        return current

    def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
        # Default: no filtering; subclasses narrow the traceback.
        return excinfo.traceback

    def _repr_failure_py(
        self,
        excinfo: ExceptionInfo[BaseException],
        style: "Optional[_TracebackStyle]" = None,
    ) -> TerminalRepr:
        """Build the terminal representation for *excinfo*, resolving the
        effective traceback style and filter from config options."""
        from _pytest.fixtures import FixtureLookupError

        if isinstance(excinfo.value, ConftestImportFailure):
            excinfo = ExceptionInfo.from_exc_info(excinfo.value.excinfo)
        if isinstance(excinfo.value, fail.Exception):
            if not excinfo.value.pytrace:
                style = "value"
        if isinstance(excinfo.value, FixtureLookupError):
            return excinfo.value.formatrepr()

        tbfilter: Union[bool, Callable[[ExceptionInfo[BaseException]], Traceback]]
        if self.config.getoption("fulltrace", False):
            style = "long"
            tbfilter = False
        else:
            tbfilter = self._traceback_filter
            if style == "auto":
                style = "long"
        # XXX should excinfo.getrepr record all data and toterminal() process it?
        if style is None:
            if self.config.getoption("tbstyle", "auto") == "short":
                style = "short"
            else:
                style = "long"

        if self.config.getoption("verbose", 0) > 1:
            truncate_locals = False
        else:
            truncate_locals = True

        # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False.
        # It is possible for a fixture/test to change the CWD while this code runs, which
        # would then result in the user seeing confusing paths in the failure message.
        # To fix this, if the CWD changed, always display the full absolute path.
        # It will be better to just always display paths relative to invocation_dir, but
        # this requires a lot of plumbing (#6428).
        try:
            abspath = Path(os.getcwd()) != self.config.invocation_params.dir
        except OSError:
            abspath = True

        return excinfo.getrepr(
            funcargs=True,
            abspath=abspath,
            showlocals=self.config.getoption("showlocals", False),
            style=style,
            tbfilter=tbfilter,
            truncate_locals=truncate_locals,
        )

    def repr_failure(
        self,
        excinfo: ExceptionInfo[BaseException],
        style: "Optional[_TracebackStyle]" = None,
    ) -> Union[str, TerminalRepr]:
        """Return a representation of a collection or test failure.

        .. seealso:: :ref:`non-python tests`

        :param excinfo: Exception information for the failure.
        """
        return self._repr_failure_py(excinfo, style)
def get_fslocation_from_item(node: "Node") -> Tuple[Union[str, Path], Optional[int]]:
    """Try to extract the actual location from a node, depending on available attributes:

    * "location": a pair (path, lineno)
    * "obj": a Python object that the node wraps.
    * "fspath": just a path

    :rtype: A tuple of (str|Path, int) with filename and 0-based line number.
    """
    # Prefer an explicit (path, lineno, name) triple -- see Item.location.
    loc: Optional[Tuple[str, Optional[int], str]] = getattr(node, "location", None)
    if loc is not None:
        path, lineno = loc[0], loc[1]
        return path, lineno
    # Fall back to the wrapped Python object, if any.
    wrapped = getattr(node, "obj", None)
    if wrapped is not None:
        return getfslineno(wrapped)
    # Last resort: a bare path with no line information.
    fallback_path = getattr(node, "fspath", "unknown location")
    return fallback_path, -1
class Collector(Node):
    """Base class of all collectors.

    Collector create children through `collect()` and thus iteratively build
    the collection tree.
    """

    class CollectError(Exception):
        """An error during collection, contains a custom message."""

    def collect(self) -> Iterable[Union["Item", "Collector"]]:
        """Collect children (items and collectors) for this collector."""
        raise NotImplementedError("abstract")

    # TODO: This omits the style= parameter which breaks Liskov Substitution.
    def repr_failure(  # type: ignore[override]
        self, excinfo: ExceptionInfo[BaseException]
    ) -> Union[str, TerminalRepr]:
        """Return a representation of a collection failure.

        :param excinfo: Exception information for the failure.
        """
        # A CollectError carries a user-facing message; show just that
        # (unless --fulltrace asked for the whole story).
        if isinstance(excinfo.value, self.CollectError) and not self.config.getoption(
            "fulltrace", False
        ):
            exc = excinfo.value
            return str(exc.args[0])

        # Respect explicit tbstyle option, but default to "short"
        # (_repr_failure_py uses "long" with "fulltrace" option always).
        tbstyle = self.config.getoption("tbstyle", "auto")
        if tbstyle == "auto":
            tbstyle = "short"

        return self._repr_failure_py(excinfo, style=tbstyle)

    def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback:
        """Filter a collection-time traceback down to the collected file,
        excluding pytest internals when the cut had no effect."""
        if hasattr(self, "path"):
            traceback = excinfo.traceback
            ntraceback = traceback.cut(path=self.path)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
            # Bug fix: return the pruned traceback computed above. The
            # previous code returned ``excinfo.traceback.filter(excinfo)``,
            # silently discarding the results of both ``cut()`` calls.
            return ntraceback.filter(excinfo)
        return excinfo.traceback
def _check_initialpaths_for_relpath(session: "Session", path: Path) -> Optional[str]:
for initial_path in session._initialpaths:
if commonpath(path, initial_path) == initial_path:
rel = str(path.relative_to(initial_path))
return "" if rel == "." else rel
return None
class FSCollector(Collector):
    """Base class for filesystem collectors."""

    def __init__(
        self,
        fspath: Optional[LEGACY_PATH] = None,
        path_or_parent: Optional[Union[Path, Node]] = None,
        path: Optional[Path] = None,
        name: Optional[str] = None,
        parent: Optional[Node] = None,
        config: Optional[Config] = None,
        session: Optional["Session"] = None,
        nodeid: Optional[str] = None,
    ) -> None:
        # path_or_parent is a compatibility positional: it may carry either
        # the parent node or the collected path, dispatched by type.
        if path_or_parent:
            if isinstance(path_or_parent, Node):
                assert parent is None
                parent = cast(FSCollector, path_or_parent)
            elif isinstance(path_or_parent, Path):
                assert path is None
                path = path_or_parent

        path = _imply_path(type(self), path, fspath=fspath)
        if name is None:
            name = path.name
            if parent is not None and parent.path != path:
                # Prefer a name relative to the parent's path, normalized
                # to "/" separators.
                try:
                    rel = path.relative_to(parent.path)
                except ValueError:
                    pass
                else:
                    name = str(rel)
                name = name.replace(os.sep, SEP)
        self.path = path

        if session is None:
            assert parent is not None
            session = parent.session

        if nodeid is None:
            # Derive the node ID from the path relative to the rootdir,
            # falling back to the session's initial paths.
            try:
                nodeid = str(self.path.relative_to(session.config.rootpath))
            except ValueError:
                nodeid = _check_initialpaths_for_relpath(session, path)

            if nodeid and os.sep != SEP:
                nodeid = nodeid.replace(os.sep, SEP)

        super().__init__(
            name=name,
            parent=parent,
            config=config,
            session=session,
            nodeid=nodeid,
            path=path,
        )

    @classmethod
    def from_parent(
        cls,
        parent,
        *,
        fspath: Optional[LEGACY_PATH] = None,
        path: Optional[Path] = None,
        **kw,
    ):
        """The public constructor."""
        return super().from_parent(parent=parent, fspath=fspath, path=path, **kw)

    def gethookproxy(self, fspath: "os.PathLike[str]"):
        # Deprecated: use session.gethookproxy directly.
        warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2)
        return self.session.gethookproxy(fspath)

    def isinitpath(self, path: Union[str, "os.PathLike[str]"]) -> bool:
        # Deprecated: use session.isinitpath directly.
        warnings.warn(FSCOLLECTOR_GETHOOKPROXY_ISINITPATH, stacklevel=2)
        return self.session.isinitpath(path)
class File(FSCollector):
    """Base class for collecting tests from a file.

    :ref:`non-python tests`.
    """
class Item(Node):
    """Base class of all test invocation items.

    Note that for a single function there might be multiple test invocation items.
    """

    # Set during execution to the item scheduled to run next (if any).
    nextitem = None

    def __init__(
        self,
        name,
        parent=None,
        config: Optional[Config] = None,
        session: Optional["Session"] = None,
        nodeid: Optional[str] = None,
        **kw,
    ) -> None:
        # The first two arguments are intentionally passed positionally,
        # to keep plugins who define a node type which inherits from
        # (pytest.Item, pytest.File) working (see issue #8435).
        # They can be made kwargs when the deprecation above is done.
        super().__init__(
            name,
            parent,
            config=config,
            session=session,
            nodeid=nodeid,
            **kw,
        )
        # Extra (when, key, content) report sections, e.g. captured output.
        self._report_sections: List[Tuple[str, str, str]] = []

        #: A list of tuples (name, value) that holds user defined properties
        #: for this test.
        self.user_properties: List[Tuple[str, object]] = []

        self._check_item_and_collector_diamond_inheritance()

    def _check_item_and_collector_diamond_inheritance(self) -> None:
        """
        Check if the current type inherits from both File and Collector
        at the same time, emitting a warning accordingly (#8447).
        """
        cls = type(self)

        # We inject an attribute in the type to avoid issuing this warning
        # for the same class more than once, which is not helpful.
        # It is a hack, but was deemed acceptable in order to avoid
        # flooding the user in the common case.
        attr_name = "_pytest_diamond_inheritance_warning_shown"
        if getattr(cls, attr_name, False):
            return
        setattr(cls, attr_name, True)

        problems = ", ".join(
            base.__name__ for base in cls.__bases__ if issubclass(base, Collector)
        )
        if problems:
            warnings.warn(
                f"{cls.__name__} is an Item subclass and should not be a collector, "
                f"however its bases {problems} are collectors.\n"
                "Please split the Collectors and the Item into separate node types.\n"
                "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n"
                "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/",
                PytestWarning,
            )

    def runtest(self) -> None:
        """Run the test case for this item.

        Must be implemented by subclasses.

        .. seealso:: :ref:`non-python tests`
        """
        raise NotImplementedError("runtest must be implemented by Item subclass")

    def add_report_section(self, when: str, key: str, content: str) -> None:
        """Add a new report section, similar to what's done internally to add
        stdout and stderr captured output::

            item.add_report_section("call", "stdout", "report section contents")

        :param str when:
            One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
        :param str key:
            Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
            ``"stderr"`` internally.
        :param str content:
            The full contents as a string.
        """
        # Empty content is silently dropped.
        if content:
            self._report_sections.append((when, key, content))

    def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]:
        """Get location information for this item for test reports.

        Returns a tuple with three elements:

        - The path of the test (default ``self.path``)
        - The 0-based line number of the test (default ``None``)
        - A name of the test to be shown (default ``""``)

        .. seealso:: :ref:`non-python tests`
        """
        return self.path, None, ""

    @cached_property
    def location(self) -> Tuple[str, Optional[int], str]:
        """
        Returns a tuple of ``(relfspath, lineno, testname)`` for this item
        where ``relfspath`` is file path relative to ``config.rootpath``
        and lineno is a 0-based line number.
        """
        location = self.reportinfo()
        path = absolutepath(os.fspath(location[0]))
        relfspath = self.session._node_location_to_relpath(path)
        assert type(location[2]) is str
        return (relfspath, location[1], location[2])
/flet_django-0.4.5-py3-none-any.whl/flet_django/controls/modeltable.py | import flet as ft
from django.utils.translation import gettext as _
from django.core.paginator import Paginator
from django.db.models import Q
# Placeholder for cells whose value cannot be resolved.
# NOTE(review): appears unused in this chunk -- build_cell falls back to the
# translated _("cell_error") instead; confirm before removing.
ERROR_MSG = "-Err-"

# Maps Django model field class names to the simple column types this table
# understands; ``None`` means "do not auto-create a column for this field".
FIELDS_MODELS = {
    'IntegerField': 'text',
    'DurationField': None,
    'ManyToOneRel': None,
    'DateTimeField': 'date',
    'FileField': 'file',
    'TextField': 'text',
    'ImageField': None,
    'ForeignKey': None,
    'CharField': 'text',
    'BigIntegerField': 'text',
    'BooleanField': 'bool',
}
# Data column
class Col:
def __init__(self, name: str, label, field_type=None, width=None, on_sort=None):
if isinstance(label, ft.Control):
self.label = label
else:
self.label = ft.Text(label)
self.name = name
self.width = width
self.type = field_type
self.on_sort = on_sort
def build_header(self):
return ft.DataColumn(
label=self.label,
on_sort=self.on_sort,
)
def build_cell(self, obj):
return ft.DataCell(
content=ft.Text(
str(
getattr(
obj,
self.name,
_("cell_error")
)
),
width=self.width
)
)
def text_filter_kwargs(field_name: str, value: str):
    """Build Django filter kwargs for a case-insensitive substring match."""
    lookup = "%s__icontains" % field_name
    return {lookup: value.lower()}
class ModelTableControl(ft.UserControl):
def __init__(
    self,
    model,
    columns=None,
    order=None,
    rows_per_page=10,
    title=None,
    filters=None,
    auto_filter=True,
    get_data_table=None,
    **data_table_params
):
    """Paginated, searchable flet DataTable over a Django model.

    :param model: Django model class whose objects are listed.
    :param columns: optional list of Col; auto-derived from model fields if None.
    :param order: initial order_by expression (e.g. "-name"), or None.
    :param rows_per_page: page size for the paginator.
    :param title: card title; defaults to the model class name.
    :param filters: optional dict of extra queryset filter kwargs.
    :param auto_filter: if True, filter live on every search keystroke.
    :param get_data_table: optional factory replacing the default ft.DataTable.
    :param data_table_params: extra kwargs forwarded to ft.DataTable.
    """
    super().__init__()
    self.model = model
    self.order = order
    self.current_page = 1
    self.rows_per_page = rows_per_page
    self.num_pages = 1
    self.num_rows = 0
    self.title = title or model.__name__
    self.filters = filters
    self.auto_filter = auto_filter
    self.data_table_params = data_table_params
    # Widgets shared between build() and the event handlers.
    self.v_current_page = ft.Text(str(self.current_page))
    self.v_search = ft.TextField(
        hint_text=_("Search..."),
        dense=True,
        on_change=self.on_search_auto,
        prefix_icon=ft.icons.SEARCH,
        border=ft.InputBorder.NONE,
        filled=False,
        on_submit=self.on_search_submit
    )
    self.v_count = ft.Text()
    if columns is None:
        self.columns = list(self.get_columns())
    else:
        self.columns = columns
    # Reverse lookup: field name -> column index.
    self.fields_indexes = {col.name: i for i, col in enumerate(self.columns)}
    if get_data_table is None:
        self.data_table = ft.DataTable(
            columns=[col.build_header() for col in self.columns],
            rows=[],
            **data_table_params
        )
    else:
        self.data_table = get_data_table(self)
def get_columns(self):
    """Yield a Col for every supported field of the model.

    Fields whose class maps to ``None`` in FIELDS_MODELS are skipped.
    All generated columns share one sort handler that flips between
    ascending/descending order and reloads the data.
    """
    fields = self.model._meta.get_fields()

    def __on_sort(e):
        # Resolve the clicked column at event time via its index.
        sorted_field_name = self.columns[e.column_index].name
        self.order = sorted_field_name if e.ascending else f"-{sorted_field_name}"
        self.refresh_data()

    for field in fields:
        class_name = field.__class__.__name__
        field_type = FIELDS_MODELS.get(class_name, None)
        if field_type is not None:
            name = field.name
            label = field.verbose_name or field.name
            yield Col(name=name, label=label, field_type=field_type, on_sort=__on_sort)
def set_page(self, page=None, delta=0):
    """Jump to absolute *page*, or move by *delta* pages, then reload.

    *page* takes precedence over *delta*; with neither given this is a no-op.
    """
    if page is None and not delta:
        # Nothing requested; avoid a pointless reload.
        return
    self.current_page = page if page is not None else self.current_page + delta
    self.refresh_data()
def next_page(self, e):
self.set_page(delta=1)
def prev_page(self, e):
if self.current_page > 1:
self.set_page(delta=-1)
def goto_first_page(self, e):
self.set_page(page=1)
def goto_last_page(self, e):
self.set_page(page=self.num_pages)
def build_rows(self):
qs = self.model.objects.all()
if self.order:
qs = qs.order_by(self.order)
if self.v_search.value:
q = Q()
for column in self.columns:
if column.type == "text":
q |= Q(**text_filter_kwargs(column.name, self.v_search.value))
qs = qs.filter(q)
if self.filters is not None:
qs = qs.filter(**self.filters)
self.num_rows = qs.count()
paginator = Paginator(qs, self.rows_per_page)
p_int, p_add = divmod(self.num_rows, self.rows_per_page)
self.num_pages = p_int + (1 if p_add else 0)
page_qs = paginator.page(self.current_page)
# Load
return [ft.DataRow(cells=[col.build_cell(obj) for col in self.columns])
for obj in page_qs]
def build(self):
return ft.Card(
ft.Container(
ft.Column(
[
ft.Text(self.title, style=ft.TextThemeStyle.HEADLINE_SMALL),
ft.Row(
controls=[self.data_table],
scroll=ft.ScrollMode.ALWAYS,
spacing=10,
),
ft.Row([
ft.IconButton(ft.icons.KEYBOARD_DOUBLE_ARROW_LEFT, on_click=self.goto_first_page,
tooltip=_("First page")),
ft.IconButton(ft.icons.KEYBOARD_ARROW_LEFT, on_click=self.prev_page,
tooltip=_("Prev page")),
self.v_current_page,
ft.IconButton(ft.icons.KEYBOARD_ARROW_RIGHT, on_click=self.next_page,
tooltip=_("Next page")),
ft.IconButton(ft.icons.KEYBOARD_DOUBLE_ARROW_RIGHT, on_click=self.goto_last_page,
tooltip=_("Last page")),
self.v_search,
self.v_count,
ft.IconButton(ft.icons.REFRESH, on_click=self.refresh_data, tooltip=_("Refresh")),
ft.Slider(
min=10,
max=90,
divisions=8,
value=self.rows_per_page,
label=_("{value} rows per page"),
on_change=self.on_per_page_changed
),
]),
],
scroll=ft.ScrollMode.ALWAYS
),
padding=25,
),
)
def on_search_auto(self, e):
if self.auto_filter:
self.goto_first_page(e)
def on_search_submit(self, e):
self.goto_first_page(e)
def on_per_page_changed(self, e):
self.rows_per_page = int(e.control.value)
self.refresh_data()
def refresh_data(self, *args):
self.data_table.rows = self.build_rows()
if self.order:
ascending = self.order[0] != '-'
field_name = self.order if ascending else self.order[1:]
order_index = self.fields_indexes[field_name]
self.data_table.sort_ascending = ascending
self.data_table.sort_column_index = order_index
else:
self.data_table.sort_ascending = None
self.data_table.sort_column_index = None
self.v_count.value = _("{} rows").format(self.num_rows)
self.v_current_page.value = f"{self.current_page}/{self.num_pages}"
self.update()
def did_mount(self):
self.refresh_data() | PypiClean |
/bpy36-1.0.0-py3-none-any.whl/bpy2/2.79/scripts/addons/sequencer_kinoraw_tools/random_editor.py |
# Note: the Operator LoadRandomEditOperator was removed since is not
# working. If it is fixed, reimplemented it can be reintroduced later
import bpy
from bpy.types import (
Operator,
Panel,
)
from . import functions
# classes
class RandomScratchOperator(Operator):
    """Fill the IN/OUT marker range with randomly trimmed copies of the
    active meta strip, one copy per ``random_frames`` slice.

    Fixes: the marker test was ``"IN" and "OUT" in markers`` which only
    checked for the OUT marker; ``strip.type in ('META')`` was a substring
    test on a plain string; a leftover debug ``print`` was removed.
    """
    bl_idname = "sequencer.randomscratchoperator"
    bl_label = "Random Scratch Operator"
    bl_description = "Random Scratch Operator"

    @classmethod
    def poll(cls, context):
        # Available only when the active strip is a meta strip.
        strip = functions.act_strip(context)
        scn = context.scene
        if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
            return strip.type in ('META',)
        else:
            return False

    def invoke(self, context, event):
        preferences = context.user_preferences
        random_frames = preferences.addons[__package__].preferences.random_frames
        sce = context.scene
        seq = sce.sequence_editor
        markers = sce.timeline_markers
        if seq:
            strip = seq.active_strip
            if strip is not None:
                # Bug fix: check BOTH markers (original only checked "OUT").
                if "IN" in markers and "OUT" in markers:
                    sin = markers["IN"].frame
                    sout = markers["OUT"].frame
                    strip = context.scene.sequence_editor.active_strip
                    stripname = strip.name
                    # Collect the meta strip's name plus all of its children.
                    stripnames = [strip.name]
                    for child in seq.active_strip.sequences:
                        stripnames.append(child.name)
                    channel = strip.channel
                    repeat = range(int((sout - sin) / random_frames))
                    for i in repeat:
                        # Select the meta strip and every related strip.
                        for name in stripnames:
                            seq.sequences_all[name].select = True
                        strip = seq.sequences_all[stripname]
                        seq.active_strip = strip
                        # Deselect everything else before duplicating.
                        for other in context.selected_editable_sequences:
                            if other.name not in stripnames:
                                other.select = False
                        bpy.ops.sequencer.duplicate_move()
                        # The duplicate becomes the active strip.
                        newstrip = seq.active_strip
                        for other in context.selected_editable_sequences:
                            if other.name != newstrip.name:
                                other.select = False
                        # Trim the copy to a random slice and drop it in place
                        # one channel above the source.
                        newstrip.frame_start = sin + i * random_frames
                        rand = functions.randomframe(newstrip)
                        functions.triminout(newstrip, rand, rand + random_frames)
                        newstrip.frame_start = i * random_frames + sin - newstrip.frame_offset_start
                        newstrip.channel = channel + 1
                else:
                    self.report({'WARNING'}, "There is no IN and OUT Markers")
        bpy.ops.sequencer.reload()
        return {'FINISHED'}
class RandomEditorPanel(Panel):
    """Sequencer UI panel for the random scratch tool.

    Fix: ``strip.type in ('META')`` was a substring test on a plain string;
    it is now a real one-element tuple. ``poll`` also returns an explicit
    ``False`` on every negative path instead of falling through to ``None``.
    """
    bl_label = "Random Editor"
    bl_idname = "OBJECT_PT_RandomEditor"
    bl_space_type = 'SEQUENCE_EDITOR'
    bl_region_type = 'UI'

    @classmethod
    def poll(cls, context):
        # Visible only for meta strips, and only when the user enabled the
        # random editor in the addon preferences.
        if context.space_data.view_type in {'SEQUENCER',
                                            'SEQUENCER_PREVIEW'}:
            strip = functions.act_strip(context)
            scn = context.scene
            preferences = context.user_preferences
            prefs = preferences.addons[__package__].preferences
            if scn and scn.sequence_editor and scn.sequence_editor.active_strip:
                if prefs.use_random_editor:
                    return strip.type in ('META',)
        return False

    def draw_header(self, context):
        layout = self.layout
        layout.label(text="", icon="MOD_BUILD")

    def draw(self, context):
        preferences = context.user_preferences
        prefs = preferences.addons[__package__].preferences
        layout = self.layout
        col = layout.column(align=True)
        col.label("Cut duration:")
        col.prop(prefs, "random_frames")
        col.operator("sequencer.randomscratchoperator")
/crown_pycurl-0.2.tar.gz/crown_pycurl-0.2/crown_pycurl/client.py | import json
from io import BytesIO
from pycurl import Curl
# Base curl client, with initial parameters for Crown
class Client():
    """Minimal JSON-RPC client for a Crown daemon, built on pycurl.

    Fixes: removed the debug ``print`` that echoed every request payload
    (credentials-adjacent noise on stdout), replaced the mutable default
    argument of ``execute``, and collapsed the duplicated mainnet/testnet
    branch in ``set_headers``.
    """

    def __init__(self, user, passwd, host, testnet=False):
        """Create the curl handle and point it at the daemon."""
        self.client = Curl()
        self.set_headers(user, passwd, host, testnet=testnet)

    def set_headers(self, user, passwd, host, testnet=False):
        """Configure port, URL, TLS and HTTP headers (testnet port 19341)."""
        port = 19341 if testnet else 9341
        self.client.setopt(self.client.PORT, port)
        self.client.setopt(
            self.client.URL,
            'http://' + user + ':' + passwd + '@' + host + ':' + str(port))
        self.client.setopt(self.client.ENCODING, '')
        self.client.setopt(self.client.MAXREDIRS, 10)
        self.client.setopt(self.client.TIMEOUT, 30)
        # NOTE(review): TLS verification is disabled, presumably for
        # self-signed local daemons — confirm before using over a network.
        self.client.setopt(self.client.SSL_VERIFYHOST, 0)
        self.client.setopt(self.client.SSL_VERIFYPEER, 0)
        self.client.setopt(self.client.CUSTOMREQUEST, 'POST')
        self.client.setopt(self.client.HTTPHEADER, ["cache-control: no-cache", "content-type: application/json", "user: {0}:{1}".format \
            (user, passwd), ])

    def execute(self, method, params=None):
        """Run RPC *method* and return the decoded JSON response.

        *params* is a JSON-encoded array string (as produced by the wrapper
        methods); when omitted an empty parameter list is sent.
        """
        if params is None:
            params = []
        # Fresh buffer per call so one Client can issue multiple requests.
        req_buffer = BytesIO()
        self.client.setopt(self.client.WRITEDATA, req_buffer)
        self.client.setopt(
            self.client.POSTFIELDS,
            '{"jsonrpc": "1.0", "id": "crown-pycurl", "method": "%s", "params": %s}'
            % (method, params))
        self.client.perform()
        return json.loads(req_buffer.getvalue().decode('utf-8'))
# A set of commands implementations
# == Blockchain ==
# getbestblockhash
def getbestblockhash(self):
return self.execute('getbestblockhash')
# getblock
def getblock(self, hash, verbose=True):
data = json.dumps([hash, verbose])
return self.execute('getblock', data)
# getblockchaininfo
def getblockchaininfo(self):
return self.execute('getblockchaininfo')
# getblockcount
def getblockcount(self):
return self.execute('getblockcount')
# getblockhash
def getblockhash(self, index):
data =json.dumps([index])
return self.execute('getblockhash', data)
# getblockheader
def getblockheader(self, hash, verbose):
data = json.dumps([hash, verbose])
return self.execute('getblockheader', data)
# getchaintips
def getchaintips(self):
return self.execute('getchaintips')
# getdifficulty
def getdifficulty(self):
return self.execute('getdifficulty')
# getmempoolinfo
def getmempoolinfo(self):
return self.execute('getmempoolinfo')
# getrawmempool
def getrawmempool(self, verbose=False):
data = json.dumps([verbose])
return self.execute('getrawmempool', data)
# gettxout
def gettxout(self, txid, n, includemempool=False):
data = json.dumps([txid, n, includemempool])
return self.execute('gettxout', data)
# gettxoutsetinfo
def gettxoutsetinfo(self):
return self.execute('gettxoutsetinfo')
# verifychain
def verifychain(self, checklevel=3, numblocks=288):
data = json.dumps([checklevel, numblocks])
return self.execute('verifychain', data)
# == Control ==
# getinfo
def getinfo(self):
return self.execute('getinfo')
# help
def help(self, command=None):
if command:
data = json.dumps([command])
else:
data = json.dumps([])
return self.execute('help', data)
# restart
def restart(self):
return self.execute('restart')
# stop
def stop(self):
return self.execute('stop')
# == Crown ==
# getstakepointers
def getstakepointers(self):
return self.execute('getstakepointers')
# masternode
def masternode(self, command, passphrase=None):
if passphrase:
data = json.dumps([command, passphrase])
else:
data = json.dumps([command])
return self.execute('masternode', data)
# masternodebroadcast
def masternodebroadcast(self, command, passphrase=None):
if passphrase:
data = json.dumps([command, passphrase])
else:
data = json.dumps([command])
return self.execute('masternodebroadcast', data)
# masternodelist
def masternodelist(self, mode="status", filter=None):
if filter:
data = json.dumps([mode, filter])
else:
data = json.dumps([mode])
return self.execute('masternodelist', data)
# mnbudget
def mnbudget(self, command, passphrase=None):
if passphrase:
data = json.dumps([command, passphrase])
else:
data = json.dumps([command])
return self.execute('mnbudget', data)
# mnbudgetvoteraw
# def mnbudgetvoteraw(self, mntxhash, mntxindex, prophash, vote, time, votesig):
# Pending due lack of info
# mnfinalbudget
def mnfinalbudget(self, command, passphrase=None):
if passphrase:
data = json.dumps([command, passphrase])
else:
data = json.dumps([command])
return self.execute('mnfinalbudget', data)
# mnsync
# Pending because it can be done manually
# node
def node(self, command, passphrase=None):
if passphrase:
data = json.dumps([command, passphrase])
else:
data = json.dumps([command])
return self.execute('node', data)
# snsync
# Same as line 131
# spork
# Same as line 125
# systemnode
def systemnode(self, command, passphrase=None):
if passphrase:
data = json.dumps([command, passphrase])
else:
data = json.dumps([command])
return self.execute('systemnode', data)
# systemnodebroadcast
def systemnodebroadcast(self, command, passphrase=None):
if passphrase:
data = json.dumps([command, passphrase])
else:
data = json.dumps([command])
return self.execute('systemnodebroadcast', data)
# systemnodelist
def systemnodelist(self, mode="status", filter=None):
if filter:
data = json.dumps([mode, filter])
else:
data = json.dumps([mode])
return self.execute('systemnodelist', data)
# == Generating ==
# getauxblock
def getauxblock(self, hash=None, auxpow=None):
if hash and auxpow:
data = json.dumps([hash, auxpow])
else:
return self.execute('getauxblock')
return self.execute('getauxblock', data)
# getgenerate
def getgenerate(self):
return self.execute('getgenerate')
# gethashespersec
def gethashespersec(self):
return self.execute('gethashespersec')
# setgenerate
def setgenerate(self, generate, genproclimit=None):
if genproclimit:
data = json.dumps([generate, genproclimit])
else:
data = json.dumps([generate])
return self.execute('setgenerate', data)
# == Mining ==
# getblocktemplate
def getblocktemplate(self, jsonrequestobject=None):
if jsonrequestobject:
data = json.dumps([jsonrequestobject])
else:
return self.execute('getblocktemplate')
return self.execute('getblocktemplate', data)
# getmininginfo
def getmininginfo(self):
return self.execute('getmininginfo')
# getnetworkhashps
def getnetworkhashps(self, blocks=120, height=-1):
data = json.dumps([blocks, height])
return self.execute('getnetworkhashps', data)
# prioritisetransaction
def prioritisetransaction(self, txid, priority, fee):
data = json.dumps([txid, priority, fee])
return self.execute('prioritisetransaction', data)
# submitblock
def submitblock(self, hexdata, jsonparametersobject=None):
if jsonparametersobject:
data = json.dumps([hexdata, jsonparametersobject])
else:
data = json.dumps([hexdata])
return self.execute('submitblock', data)
# == Network ==
# addnode
def addnode(self, node, command):
data = json.dumps([node, command])
return self.execute('addnode', data)
# getaddednodeinfo
def getaddednodeinfo(self, dns, node=None):
if node:
data = json.dumps([dns, node])
else:
data = json.dumps([dns])
print(data)
return self.execute('getaddednodeinfo', data)
# getconnectioncount
def getconnectioncount(self):
return self.execute('getconnectioncount')
# getnettotals
def getnettotals(self):
return self.execute('getnettotals')
# getnetworkinfo
def getnetworkinfo(self):
return self.execute('getnetworkinfo')
# getpeerinfo
def getpeerinfo(self):
return self.execute('getpeerinfo')
# ping
def ping(self):
return self.execute('ping')
# == Platform ==
# agents
# Same as line 140
# nftoken
def nftoken_issue(self, proto, id, owner, metadataAdmin='0', metadata=''):
data = json.dumps(['issue', proto, id, owner, metadataAdmin, metadata])
return self.execute('nftoken', data)
def nftoken_get(self, proto, id):
data = json.dumps(['get', proto, id])
return self.execute('nftoken', data)
def nftoken_getbytxid(self, txid):
data = json.dumps(['getbytxid', txid])
return self.execute('nftoken', data)
def nftoken_totalsupply(self, proto):
data = json.dumps(['totalsupply', proto])
return self.execute('nftoken', data)
def nftoken_balanceof(self, address, proto=None):
if proto:
data = json.dumps(['balanceof', address, proto])
else:
data = json.dumps(['balanceof', address])
return self.execute('nftoken', data)
def nftoken_ownerof(self, proto, id):
data = json.dumps(['ownerof', proto, id])
return self.execute('nftoken', data)
def nftoken_list(self, proto=None, address=None, count=None, skip=None, height=None, regtxonly=False):
args = ['list']
if proto:
args.append(proto)
if address:
args.append(address)
if count:
args.append(count)
if skip:
args.append(skip)
if height:
args.append(height)
if regtxonly:
args.append(regtxonly)
data = json.dumps(args)
return self.execute('nftoken', data)
# nftproto
def nftproto_register(self, id, name, owner, sign=2, mimetype='text/plain', schemauri='', transferable=True, embedded=False, size=255):
data = json.dumps(['register', id, name, owner, sign, mimetype, schemauri, transferable, embedded, size])
return self.execute('nftproto', data)
def nftproto_list(self, count=None, skip=None, height=None, regtxonly=False):
args = ['list']
if count:
args.append(count)
if skip:
args.append(skip)
if height:
args.append(height)
if regtxonly:
args.append(regtxonly)
data = json.dumps(args)
return self.execute('nftproto', data)
def nftproto_get(self, id):
data = json.dumps(['get', id])
return self.execute('nftproto', data)
def nftproto_getbytxid(self, txid):
data = json.dumps(['getbytxid', txid])
return self.execute('nftproto', data)
def nftproto_ownerof(self, proto):
data = json.dumps(['ownerof', proto])
return self.execute('nftproto', data)
# == Rawtransactions ==
# createrawtransaction
def createrawtransaction(self, transactions, addresses):
data = json.dumps([transactions, addresses])
return self.execute('createrawtransaction', data)
# decoderawtransaction
def decoderawtransaction(self, hex):
data = json.dumps([hex])
return self.execute('decoderawtransaction', data)
# decodescript
def decodescript(self, hex):
data = json.dumps([hex])
return self.execute('decodescript', data)
# getrawtransaction
def getrawtransaction(self, txid, verbose=0):
data = json.dumps([txid, verbose])
return self.execute('getrawtransaction', data)
# sendrawtransaction
def sendrawtransaction(self, hex, allowhighfees=False):
data = json.dumps([hex, allowhighfees])
return self.execute('sendrawtransaction', data)
# == Utils ==
# createmultisig
def createmultisig(self, nrequired, keys):
data = json.dumps([nrequired, keys])
return self.execute('createmultisig', data)
# estimatefee
def estimatefee(self, nblocks):
data = json.dumps([nblocks])
return self.execute('estimatefee', data)
# estimatepriority
def estimatepriority(self, nblocks):
data = json.dumps([nblocks])
return self.execute('estimatepriority', data)
# validateaddress
def validateaddress(self, address):
data = json.dumps([address])
return self.execute('validateaddress', data)
# verifymessage
def verifymessage(self, address, signature, message):
data = json.dumps([address, signature, message])
return self.execute('verifymessage', data)
# == Wallet ==
# addmultisigaddress
def addmultisigaddress(self, nrequired, keysobject, account=None):
if account:
data = json.dumps([nrequired, keysobject, account])
else:
data = json.dumps([nrequired, keysobject])
return self.execute('addmultisigaddress', data)
# backupwallet
def backupwallet(self, destination):
data = json.dumps([destination])
return self.execute('backupwallet', data)
# convertaddress
def convertaddress(self, oldaddress):
data = json.dumps([oldaddress])
return self.execute('convertaddress', data)
# dumpprivkey
def dumpprivkey(self, address):
data = json.dumps([address])
return self.execute('dumpprivkey', data)
# dumpwallet
def dumpwallet(self, filename):
data = json.dumps([filename])
return self.execute('dumpwallet', data)
# encryptwallet
def encryptwallet(self, passphrase):
data = json.dumps([passphrase])
return self.execute('encryptwallet', data)
# getaccount
def getaccount(self, address):
data = json.dumps([address])
return self.execute('getaccount', data)
# getaccountaddress
def getaccountaddress(self, account):
data = json.dumps([account])
return self.execute('getaccountaddress', data)
# getaddressesbyaccount
def getaddressesbyaccount(self, account):
data = json.dumps([account])
return self.execute('getaddressesbyaccount', data)
# getbalance
def getbalance(self, account=None, minconf=None, includewatchonly=None):
args = []
if account:
args.append(account)
if minconf:
args.append(minconf)
if includewatchonly:
args.append(includewatchonly)
data = json.dumps(args)
return self.execute('getbalance', data)
# getnewaddress
def getnewaddress(self, account=None):
if account:
data = json.dumps([account])
return self.execute('getnewaddress', data)
return self.execute('getnewaddress')
# getrawchangeaddress
def getrawchangeaddress(self):
return self.execute('getrawchangeaddress')
# getreceivedbyaccount
def getreceivedbyaccount(self, account, minconf=None):
if minconf:
data = json.dumps([account, minconf])
else:
data = json.dumps([account])
return self.execute('getreceivedbyaccount', data)
# getreceivedbyaddress
def getreceivedbyaddress(self, address, minconf=None):
if minconf:
data = json.dumps([address, minconf])
else:
data = json.dumps([address])
return self.execute('getreceivedbyaddress', data)
# gettransaction
def gettransaction(self, txid, includewatchonly=None):
if includewatchonly:
data = json.dumps([txid, includewatchonly])
else:
data = json.dumps([txid])
return self.execute('gettransaction', data)
# getunconfirmedbalance
def getunconfirmedbalance(self):
return self.execute('getunconfirmedbalance')
# getwalletinfo
def getwalletinfo(self):
return self.execute('getwalletinfo')
# importaddress
def importaddress(self, address, label='', rescan=True):
data = json.dumps([address, label, rescan])
return self.execute('importaddress', data)
# importprivkey
def importprivkey(self, privkey, label='', rescan=True):
data = json.dumps([privkey, label, rescan])
return self.execute('importprivkey', data)
# importwallet
def importwallet(self, filename):
data = json.dumps([filename])
return self.execute('importwallet', data)
# keypoolrefill
def keypoolrefill(self, newsize=100):
data = json.dumps([newsize])
return self.execute('keypoolrefill', data)
# listaccounts
def listaccounts(self, minconf=None, includewatchonly=None):
args = []
if minconf:
args.append(minconf)
if includewatchonly:
args.append(includewatchonly)
data = json.dumps(args)
return self.execute('listaccounts', data)
# listaddressgroupings
def listaddressgroupings(self):
return self.execute('listaddressgroupings')
# listlockunspent
def listlockunspent(self):
return self.execute('listlockunspent')
# listreceivedbyaccount
def listreceivedbyaccount(self, minconf=None, includeempty=None, includewatchonly=None):
args = []
if minconf:
args.append(minconf)
if includeempty:
args.append(includeempty)
if includewatchonly:
args.append(includewatchonly)
data = json.dumps(args)
return self.execute('listreceivedbyaccount', data)
# listreceivedbyaddress
def listreceivedbyaddress(self, minconf=None, includeempty=None, includewatchonly=None):
args = []
if minconf:
args.append(minconf)
if includeempty:
args.append(includeempty)
if includewatchonly:
args.append(includewatchonly)
data = json.dumps(args)
return self.execute('listreceivedbyaddress', data)
# listsinceblock
def listsinceblock(self, blockhash, confirmations=None, includewatchonly=None):
args = [blockhash]
if confirmations:
args.append(confirmations)
if includewatchonly:
args.append(includewatchonly)
data = json.dumps(args)
return self.execute('listsinceblock', data)
# listtransactions
def listtransactions(self, account=None, count=None, skip=None, includewatchonly=None):
args = []
if account:
args.append(account)
if count:
args.append(count)
if skip:
args.append(skip)
if includewatchonly:
args.append(includewatchonly)
data = json.dumps(args)
return self.execute('listtransactions', data)
# listunspent
def listunspent(self, minconf=1, maxconf=9999999, addresses=None):
if addresses:
data = json.dumps([minconf, maxconf, addresses])
else:
data = json.dumps([minconf, maxconf])
return self.execute('listunspent', data)
# lockunspent
def lockunspent(self, unlock, transactions):
data = json.dumps([unlock, transactions])
return self.execute('lockunspent', data)
# move
def move(self, fromaccount, toaccount, minconf=1, comment=''):
data = json.dumps([fromaccount, toaccount, minconf, comment])
return self.execute('move', data)
# sendfrom
def sendfrom(self, fromaccount, toaddress, amount, minconf=1, comment='', commentto=''):
data = json.dumps([fromaccount, toaddress, amount, minconf, comment, commentto])
return self.execute('sendfrom', data)
# sendmany
def sendmany(self, fromaccount, addresses, minconf=1, comment=''):
data = json.dumps([fromaccount, addresses, minconf, comment])
return self.execute('sendmany', data)
# sendtoaddress
def sendtoaddress(self, address, amount, comment='', commentto=''):
data = json.dumps([address, amount, comment, commentto])
return self.execute('sendtoaddress', data)
# sendtoaddressix
def sendtoaddressix(self, address, amount, comment='', commentto=''):
data = json.dumps([address, amount, comment, commentto])
return self.execute('sendtoaddressix', data)
# setaccount
def setaccount(self, address, account):
data = json.dumps([address, account])
return self.execute('setaccount', data)
# settxfee
def settxfee(self, amount):
data = json.dumps([amount])
return self.execute('settxfee', data)
# signmessage
def signmessage(self, address, message):
data = json.dumps([address, message])
return self.execute('signmessage', data)
# update
def update(self, command, passphrase=None):
if passphrase:
data = json.dumps([command, passphrase])
else:
data = json.dumps([command])
return self.execute('update', data) | PypiClean |
/obs_cli-0.6.2-py3-none-any.whl/obs_cli.py |
import argparse
import json
import logging
import os
import re
import sys
import obsws_python as obs
from rich import print, print_json
from rich.console import Console
from rich.table import Table
def parse_args(argv=None):
    """Parse obs-cli command-line arguments.

    *argv* defaults to ``sys.argv[1:]`` (argparse's behavior when given
    ``None``); passing a list makes the function testable. Fix: the help
    text of ``--ignorecase`` was a copy-paste of "Exact match".
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-D", "--debug", action="store_true", default=False)
    parser.add_argument("-q", "--quiet", action="store_true", default=False)
    parser.add_argument("-H", "--host", help="host name", default="localhost")
    parser.add_argument(
        "-P", "--port", help="port number", type=int, default=4455
    )
    parser.add_argument(
        "-p", "--password", required=False, help="password ($OBS_API_PASSWORD)"
    )
    parser.add_argument("-j", "--json", action="store_true", default=False)
    subparsers = parser.add_subparsers(dest="command", required=True)

    scene_parser = subparsers.add_parser("scene")
    scene_parser.add_argument(
        "-e", "--exact", action="store_true", default=False, help="Exact match"
    )
    scene_parser.add_argument(
        "-i",
        "--ignorecase",
        action="store_true",
        default=False,
        help="Case-insensitive match",
    )
    scene_parser.add_argument(
        "action",
        choices=["list", "switch", "current"],
        default="current",
        help="list/switch/current",
    )
    scene_parser.add_argument("SCENE", nargs="?", help="Scene name")

    item_parser = subparsers.add_parser("item")
    item_parser.add_argument(
        "-s", "--scene", required=False, help="Scene name (default: current)"
    )
    item_parser.add_argument(
        "action",
        choices=["list", "show", "hide", "toggle"],
        default="toggle",
        help="show/hide/toggle",
    )
    item_parser.add_argument("ITEM", nargs="?", help="Item to interact with")

    input_parser = subparsers.add_parser("input")
    input_parser.add_argument(
        "action",
        choices=["list", "show", "get", "set"],
        default="show",
        help="list/show/get/set",
    )
    input_parser.add_argument("INPUT", nargs="?", help="Input name")
    input_parser.add_argument("PROPERTY", nargs="?", help="Property name")
    input_parser.add_argument("VALUE", nargs="?", help="Property value")

    filter_parser = subparsers.add_parser("filter")
    filter_parser.add_argument(
        "action",
        choices=["list", "toggle", "enable", "disable", "status"],
        default="list",
        help="list/toggle/enable/disable/status",
    )
    filter_parser.add_argument("INPUT", nargs="?", help="Input name")
    filter_parser.add_argument("FILTER", nargs="?", help="Filter name")

    hotkey_parser = subparsers.add_parser("hotkey")
    hotkey_parser.add_argument(
        "action",
        choices=["list", "trigger"],
        default="list",
        help="list/trigger",
    )
    hotkey_parser.add_argument("HOTKEY", nargs="?", help="Hotkey name")

    virtualcam_parser = subparsers.add_parser("virtualcam")
    virtualcam_parser.add_argument(
        "action",
        choices=["status", "start", "stop", "toggle"],
        default="status",
        help="status/start/stop/toggle",
    )

    stream_parser = subparsers.add_parser("stream")
    stream_parser.add_argument(
        "action",
        choices=["status", "start", "stop", "toggle"],
        default="status",
        help="status/start/stop/toggle",
    )

    record_parser = subparsers.add_parser("record")
    record_parser.add_argument(
        "action",
        choices=["status", "start", "stop", "toggle"],
        default="status",
        help="status/start/stop/toggle",
    )
    return parser.parse_args(argv)
def switch_to_scene(cl, scene, exact=False, ignorecase=True):
    """Switch OBS to the first scene whose name matches *scene*.

    Returns True when a matching scene was activated, None otherwise.
    Fixes: removed a leftover debug ``print`` on every comparison, and
    replaced ``re.NOFLAG`` (Python 3.11+) with the equivalent ``0`` so the
    function also runs on older interpreters.
    """
    regex = re.compile(
        f"^{scene}$" if exact else scene,
        re.IGNORECASE if ignorecase else 0,  # 0 == re.NOFLAG
    )
    for sc in sorted(
        cl.get_scene_list().scenes, key=lambda x: x.get("sceneName")
    ):
        if re.search(regex, sc.get("sceneName")):
            cl.set_current_program_scene(sc.get("sceneName"))
            return True
def get_items(cl, scene=None, names_only=False):
    """Scene items of *scene* (current scene when None), sorted by name."""
    target = scene or get_current_scene_name(cl)
    ordered = sorted(
        cl.get_scene_item_list(target).scene_items,
        key=lambda it: it.get("sourceName"),
    )
    if names_only:
        return [it.get("sourceName") for it in ordered]
    return ordered


def get_item_by_name(cl, item, ignorecase=True, exact=False, scene=None):
    """First item whose sourceName matches *item* (regex), else None."""
    pattern = f"^{item}$" if exact else item
    regex = re.compile(pattern, re.IGNORECASE if ignorecase else re.NOFLAG)
    for candidate in get_items(cl, scene):
        if re.search(regex, candidate.get("sourceName")):
            return candidate


def get_item_id(cl, item, scene=None):
    """Numeric sceneItemId of *item*, or -1 when not found."""
    found = get_item_by_name(cl, item, scene=scene)
    if not found:
        LOGGER.warning(f"Item not found: {item} (in {scene})")
        return -1
    return found.get("sceneItemId", -1)


def is_item_enabled(cl, item, scene=None):
    """Enabled flag of *item*; -1 when the item is missing."""
    found = get_item_by_name(cl, item, scene=scene)
    if not found:
        LOGGER.warning(f"Item not found: {item} (in {scene})")
        return -1
    return found.get("sceneItemEnabled", False)


def show_item(cl, item, scene=None):
    """Enable *item* in *scene* (current scene when None)."""
    target = scene or get_current_scene_name(cl)
    return cl.set_scene_item_enabled(target, get_item_id(cl, item, target), True)


def hide_item(cl, item, scene=None):
    """Disable *item* in *scene* (current scene when None)."""
    target = scene or get_current_scene_name(cl)
    return cl.set_scene_item_enabled(target, get_item_id(cl, item, target), False)


def toggle_item(cl, item, scene=None):
    """Flip the enabled state of *item* in *scene*."""
    target = scene or get_current_scene_name(cl)
    item_id = get_item_id(cl, item, target)
    current = is_item_enabled(cl, item, target)
    return cl.set_scene_item_enabled(target, item_id, not current)


def get_current_scene_name(cl):
    """Name of the current program scene."""
    return cl.get_current_program_scene().current_program_scene_name
def get_inputs(cl):
    """All OBS inputs, sorted by input name."""
    return sorted(cl.get_input_list().inputs, key=lambda i: i.get("inputName"))


def get_input_settings(cl, input):
    """Settings dict of *input*."""
    return cl.get_input_settings(input).input_settings


def set_input_setting(cl, input, key, value):
    """Set one property of *input*; *value* is JSON-decoded when possible
    (so "true", "3", "[1,2]" become real types), else kept as a string."""
    try:
        value = json.loads(value)
    except (ValueError, TypeError):
        pass
    LOGGER.debug(f"Setting {key} to {value} ({type(value)})")
    return cl.set_input_settings(input, {key: value}, overlay=True)
def get_filters(cl, input):
    """Filters attached to *input*."""
    return cl.get_source_filter_list(input).filters


def is_filter_enabled(cl, source, filter):
    """Whether *filter* on *source* is currently enabled."""
    return cl.get_source_filter(source, filter).filter_enabled


def enable_filter(cl, source, filter):
    """Turn *filter* on."""
    return cl.set_source_filter_enabled(source, filter, True)


def disable_filter(cl, source, filter):
    """Turn *filter* off."""
    return cl.set_source_filter_enabled(source, filter, False)


def toggle_filter(cl, source, filter):
    """Invert the enabled state of *filter* on *source*."""
    return cl.set_source_filter_enabled(
        source, filter, not is_filter_enabled(cl, source, filter)
    )


def get_hotkeys(cl):
    """All hotkey names known to OBS."""
    return cl.get_hot_key_list().hotkeys


def trigger_hotkey(cl, hotkey):
    """Fire *hotkey* by name."""
    return cl.trigger_hot_key_by_name(hotkey)
def virtual_camera_status(cl):
    # True when the virtual camera output is active.
    return cl.get_virtual_cam_status().output_active
def virtual_camera_start(cl):
    return cl.start_virtual_cam()
def virtual_camera_stop(cl):
    return cl.stop_virtual_cam()
def virtual_camera_toggle(cl):
    return cl.toggle_virtual_cam()
def stream_status(cl):
    # True when streaming is active.
    return cl.get_stream_status().output_active
def stream_start(cl):
    return cl.start_stream()
def stream_stop(cl):
    return cl.stop_stream()
def stream_toggle(cl):
    return cl.toggle_stream()
def record_status(cl):
    # True when recording is active.
    return cl.get_record_status().output_active
def record_start(cl):
    return cl.start_record()
def record_stop(cl):
    return cl.stop_record()
def record_toggle(cl):
    return cl.toggle_record()
def main():
    """Command-line entry point.

    Parses the arguments, connects to the OBS websocket and dispatches to the
    sub-command handlers.  Returns 0 on success, 1 on any error (the rich
    traceback is printed to the console).
    """
    console = Console()
    logging.basicConfig()
    args = parse_args()
    LOGGER.setLevel(logging.DEBUG if args.debug else logging.INFO)
    LOGGER.debug(args)
    # The password may come from the CLI or from the environment.
    password = args.password or os.environ.get("OBS_API_PASSWORD")
    try:
        cl = obs.ReqClient(host=args.host, port=args.port, password=password)
        cmd = args.command
        if cmd == "scene":
            if args.action == "current":
                print(get_current_scene_name(cl))
            elif args.action == "list":
                res = cl.get_scene_list()
                print(
                    *sorted([x.get("sceneName") for x in res.scenes]), sep="\n"
                )
                LOGGER.debug(res)
            elif args.action == "switch":
                res = switch_to_scene(cl, args.SCENE, exact=False)
                LOGGER.debug(res)
            else:
                print(get_current_scene_name(cl))
        elif cmd == "item":
            if args.action == "list":
                # print(*get_items(cl, args.scene), sep="\n")
                scene = args.scene or get_current_scene_name(cl)
                # Fix: query the resolved scene name; previously this passed
                # args.scene and ignored the current-scene fallback above.
                data = get_items(cl, scene)
                if args.json:
                    print_json(data=data)
                    return
                table = Table(title=f"Items in scene '{scene}'")
                table.add_column("ID")
                table.add_column("Name")
                table.add_column("Enabled", justify="center")
                for item in data:
                    item_id = str(item.get("sceneItemId"))
                    name = item.get("sourceName")
                    enabled = "✅" if item.get("sceneItemEnabled") else "❌"
                    table.add_row(item_id, name, enabled)
                console.print(table)
            elif args.action == "toggle":
                res = toggle_item(cl, args.ITEM, args.scene)
                LOGGER.debug(res)
            elif args.action == "show":
                res = show_item(cl, args.ITEM, args.scene)
                LOGGER.debug(res)
            elif args.action == "hide":
                res = hide_item(cl, args.ITEM, args.scene)
                LOGGER.debug(res)
        elif cmd == "input":
            if args.action == "list":
                data = get_inputs(cl)
                if args.json:
                    print_json(data=data)
                    return
                table = Table(title="Inputs")
                table.add_column("Kind")
                table.add_column("Name")
                for input in data:
                    kind = input.get("inputKind")
                    name = input.get("inputName")
                    table.add_row(kind, name)
                console.print(table)
            elif args.action == "show" or args.action == "get":
                data = get_input_settings(cl, args.INPUT)
                if args.PROPERTY:
                    print(data.get(args.PROPERTY))
                else:
                    # TODO Implement rich table output
                    print_json(data=data)
            elif args.action == "set":
                if not args.INPUT or not args.PROPERTY or not args.VALUE:
                    raise ValueError("Missing input name, property or value")
                res = set_input_setting(
                    cl, args.INPUT, args.PROPERTY, args.VALUE
                )
                LOGGER.debug(res)
        elif cmd == "filter":
            if args.action == "list":
                data = get_filters(cl, args.INPUT)
                if args.json:
                    print_json(data=data)
                    return
                table = Table(title=f"Filters for {args.INPUT}")
                table.add_column("Kind")
                table.add_column("Name")
                table.add_column("Enabled", justify="center")
                for filter in data:
                    kind = filter.get("filterKind")
                    name = filter.get("filterName")
                    enabled = "✅" if filter.get("filterEnabled") else "❌"
                    table.add_row(kind, name, enabled)
                console.print(table)
            elif args.action == "toggle":
                res = toggle_filter(cl, args.INPUT, args.FILTER)
                LOGGER.debug(res)
            elif args.action == "enable":
                res = enable_filter(cl, args.INPUT, args.FILTER)
                LOGGER.debug(res)
            elif args.action == "disable":
                res = disable_filter(cl, args.INPUT, args.FILTER)
                LOGGER.debug(res)
            elif args.action == "status":
                res = is_filter_enabled(cl, args.INPUT, args.FILTER)
                LOGGER.debug(res)
                # In quiet mode the exit code carries the answer.
                if args.quiet:
                    sys.exit(0 if res else 1)
                print("enabled" if res else "disabled")
        elif cmd == "hotkey":
            if args.action == "list":
                data = get_hotkeys(cl)
                if args.json:
                    print_json(data=data)
                    return
                table = Table(title="Hotkeys")
                table.add_column("Name")
                for hk in data:
                    table.add_row(hk)
                console.print(table)
            elif args.action == "trigger":
                res = trigger_hotkey(cl, args.HOTKEY)
                LOGGER.debug(res)
        elif cmd == "virtualcam":
            if args.action == "status":
                res = virtual_camera_status(cl)
                LOGGER.debug(res)
                if args.quiet:
                    sys.exit(0 if res else 1)
                print("started" if res else "stopped")
            elif args.action == "start":
                res = virtual_camera_start(cl)
                LOGGER.debug(res)
            elif args.action == "stop":
                res = virtual_camera_stop(cl)
                LOGGER.debug(res)
            elif args.action == "toggle":
                res = virtual_camera_toggle(cl)
                LOGGER.debug(res)
        elif cmd == "stream":
            if args.action == "status":
                res = stream_status(cl)
                LOGGER.debug(res)
                if args.quiet:
                    sys.exit(0 if res else 1)
                print("started" if res else "stopped")
            elif args.action == "start":
                res = stream_start(cl)
                LOGGER.debug(res)
            elif args.action == "stop":
                res = stream_stop(cl)
                LOGGER.debug(res)
            elif args.action == "toggle":
                res = stream_toggle(cl)
                LOGGER.debug(res)
        elif cmd == "record":
            if args.action == "status":
                res = record_status(cl)
                LOGGER.debug(res)
                if args.quiet:
                    sys.exit(0 if res else 1)
                print("started" if res else "stopped")
            elif args.action == "start":
                res = record_start(cl)
                LOGGER.debug(res)
            elif args.action == "stop":
                res = record_stop(cl)
                LOGGER.debug(res)
            elif args.action == "toggle":
                res = record_toggle(cl)
                LOGGER.debug(res)
        return 0
    except Exception:
        # Catch-all at the CLI boundary: show a rich traceback, signal failure.
        console.print_exception(show_locals=True)
        return 1
# Module-level logger; its level is configured in main().
LOGGER = logging.getLogger(__name__)

if __name__ == "__main__":
    # Removed a dataset-extraction artifact ("| PypiClean") that had been
    # fused onto this line and broke the syntax.
    sys.exit(main())
/dnv_bladed_models-0.3.44.tar.gz/dnv_bladed_models-0.3.44/src/dnv_bladed_models/standard_pitch_limit_switches.py |
from __future__ import annotations
from datetime import date, datetime # noqa: F401
from enum import Enum, IntEnum
import re # noqa: F401
from typing import Any, Dict, List, Optional, Type, Union, Callable # noqa: F401
from pathlib import Path
from typing import TypeVar
Model = TypeVar('Model', bound='BaseModel')
StrBytes = Union[str, bytes]
from pydantic import AnyUrl, BaseModel, EmailStr, Field, validator, root_validator, Extra # noqa: F401
from dnv_bladed_models.dnv import Dnv
class StandardPitchLimitSwitches(Dnv):
    """StandardPitchLimitSwitches - The limit switch positions of the control system.
    Attributes:
    ----------
    MinimumAngle : float
        A number representing an angle. The SI units for angles are radians.
    MaximumAngle : float
        A number representing an angle. The SI units for angles are radians.
    """
    # Pydantic fields.  The aliases match the JSON property names of the
    # Bladed input format; both are optional and default to None.
    MinimumAngle: Optional[float] = Field(alias="MinimumAngle", default=None)
    MaximumAngle: Optional[float] = Field(alias="MaximumAngle", default=None)
    class Config:
        # Reject unknown JSON properties, re-validate on attribute assignment,
        # and allow construction via field names as well as aliases.
        extra = Extra.forbid
        validate_assignment = True
        allow_population_by_field_name = True
        pass
    @root_validator(pre=True)
    def _parsing_ignores_underscore_properties(cls, values: dict[str, any]):
        # Drop keys starting with '_' (at the top level and one level of
        # nesting) before validation so Extra.forbid does not reject
        # documents that carry such metadata keys.
        allowed_vals = {}
        for key, val in values.items():
            if not key.startswith('_'):
                if isinstance(val, dict):
                    allowed_child_vals = {}
                    for child_key, child_val in val.items():
                        if not child_key.startswith('_'):
                            allowed_child_vals[child_key] = child_val
                    allowed_vals[key] = allowed_child_vals
                else:
                    allowed_vals[key] = val
        return allowed_vals
    def to_json(
        self,
        *,
        include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
        exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
        by_alias: bool = True,
        skip_defaults: Optional[bool] = None,
        exclude_unset: bool = False,
        exclude_defaults: bool = False,
        exclude_none: bool = True,
        encoder: Optional[Callable[[Any], Any]] = None,
        models_as_dict: bool = True,
        **dumps_kwargs: Any) -> str:
        r"""
        Generates a JSON string representation of the model.
        Notes
        -----
        `include` and `exclude` arguments as per `dict()`.
        `encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.
        Examples
        --------
        >>> model.to_json()
        Renders the full JSON representation of the model object.
        """
        # Default to pretty-printing with a 2-space indent.
        if dumps_kwargs.get('indent') is None:
            dumps_kwargs.update(indent=2)
        return super().json(
            include=include,
            exclude=exclude,
            by_alias=by_alias,
            skip_defaults=skip_defaults,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
            exclude_none=exclude_none,
            encoder=encoder,
            models_as_dict=models_as_dict,
            **dumps_kwargs)
    @classmethod
    def from_file(
        cls: Type['Model'],
        path: Union[str, Path]) -> 'Model':
        r"""
        Loads a model from a given file path.
        Parameters
        ----------
        path : string
            The file path to the model.
        Returns
        -------
        StandardPitchLimitSwitches
            The model object.
        Raises
        ------
        ValueError, ValidationError
            If the JSON document does not correctly describe the model according to the model schema.
        Examples
        --------
        >>> model = StandardPitchLimitSwitches.from_file('/path/to/file')
        """
        return super().parse_file(path=path)
    @classmethod
    def from_json(
        cls: Type['Model'],
        b: StrBytes) -> 'Model':
        r"""
        Creates a model object from a JSON string.
        Parameters
        ----------
        b: StrBytes
            The JSON string describing the model.
        Returns
        -------
        StandardPitchLimitSwitches
            The model object.
        Raises
        ------
        ValueError, ValidationError
            If the JSON document does not correctly describe the model according to the model schema.
        Examples
        --------
        >>> model = StandardPitchLimitSwitches.from_json('{ ... }')
        """
        return super().parse_raw(
            b=b,
            content_type='application/json')
    @classmethod
    def from_dict(
        cls: Type['Model'],
        obj: Any) -> 'Model':
        r"""
        Creates a model object from a dict.
        Parameters
        ----------
        obj : Any
            The dictionary object describing the model.
        Returns
        -------
        StandardPitchLimitSwitches
            The model object.
        Raises
        ------
        ValueError, ValidationError
            If the JSON document does not correctly describe the model according to the model schema.
        """
        return super().parse_obj(obj=obj)
    def to_file(
        self,
        path: Union[str, Path]):
        r"""
        Writes the model as a JSON document to a file with UTF8 encoding.
        Parameters
        ----------
        path : string
            The file path to which the model will be written.
        Examples
        --------
        >>> model.to_file('/path/to/file')
        """
        with open(file=path, mode='w', encoding="utf8") as output_file:
            output_file.write(self.to_json())
# Resolve postponed (string) annotations on the model; required by pydantic v1
# because this module uses `from __future__ import annotations`.  A dataset
# artifact ("| PypiClean") fused to this line was removed.
StandardPitchLimitSwitches.update_forward_refs()
/azure_mgmt_containerservice-26.0.0-py3-none-any.whl/azure/mgmt/containerservice/v2020_02_01/aio/_configuration.py |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ContainerServiceClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for ContainerServiceClient.
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
     subscription. The subscription ID forms part of the URI for every service call. Required.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2020-02-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
        super(ContainerServiceClientConfiguration, self).__init__(**kwargs)
        api_version: str = kwargs.pop("api_version", "2020-02-01")
        # Fail fast on the two required parameters.
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")
        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-containerservice/{}".format(VERSION))
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        # Build the default HTTP pipeline policies, honouring caller overrides.
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # Only synthesise a challenge-auth policy when a credential is present
        # and the caller did not supply their own authentication policy.
        # (A trailing dataset artifact "| PypiClean" was removed from the
        # final line of this method.)
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
/git_upm_publisher-0.0.5-py3-none-any.whl/git_upm_publisher/utils/git_manager.py | from pathlib import Path
from git import Repo
import os
from datetime import datetime
import subprocess
class Git:
    """Wrapper around a local git repository used by the publishing pipeline.

    Combines GitPython operations (commit, tag, stash, push) with the external
    ``git snapshot`` npm tool that publishes a sub-directory to its own branch.
    """

    def __init__(self, repo_root_path):
        self.repo_root_path = repo_root_path
        self.dotgit_path = os.path.join(repo_root_path, ".git/")
        assert os.path.exists(self.dotgit_path), "Cannot find the .git folder (are you sure the repo root is correct in the config.json?)"
        self.repo = Repo(repo_root_path)

    def is_dirty(self):
        """Return True when the working tree has uncommitted changes."""
        return self.repo.is_dirty()

    def tag(self, commit, name, message):
        """Create an annotated tag ``name`` pointing at ``commit``."""
        print("Tagging")
        self.repo.git.tag("-a", name, commit, "-m", message)

    def commit_all(self, message):
        """Stage all changes and commit them with the given message."""
        print("Comitting all")
        self.repo.git.add("--all")
        self.repo.git.commit("-m", "\"" + message + "\"")

    def publish(self, package_root_path, commit_message, branch_name, version_tag):
        """Publish ``package_root_path`` to ``branch_name`` via ``git snapshot`` and tag it."""
        print("Publishing")
        print("branch before publish: " + self.repo.active_branch.name)
        package_root_path = Path(package_root_path).absolute()
        original_cwd = os.getcwd()
        print("Changing CWD...")
        os.chdir(self.repo_root_path)
        print("CWD: " + os.getcwd())
        try:
            # NOTE(review): "[email protected]" looks like an email-obfuscation
            # artifact; the original npm package spec (e.g. a pinned
            # git-snapshot version) appears to have been lost -- TODO confirm.
            subprocess.run(["npm", "install", "-g", "[email protected]"], shell=True)
            subprocess.run(["git", "snapshot", "--prefix=" + package_root_path.as_posix() + "", "--message=\'" + commit_message + "\'", "--branch=" + branch_name], shell=True)
        finally:
            # Always restore the caller's working directory, even when the
            # external commands raise (previously a failure left the process
            # chdir'ed into the repository root).
            os.chdir(original_cwd)
        # self.repo.git.snapshot("--prefix=" + package_root_path.as_posix() + "", "--message=\'" + commit_message + "\'", "--branch=" + branch_name)
        print("branch after publish: " + self.repo.active_branch.name)
        self.tag(branch_name, version_tag, version_tag)

    def soft_reset_last_commit(self):
        """Undo the last commit but keep its changes staged."""
        print("Soft resetting last commit")
        self.repo.git.reset("--soft", "HEAD~1")

    def clean(self):
        """Stash everything as a backup, then hard-reset if still dirty."""
        print("Cleaning")
        print("Stashing everything (just in case)")
        stash_name = "backup stash before repo clean by git-upm-publisher " + str(datetime.now())
        self.repo.git.stash("save", stash_name)
        print("Stash name: " + stash_name)
        if self.is_dirty():
            print("Still dirty after stash, doing a hard reset")
            self.repo.git.reset("--hard")

    def push_all(self):
        """Push all branches and all tags to the remote."""
        print("Pushing")
        self.repo.git.push("--all")
        self.repo.git.push("--tags")

    def status(self):
        """Return the output of ``git status``."""
        print("Status")
        return self.repo.git.status()

    def fetch(self):
        """Fetch from all remotes."""
        print("Fetching")
        self.repo.git.fetch("--all")
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/sql/v20211101/geo_backup_policy.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = ['GeoBackupPolicyArgs', 'GeoBackupPolicy']
@pulumi.input_type
class GeoBackupPolicyArgs:
    def __init__(__self__, *,
                 database_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 server_name: pulumi.Input[str],
                 state: pulumi.Input['GeoBackupPolicyState'],
                 geo_backup_policy_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a GeoBackupPolicy resource.
        :param pulumi.Input[str] database_name: The name of the database.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        :param pulumi.Input[str] server_name: The name of the server.
        :param pulumi.Input['GeoBackupPolicyState'] state: The state of the geo backup policy.
        :param pulumi.Input[str] geo_backup_policy_name: The name of the Geo backup policy. This should always be 'Default'.
        """
        # pulumi.set/get store values in the pulumi input-type backing store
        # rather than plain instance attributes.
        pulumi.set(__self__, "database_name", database_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "server_name", server_name)
        pulumi.set(__self__, "state", state)
        if geo_backup_policy_name is not None:
            pulumi.set(__self__, "geo_backup_policy_name", geo_backup_policy_name)
    # Auto-generated accessor properties; the setters keep the pulumi backing
    # store in sync with attribute assignment.
    @property
    @pulumi.getter(name="databaseName")
    def database_name(self) -> pulumi.Input[str]:
        """
        The name of the database.
        """
        return pulumi.get(self, "database_name")
    @database_name.setter
    def database_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "database_name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="serverName")
    def server_name(self) -> pulumi.Input[str]:
        """
        The name of the server.
        """
        return pulumi.get(self, "server_name")
    @server_name.setter
    def server_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "server_name", value)
    @property
    @pulumi.getter
    def state(self) -> pulumi.Input['GeoBackupPolicyState']:
        """
        The state of the geo backup policy.
        """
        return pulumi.get(self, "state")
    @state.setter
    def state(self, value: pulumi.Input['GeoBackupPolicyState']):
        pulumi.set(self, "state", value)
    @property
    @pulumi.getter(name="geoBackupPolicyName")
    def geo_backup_policy_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Geo backup policy. This should always be 'Default'.
        """
        return pulumi.get(self, "geo_backup_policy_name")
    @geo_backup_policy_name.setter
    def geo_backup_policy_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "geo_backup_policy_name", value)
class GeoBackupPolicy(pulumi.CustomResource):
    # Generated pulumi resource wrapper for an Azure SQL geo backup policy.
    # Only change vs. the original: a dataset artifact ("| PypiClean") fused
    # onto the final line was removed.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 geo_backup_policy_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input['GeoBackupPolicyState']] = None,
                 __props__=None):
        """
        A Geo backup policy.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] database_name: The name of the database.
        :param pulumi.Input[str] geo_backup_policy_name: The name of the Geo backup policy. This should always be 'Default'.
        :param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        :param pulumi.Input[str] server_name: The name of the server.
        :param pulumi.Input['GeoBackupPolicyState'] state: The state of the geo backup policy.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: GeoBackupPolicyArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A Geo backup policy.
        :param str resource_name: The name of the resource.
        :param GeoBackupPolicyArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above (args-object vs kwargs).
        resource_args, opts = _utilities.get_resource_args_opts(GeoBackupPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 database_name: Optional[pulumi.Input[str]] = None,
                 geo_backup_policy_name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input['GeoBackupPolicyState']] = None,
                 __props__=None):
        opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = GeoBackupPolicyArgs.__new__(GeoBackupPolicyArgs)
            if database_name is None and not opts.urn:
                raise TypeError("Missing required property 'database_name'")
            __props__.__dict__["database_name"] = database_name
            __props__.__dict__["geo_backup_policy_name"] = geo_backup_policy_name
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if server_name is None and not opts.urn:
                raise TypeError("Missing required property 'server_name'")
            __props__.__dict__["server_name"] = server_name
            if state is None and not opts.urn:
                raise TypeError("Missing required property 'state'")
            __props__.__dict__["state"] = state
            # Output-only properties start as None; the provider fills them in.
            __props__.__dict__["kind"] = None
            __props__.__dict__["location"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["storage_type"] = None
            __props__.__dict__["type"] = None
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:sql:GeoBackupPolicy"), pulumi.Alias(type_="azure-native:sql/v20140401:GeoBackupPolicy"), pulumi.Alias(type_="azure-native:sql/v20220201preview:GeoBackupPolicy"), pulumi.Alias(type_="azure-native:sql/v20220501preview:GeoBackupPolicy"), pulumi.Alias(type_="azure-native:sql/v20220801preview:GeoBackupPolicy"), pulumi.Alias(type_="azure-native:sql/v20221101preview:GeoBackupPolicy"), pulumi.Alias(type_="azure-native:sql/v20230201preview:GeoBackupPolicy")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(GeoBackupPolicy, __self__).__init__(
            'azure-native:sql/v20211101:GeoBackupPolicy',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'GeoBackupPolicy':
        """
        Get an existing GeoBackupPolicy resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = GeoBackupPolicyArgs.__new__(GeoBackupPolicyArgs)
        __props__.__dict__["kind"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["state"] = None
        __props__.__dict__["storage_type"] = None
        __props__.__dict__["type"] = None
        return GeoBackupPolicy(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        """
        Kind of geo backup policy. This is metadata used for the Azure portal experience.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Backup policy location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def state(self) -> pulumi.Output[str]:
        """
        The state of the geo backup policy.
        """
        return pulumi.get(self, "state")
    @property
    @pulumi.getter(name="storageType")
    def storage_type(self) -> pulumi.Output[str]:
        """
        The storage type of the geo backup policy.
        """
        return pulumi.get(self, "storage_type")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
/mlm-pytorch-0.1.0.tar.gz/mlm-pytorch-0.1.0/mlm_pytorch/mlm_pytorch.py | import math
from functools import reduce
import torch
from torch import nn
import torch.nn.functional as F
# helpers
def prob_mask_like(t, prob):
    """Return a boolean tensor shaped like ``t`` where each entry is True
    independently with probability ``prob``."""
    uniform = torch.zeros_like(t).float().uniform_(0, 1)
    return uniform < prob
def mask_with_tokens(t, token_ids):
    """Return a boolean mask that is True wherever ``t`` holds one of ``token_ids``."""
    mask = torch.full_like(t, False, dtype=torch.bool)
    for token_id in token_ids:
        mask = mask | (t == token_id)
    return mask
def get_mask_subset_with_prob(mask, prob):
    # Select, per row, a random subset of the True positions of `mask` so that
    # roughly `prob` of each row's candidate tokens are chosen, capped at
    # ceil(prob * seq_len) per row.
    batch, seq_len, device = *mask.shape, mask.device
    max_masked = math.ceil(prob * seq_len)
    num_tokens = mask.sum(dim=-1, keepdim=True)
    # Flag the top-k slots that would exceed each row's quota
    # ceil(num_tokens * prob) so they can be discarded after sampling.
    mask_excess = (mask.cumsum(dim=-1) > (num_tokens * prob).ceil())
    mask_excess = mask_excess[:, :max_masked]
    # Random scores per position; disallowed positions get -1e9 so topk
    # never selects them (as long as enough candidates exist).
    rand = torch.rand((batch, seq_len), device=device).masked_fill(~mask, -1e9)
    _, sampled_indices = rand.topk(max_masked, dim=-1)
    # Shift indices by one so index 0 can serve as a "discard" slot for the
    # excess picks flagged above.
    sampled_indices = (sampled_indices + 1).masked_fill_(mask_excess, 0)
    new_mask = torch.zeros((batch, seq_len + 1), device=device)
    new_mask.scatter_(-1, sampled_indices, 1)
    # Drop the dummy slot at position 0 and return a boolean mask.
    return new_mask[:, 1:].bool()
# main class
class MLM(nn.Module):
    """Masked-language-modelling training wrapper.

    Wraps a token-level ``transformer`` (callable mapping token ids to
    per-token logits) and computes the MLM cross-entropy loss on randomly
    masked positions of the input sequence.
    """
    def __init__(
        self,
        transformer,
        mask_prob = 0.15,
        replace_prob = 0.9,
        num_tokens = None,
        random_token_prob = 0.,
        mask_token_id = 2,
        pad_token_id = 0,
        mask_ignore_token_ids = ()):
        # NOTE: the default for `mask_ignore_token_ids` was changed from a
        # mutable `[]` to `()` (never mutated here; backward compatible).
        super().__init__()
        self.transformer = transformer
        # mlm related probabilities
        self.mask_prob = mask_prob
        self.replace_prob = replace_prob
        self.num_tokens = num_tokens
        self.random_token_prob = random_token_prob
        # token ids
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.mask_ignore_token_ids = set([*mask_ignore_token_ids, pad_token_id])
    def forward(self, seq, **kwargs):
        """Return the MLM cross-entropy loss for a batch of token ids ``seq``."""
        # do not mask [pad] tokens, or any other tokens in the tokens designated to be excluded ([cls], [sep])
        # also do not include these special tokens in the tokens chosen at random
        no_mask = mask_with_tokens(seq, self.mask_ignore_token_ids)
        mask = get_mask_subset_with_prob(~no_mask, self.mask_prob)
        # mask input with mask tokens with probability of `replace_prob` (keep tokens the same with probability 1 - replace_prob)
        masked_seq = seq.clone().detach()
        # derive labels to predict (non-masked positions are ignored via pad id)
        labels = seq.masked_fill(~mask, self.pad_token_id)
        # if random token probability > 0 for mlm
        if self.random_token_prob > 0:
            assert self.num_tokens is not None, 'num_tokens keyword must be supplied when instantiating MLM if using random token replacement'
            random_token_prob = prob_mask_like(seq, self.random_token_prob)
            random_tokens = torch.randint(0, self.num_tokens, seq.shape, device=seq.device)
            random_no_mask = mask_with_tokens(random_tokens, self.mask_ignore_token_ids)
            random_token_prob &= ~random_no_mask
            masked_seq = torch.where(random_token_prob, random_tokens, masked_seq)
            # remove tokens that were substituted by random to be [mask]ed later
            mask = mask & ~random_token_prob
        # [mask] input
        replace_prob = prob_mask_like(seq, self.replace_prob)
        masked_seq = masked_seq.masked_fill(mask * replace_prob, self.mask_token_id)
        # get generator output and get mlm loss
        logits = self.transformer(masked_seq, **kwargs)
        mlm_loss = F.cross_entropy(
            logits.transpose(1, 2),
            labels,
            ignore_index = self.pad_token_id
        )
        return mlm_loss
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/docs/gw/kafka.rst | .. _kafka:
Kafka
=====
You can export statistics to a ``Kafka`` server.
The connection should be defined in the Glances configuration file as
follows:
.. code-block:: ini
[kafka]
host=localhost
port=9092
topic=glances
#compression=gzip
# Tags will be added for all events
#tags=foo:bar,spam:eggs
# You can also use dynamic values
#tags=hostname:`hostname -f`
Note: enabling compression consumes additional CPU on your host.
and run Glances with:
.. code-block:: console
$ glances --export kafka
Stats are sent in native ``JSON`` format to the topic:
- ``key``: plugin name
- ``value``: JSON dict
Example of record for the memory plugin:
.. code-block:: ini
ConsumerRecord(topic=u'glances', partition=0, offset=1305, timestamp=1490460592248, timestamp_type=0, key='mem', value=u'{"available": 2094710784, "used": 5777428480, "cached": 2513543168, "mem_careful": 50.0, "percent": 73.4, "free": 2094710784, "mem_critical": 90.0, "inactive": 2361626624, "shared": 475504640, "history_size": 28800.0, "mem_warning": 70.0, "total": 7872139264, "active": 4834361344, "buffers": 160112640}', checksum=214895201, serialized_key_size=3, serialized_value_size=303)
Python code example to consume Kafka Glances plugin:
.. code-block:: python
from kafka import KafkaConsumer
import json
consumer = KafkaConsumer('glances', value_deserializer=json.loads)
for s in consumer:
print(s)
| PypiClean |
/gow/io.py | import re
from typing import Tuple, List, Sequence, Callable
from gowpy.gow.builder import mk_undirected_edge, mk_directed_edge
from gowpy.gow.builder import GraphOfWords
from gowpy.gow.typing import Edge_label
def gow_to_data(gows: Sequence[GraphOfWords]) -> str:
    """
    Serialise a sequence of graph-of-words into the gSpan-style text format
    for interoperability with other programs

    Format:
      - "t # N" means the Nth graph,
      - "v M L" means that the Mth vertex in this graph has label L,
      - "e P Q L" means that there is an edge connecting the Pth vertex with the Qth vertex. The edge has label L.

    Graphs without any node are skipped (but still consume an index).
    The output ends with the "t # -1" end-of-data marker.

    :param gows:
    :return:
    """
    lines = []
    for graph_number, gow in enumerate(gows):
        if len(gow.nodes) == 0:
            continue
        lines.append(f"t # {graph_number}\n")
        # Map each distinct node label to a per-graph integer id; a "v" line
        # is emitted for every node occurrence, duplicates included.
        label_to_id = {}
        for label in gow.nodes:
            if label not in label_to_id:
                label_to_id[label] = len(label_to_id)
            lines.append(f"v {label_to_id[label]} {label}\n")
        # Translate edge endpoints to node ids and emit them sorted.
        triples = sorted(
            (label_to_id[start_label], label_to_id[end_label], edge_label_id)
            for (start_label, end_label, edge_label_id) in gow.edges
        )
        for start_id, end_id, edge_label_id in triples:
            lines.append(f"e {start_id} {end_id} {edge_label_id}\n")
    lines.append(f"t # {-1}")
    return "".join(lines)
# Regexes for the gSpan-style text output parsed by load_graphs().
# Converted to raw strings: "\d" in a non-raw literal is an invalid escape
# sequence (DeprecationWarning since Python 3.6, error in future versions).
r_new_graph_ = re.compile(r't +# +(\d+) +\* +(\d+)')
r_new_vertex_ = re.compile(r'v +(\d+) +(\d+)')
r_new_edge_ = re.compile(r'e +(\d+) +(\d+) +(\d+)')
r_new_parent_graphs_ = re.compile(r'x: +([\d ]+)')
def load_graphs(input_file_subgraph: str,
                input_file_frequent_nodes: str,
                get_token: Callable[[int], str],
                get_label: Callable[[int], Edge_label],
                is_directed: bool=False) -> Sequence[GraphOfWords]:
    """
    Load mined graph-of-words from the two gSpan-style output files.
    :param input_file_subgraph: file listing the frequent subgraphs (vertices and edges)
    :param input_file_frequent_nodes: file listing the frequent single nodes
    :param get_token: maps a node code back to its token
    :param get_label: maps an edge label code back to its label
    :param is_directed: whether the graphs are rebuilt with directed edges
    :return: graphs from both files; the ids of the second file are offset by
             the number of graphs read from the first one
    """
    subgraphs = _parse_gspan_file(input_file_subgraph, 0,
                                  get_token, get_label, is_directed)
    # Offset the ids of the frequent single nodes so they do not collide
    # with the ids of the frequent subgraphs loaded above.
    padding_id = len(subgraphs)
    subgraphs.extend(_parse_gspan_file(input_file_frequent_nodes, padding_id,
                                       get_token, get_label, is_directed))
    return subgraphs


def _parse_gspan_file(path: str,
                      id_padding: int,
                      get_token: Callable[[int], str],
                      get_label: Callable[[int], Edge_label],
                      is_directed: bool) -> List[GraphOfWords]:
    """Parse one gSpan-style output file into a list of graph-of-words.
    Shared by both the subgraph file and the frequent-nodes file; the latter
    simply contains no edge lines, so the edge branch never triggers for it.
    :param path: file to parse
    :param id_padding: offset added to every graph id read from the file
    """
    graphs: List[GraphOfWords] = []
    # State of the graph currently being read; flushed when the next
    # "t # ..." header (or the end of the file) is reached.
    current_id = None
    current_freq = None
    current_vertices = None
    current_edges = None
    current_parent_graph_ids = None
    with open(path, 'r') as f_input_file:
        for line in f_input_file:
            m_new_graph = r_new_graph_.search(line)
            m_new_vertex = r_new_vertex_.search(line)
            m_new_edge = r_new_edge_.search(line)
            m_new_parent_graphs = r_new_parent_graphs_.search(line)
            if m_new_graph:
                # Flush the previous graph before starting a new one.
                if current_id is not None:
                    graphs.append(_to_gow(current_id,
                                          current_freq,
                                          (current_vertices, current_edges),
                                          current_parent_graph_ids,
                                          get_token, get_label,
                                          is_directed))
                # Initialisation of the new graph
                current_id = int(m_new_graph.group(1)) + id_padding
                current_freq = int(m_new_graph.group(2))
                current_vertices = []
                current_edges = []
                current_parent_graph_ids = None
            elif m_new_vertex:
                vertex_id = int(m_new_vertex.group(1))
                vertex_label = int(m_new_vertex.group(2))
                current_vertices.append((vertex_id, vertex_label))
            elif m_new_edge:
                node_start = int(m_new_edge.group(1))
                node_end = int(m_new_edge.group(2))
                edge_label = int(m_new_edge.group(3))
                current_edges.append((node_start, node_end, edge_label))
            elif m_new_parent_graphs:
                current_parent_graph_ids = [int(graph_id) for graph_id in
                                            m_new_parent_graphs.group(1).strip().split(' ')]
            else:
                pass  # other lines (probably empty)
    # Flush the trailing graph. NOTE: the previous implementation tested
    # `if current_id and ...`, which silently dropped a trailing graph whose
    # id was 0; explicit `is not None` checks fix that. The parent-id check
    # is kept: a trailing graph without an "x: ..." line is still discarded.
    if current_id is not None and current_parent_graph_ids is not None:
        graphs.append(
            _to_gow(current_id, current_freq, (current_vertices, current_edges),
                    current_parent_graph_ids,
                    get_token, get_label,
                    is_directed))
    return graphs
# Intermediate, file-format-level representations used while parsing the
# mining output: "id" is a file-local vertex id, "code" refers to the
# vocabulary (token / edge-label codes).
IO_Nodes = List[Tuple[int, int]]  # (node_id, node_code)
IO_Edges = List[Tuple[int, int, int]]  # (node_start_id, node_end_id, edge_code)
IO_Subgraph = Tuple[IO_Nodes, IO_Edges]  # (vertices, edges) of one parsed subgraph
def _to_gow(subg_id: int,
            subg_freq: int,
            subgraph: IO_Subgraph,
            subg_current_parent_graph_ids: Sequence[int],
            get_token: Callable[[int], str],
            get_label: Callable[[int], Edge_label],
            is_directed: bool) -> GraphOfWords:
    """
    Rebuild a GraphOfWords from one parsed (vertices, edges) pair.
    File-local vertex ids are translated back to vocabulary node codes
    before the graph is reconstructed.
    :param subg_id: id of the mined subgraph (currently unused; kept for
                    interface stability)
    :param subg_freq: frequency of the subgraph in the corpus
    :param subgraph: (vertices, edges) as read from the mining output
    :param subg_current_parent_graph_ids: ids of the parent graphs containing
                    this subgraph (currently unused; kept for interface stability)
    :param get_token: maps a node code back to its token
    :param get_label: maps an edge label code back to its label
    :param is_directed: whether to rebuild directed edges
    :return: the reconstructed graph-of-words
    """
    subg_vertices, subg_edges = subgraph
    # Dealing with nodes: map file-local vertex ids back to node codes.
    # Node = (node id in *this* graph, node code)
    node_id_to_node_code = {}
    nodes = set()
    for node_id, node_code in subg_vertices:
        node_id_to_node_code[node_id] = node_code
        nodes.add(node_code)
    # Dealing with edges: the directed/undirected factory choice is loop
    # invariant, so it is hoisted out of the loop.
    mk_edge = mk_directed_edge if is_directed else mk_undirected_edge
    edges = set()
    for node_start_id, node_end_id, edge_label_code in subg_edges:
        edges.add(mk_edge(node_id_to_node_code[node_start_id],
                          node_id_to_node_code[node_end_id],
                          edge_label_code))
    return GraphOfWords(nodes=nodes,
                        edges=edges,
                        get_token=get_token,
                        get_label=get_label,
                        freq=subg_freq,
                        directed=is_directed)
/cdktf-cdktf-provider-azurerm-10.0.1.tar.gz/cdktf-cdktf-provider-azurerm-10.0.1/src/cdktf_cdktf_provider_azurerm/network_packet_capture/__init__.py | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from .._jsii import *
import cdktf as _cdktf_9a9027ec
import constructs as _constructs_77d1e7e8
class NetworkPacketCapture(
    _cdktf_9a9027ec.TerraformResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-azurerm.networkPacketCapture.NetworkPacketCapture",
):
    '''Represents a {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture azurerm_network_packet_capture}.'''
    # NOTE(review): auto-generated jsii binding for the cdktf azurerm provider.
    # Every method below delegates to the underlying jsii runtime object, so
    # manual edits here will be overwritten when the bindings are regenerated.
    def __init__(
        self,
        scope: _constructs_77d1e7e8.Construct,
        id_: builtins.str,
        *,
        name: builtins.str,
        network_watcher_name: builtins.str,
        resource_group_name: builtins.str,
        storage_location: typing.Union["NetworkPacketCaptureStorageLocation", typing.Dict[builtins.str, typing.Any]],
        target_resource_id: builtins.str,
        filter: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union["NetworkPacketCaptureFilter", typing.Dict[builtins.str, typing.Any]]]]] = None,
        id: typing.Optional[builtins.str] = None,
        maximum_bytes_per_packet: typing.Optional[jsii.Number] = None,
        maximum_bytes_per_session: typing.Optional[jsii.Number] = None,
        maximum_capture_duration: typing.Optional[jsii.Number] = None,
        timeouts: typing.Optional[typing.Union["NetworkPacketCaptureTimeouts", typing.Dict[builtins.str, typing.Any]]] = None,
        connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
        count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
        depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
        for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
        lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
        provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
        provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
    ) -> None:
        '''Create a new {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture azurerm_network_packet_capture} Resource.
        :param scope: The scope in which to define this construct.
        :param id_: The scoped construct ID. Must be unique amongst siblings in the same scope
        :param name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#name NetworkPacketCapture#name}.
        :param network_watcher_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#network_watcher_name NetworkPacketCapture#network_watcher_name}.
        :param resource_group_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#resource_group_name NetworkPacketCapture#resource_group_name}.
        :param storage_location: storage_location block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#storage_location NetworkPacketCapture#storage_location}
        :param target_resource_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#target_resource_id NetworkPacketCapture#target_resource_id}.
        :param filter: filter block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#filter NetworkPacketCapture#filter}
        :param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#id NetworkPacketCapture#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
        :param maximum_bytes_per_packet: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#maximum_bytes_per_packet NetworkPacketCapture#maximum_bytes_per_packet}.
        :param maximum_bytes_per_session: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#maximum_bytes_per_session NetworkPacketCapture#maximum_bytes_per_session}.
        :param maximum_capture_duration: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#maximum_capture_duration NetworkPacketCapture#maximum_capture_duration}.
        :param timeouts: timeouts block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#timeouts NetworkPacketCapture#timeouts}
        :param connection: 
        :param count: 
        :param depends_on: 
        :param for_each: 
        :param lifecycle: 
        :param provider: 
        :param provisioners: 
        '''
        # Runtime type validation is compiled out under `python -O`.
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__b5e3c560147495e2d8f597dee2fe43afe5db4036c6bca2f7920f0360f303c119)
            check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
            check_type(argname="argument id_", value=id_, expected_type=type_hints["id_"])
        # All keyword arguments are bundled into a single config struct
        # before being handed to the jsii kernel.
        config = NetworkPacketCaptureConfig(
            name=name,
            network_watcher_name=network_watcher_name,
            resource_group_name=resource_group_name,
            storage_location=storage_location,
            target_resource_id=target_resource_id,
            filter=filter,
            id=id,
            maximum_bytes_per_packet=maximum_bytes_per_packet,
            maximum_bytes_per_session=maximum_bytes_per_session,
            maximum_capture_duration=maximum_capture_duration,
            timeouts=timeouts,
            connection=connection,
            count=count,
            depends_on=depends_on,
            for_each=for_each,
            lifecycle=lifecycle,
            provider=provider,
            provisioners=provisioners,
        )
        jsii.create(self.__class__, self, [scope, id_, config])
    # --- block setters: each put* method replaces the corresponding
    # --- nested configuration block on the underlying jsii object.
    @jsii.member(jsii_name="putFilter")
    def put_filter(
        self,
        value: typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union["NetworkPacketCaptureFilter", typing.Dict[builtins.str, typing.Any]]]],
    ) -> None:
        '''
        :param value: -
        '''
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__cb4ba221b9ca64b230a733aa5bfd6a28b7522ea446cc0789118802663ffdac5d)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        return typing.cast(None, jsii.invoke(self, "putFilter", [value]))
    @jsii.member(jsii_name="putStorageLocation")
    def put_storage_location(
        self,
        *,
        file_path: typing.Optional[builtins.str] = None,
        storage_account_id: typing.Optional[builtins.str] = None,
    ) -> None:
        '''
        :param file_path: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#file_path NetworkPacketCapture#file_path}.
        :param storage_account_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#storage_account_id NetworkPacketCapture#storage_account_id}.
        '''
        value = NetworkPacketCaptureStorageLocation(
            file_path=file_path, storage_account_id=storage_account_id
        )
        return typing.cast(None, jsii.invoke(self, "putStorageLocation", [value]))
    @jsii.member(jsii_name="putTimeouts")
    def put_timeouts(
        self,
        *,
        create: typing.Optional[builtins.str] = None,
        delete: typing.Optional[builtins.str] = None,
        read: typing.Optional[builtins.str] = None,
        update: typing.Optional[builtins.str] = None,
    ) -> None:
        '''
        :param create: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#create NetworkPacketCapture#create}.
        :param delete: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#delete NetworkPacketCapture#delete}.
        :param read: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#read NetworkPacketCapture#read}.
        :param update: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#update NetworkPacketCapture#update}.
        '''
        value = NetworkPacketCaptureTimeouts(
            create=create, delete=delete, read=read, update=update
        )
        return typing.cast(None, jsii.invoke(self, "putTimeouts", [value]))
    # --- reset* methods: delegate to jsii to unset the corresponding
    # --- optional argument (reverting it to the provider default).
    @jsii.member(jsii_name="resetFilter")
    def reset_filter(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetFilter", []))
    @jsii.member(jsii_name="resetId")
    def reset_id(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetId", []))
    @jsii.member(jsii_name="resetMaximumBytesPerPacket")
    def reset_maximum_bytes_per_packet(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetMaximumBytesPerPacket", []))
    @jsii.member(jsii_name="resetMaximumBytesPerSession")
    def reset_maximum_bytes_per_session(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetMaximumBytesPerSession", []))
    @jsii.member(jsii_name="resetMaximumCaptureDuration")
    def reset_maximum_capture_duration(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetMaximumCaptureDuration", []))
    @jsii.member(jsii_name="resetTimeouts")
    def reset_timeouts(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetTimeouts", []))
    # Internal: collects the resource attributes for synthesis (jsii-backed).
    @jsii.member(jsii_name="synthesizeAttributes")
    def _synthesize_attributes(self) -> typing.Mapping[builtins.str, typing.Any]:
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "synthesizeAttributes", []))
    # --- typed accessors over the resource's attributes; all reads and
    # --- writes go through the jsii kernel.
    @jsii.python.classproperty
    @jsii.member(jsii_name="tfResourceType")
    def TF_RESOURCE_TYPE(cls) -> builtins.str:
        return typing.cast(builtins.str, jsii.sget(cls, "tfResourceType"))
    @builtins.property
    @jsii.member(jsii_name="filter")
    def filter(self) -> "NetworkPacketCaptureFilterList":
        return typing.cast("NetworkPacketCaptureFilterList", jsii.get(self, "filter"))
    @builtins.property
    @jsii.member(jsii_name="storageLocation")
    def storage_location(self) -> "NetworkPacketCaptureStorageLocationOutputReference":
        return typing.cast("NetworkPacketCaptureStorageLocationOutputReference", jsii.get(self, "storageLocation"))
    @builtins.property
    @jsii.member(jsii_name="timeouts")
    def timeouts(self) -> "NetworkPacketCaptureTimeoutsOutputReference":
        return typing.cast("NetworkPacketCaptureTimeoutsOutputReference", jsii.get(self, "timeouts"))
    @builtins.property
    @jsii.member(jsii_name="filterInput")
    def filter_input(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List["NetworkPacketCaptureFilter"]]]:
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List["NetworkPacketCaptureFilter"]]], jsii.get(self, "filterInput"))
    @builtins.property
    @jsii.member(jsii_name="idInput")
    def id_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "idInput"))
    @builtins.property
    @jsii.member(jsii_name="maximumBytesPerPacketInput")
    def maximum_bytes_per_packet_input(self) -> typing.Optional[jsii.Number]:
        return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "maximumBytesPerPacketInput"))
    @builtins.property
    @jsii.member(jsii_name="maximumBytesPerSessionInput")
    def maximum_bytes_per_session_input(self) -> typing.Optional[jsii.Number]:
        return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "maximumBytesPerSessionInput"))
    @builtins.property
    @jsii.member(jsii_name="maximumCaptureDurationInput")
    def maximum_capture_duration_input(self) -> typing.Optional[jsii.Number]:
        return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "maximumCaptureDurationInput"))
    @builtins.property
    @jsii.member(jsii_name="nameInput")
    def name_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "nameInput"))
    @builtins.property
    @jsii.member(jsii_name="networkWatcherNameInput")
    def network_watcher_name_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "networkWatcherNameInput"))
    @builtins.property
    @jsii.member(jsii_name="resourceGroupNameInput")
    def resource_group_name_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "resourceGroupNameInput"))
    @builtins.property
    @jsii.member(jsii_name="storageLocationInput")
    def storage_location_input(
        self,
    ) -> typing.Optional["NetworkPacketCaptureStorageLocation"]:
        return typing.cast(typing.Optional["NetworkPacketCaptureStorageLocation"], jsii.get(self, "storageLocationInput"))
    @builtins.property
    @jsii.member(jsii_name="targetResourceIdInput")
    def target_resource_id_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "targetResourceIdInput"))
    @builtins.property
    @jsii.member(jsii_name="timeoutsInput")
    def timeouts_input(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, "NetworkPacketCaptureTimeouts"]]:
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, "NetworkPacketCaptureTimeouts"]], jsii.get(self, "timeoutsInput"))
    @builtins.property
    @jsii.member(jsii_name="id")
    def id(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "id"))
    @id.setter
    def id(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__783e646ebd7eb73eecf6acb7fce7b21ee9b735453423f72e234851da341cc9d3)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "id", value)
    @builtins.property
    @jsii.member(jsii_name="maximumBytesPerPacket")
    def maximum_bytes_per_packet(self) -> jsii.Number:
        return typing.cast(jsii.Number, jsii.get(self, "maximumBytesPerPacket"))
    @maximum_bytes_per_packet.setter
    def maximum_bytes_per_packet(self, value: jsii.Number) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__287f86a37cb9fc29e750d0a9d9edb64d455e57da69a7198d83951a5345c65b1a)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "maximumBytesPerPacket", value)
    @builtins.property
    @jsii.member(jsii_name="maximumBytesPerSession")
    def maximum_bytes_per_session(self) -> jsii.Number:
        return typing.cast(jsii.Number, jsii.get(self, "maximumBytesPerSession"))
    @maximum_bytes_per_session.setter
    def maximum_bytes_per_session(self, value: jsii.Number) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__ac1100b4fb8058bc9a91b4970dd24478ffe1eeb699fefa69e255e8240a945aca)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "maximumBytesPerSession", value)
    @builtins.property
    @jsii.member(jsii_name="maximumCaptureDuration")
    def maximum_capture_duration(self) -> jsii.Number:
        return typing.cast(jsii.Number, jsii.get(self, "maximumCaptureDuration"))
    @maximum_capture_duration.setter
    def maximum_capture_duration(self, value: jsii.Number) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__8836dc30a0d04488def4d0f0573cb7b2b3d70695ba6c4c120fd6a3fee08c5777)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "maximumCaptureDuration", value)
    @builtins.property
    @jsii.member(jsii_name="name")
    def name(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "name"))
    @name.setter
    def name(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__2dad01ceec066300f106ed24514b032df161bd9705dcbb3cf099b26647f1c5d7)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "name", value)
    @builtins.property
    @jsii.member(jsii_name="networkWatcherName")
    def network_watcher_name(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "networkWatcherName"))
    @network_watcher_name.setter
    def network_watcher_name(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__6630cf64256211385e3c54e28577590e66daa2a4fd547ed3d861658e07d5dc0e)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "networkWatcherName", value)
    @builtins.property
    @jsii.member(jsii_name="resourceGroupName")
    def resource_group_name(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "resourceGroupName"))
    @resource_group_name.setter
    def resource_group_name(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__8b7efcf5f5e45aa602b310f2e5a41df8160625659edb649dd8d31bcdc785f8bb)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "resourceGroupName", value)
    @builtins.property
    @jsii.member(jsii_name="targetResourceId")
    def target_resource_id(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "targetResourceId"))
    @target_resource_id.setter
    def target_resource_id(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__ba1b06ae80fffa1b6f706d997c90792c2fcbf79fc71a61aecfa6e32d430776d2)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "targetResourceId", value)
@jsii.data_type(
jsii_type="@cdktf/provider-azurerm.networkPacketCapture.NetworkPacketCaptureConfig",
jsii_struct_bases=[_cdktf_9a9027ec.TerraformMetaArguments],
name_mapping={
"connection": "connection",
"count": "count",
"depends_on": "dependsOn",
"for_each": "forEach",
"lifecycle": "lifecycle",
"provider": "provider",
"provisioners": "provisioners",
"name": "name",
"network_watcher_name": "networkWatcherName",
"resource_group_name": "resourceGroupName",
"storage_location": "storageLocation",
"target_resource_id": "targetResourceId",
"filter": "filter",
"id": "id",
"maximum_bytes_per_packet": "maximumBytesPerPacket",
"maximum_bytes_per_session": "maximumBytesPerSession",
"maximum_capture_duration": "maximumCaptureDuration",
"timeouts": "timeouts",
},
)
class NetworkPacketCaptureConfig(_cdktf_9a9027ec.TerraformMetaArguments):
def __init__(
self,
*,
connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
name: builtins.str,
network_watcher_name: builtins.str,
resource_group_name: builtins.str,
storage_location: typing.Union["NetworkPacketCaptureStorageLocation", typing.Dict[builtins.str, typing.Any]],
target_resource_id: builtins.str,
filter: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union["NetworkPacketCaptureFilter", typing.Dict[builtins.str, typing.Any]]]]] = None,
id: typing.Optional[builtins.str] = None,
maximum_bytes_per_packet: typing.Optional[jsii.Number] = None,
maximum_bytes_per_session: typing.Optional[jsii.Number] = None,
maximum_capture_duration: typing.Optional[jsii.Number] = None,
timeouts: typing.Optional[typing.Union["NetworkPacketCaptureTimeouts", typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
'''
:param connection:
:param count:
:param depends_on:
:param for_each:
:param lifecycle:
:param provider:
:param provisioners:
:param name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#name NetworkPacketCapture#name}.
:param network_watcher_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#network_watcher_name NetworkPacketCapture#network_watcher_name}.
:param resource_group_name: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#resource_group_name NetworkPacketCapture#resource_group_name}.
:param storage_location: storage_location block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#storage_location NetworkPacketCapture#storage_location}
:param target_resource_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#target_resource_id NetworkPacketCapture#target_resource_id}.
:param filter: filter block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#filter NetworkPacketCapture#filter}
:param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#id NetworkPacketCapture#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
:param maximum_bytes_per_packet: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#maximum_bytes_per_packet NetworkPacketCapture#maximum_bytes_per_packet}.
:param maximum_bytes_per_session: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#maximum_bytes_per_session NetworkPacketCapture#maximum_bytes_per_session}.
:param maximum_capture_duration: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#maximum_capture_duration NetworkPacketCapture#maximum_capture_duration}.
:param timeouts: timeouts block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#timeouts NetworkPacketCapture#timeouts}
'''
if isinstance(lifecycle, dict):
lifecycle = _cdktf_9a9027ec.TerraformResourceLifecycle(**lifecycle)
if isinstance(storage_location, dict):
storage_location = NetworkPacketCaptureStorageLocation(**storage_location)
if isinstance(timeouts, dict):
timeouts = NetworkPacketCaptureTimeouts(**timeouts)
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__f441f1d3a462367d5d95747872c257dbad946ddffbac2c01eadb5035fc3d0548)
check_type(argname="argument connection", value=connection, expected_type=type_hints["connection"])
check_type(argname="argument count", value=count, expected_type=type_hints["count"])
check_type(argname="argument depends_on", value=depends_on, expected_type=type_hints["depends_on"])
check_type(argname="argument for_each", value=for_each, expected_type=type_hints["for_each"])
check_type(argname="argument lifecycle", value=lifecycle, expected_type=type_hints["lifecycle"])
check_type(argname="argument provider", value=provider, expected_type=type_hints["provider"])
check_type(argname="argument provisioners", value=provisioners, expected_type=type_hints["provisioners"])
check_type(argname="argument name", value=name, expected_type=type_hints["name"])
check_type(argname="argument network_watcher_name", value=network_watcher_name, expected_type=type_hints["network_watcher_name"])
check_type(argname="argument resource_group_name", value=resource_group_name, expected_type=type_hints["resource_group_name"])
check_type(argname="argument storage_location", value=storage_location, expected_type=type_hints["storage_location"])
check_type(argname="argument target_resource_id", value=target_resource_id, expected_type=type_hints["target_resource_id"])
check_type(argname="argument filter", value=filter, expected_type=type_hints["filter"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
check_type(argname="argument maximum_bytes_per_packet", value=maximum_bytes_per_packet, expected_type=type_hints["maximum_bytes_per_packet"])
check_type(argname="argument maximum_bytes_per_session", value=maximum_bytes_per_session, expected_type=type_hints["maximum_bytes_per_session"])
check_type(argname="argument maximum_capture_duration", value=maximum_capture_duration, expected_type=type_hints["maximum_capture_duration"])
check_type(argname="argument timeouts", value=timeouts, expected_type=type_hints["timeouts"])
self._values: typing.Dict[builtins.str, typing.Any] = {
"name": name,
"network_watcher_name": network_watcher_name,
"resource_group_name": resource_group_name,
"storage_location": storage_location,
"target_resource_id": target_resource_id,
}
if connection is not None:
self._values["connection"] = connection
if count is not None:
self._values["count"] = count
if depends_on is not None:
self._values["depends_on"] = depends_on
if for_each is not None:
self._values["for_each"] = for_each
if lifecycle is not None:
self._values["lifecycle"] = lifecycle
if provider is not None:
self._values["provider"] = provider
if provisioners is not None:
self._values["provisioners"] = provisioners
if filter is not None:
self._values["filter"] = filter
if id is not None:
self._values["id"] = id
if maximum_bytes_per_packet is not None:
self._values["maximum_bytes_per_packet"] = maximum_bytes_per_packet
if maximum_bytes_per_session is not None:
self._values["maximum_bytes_per_session"] = maximum_bytes_per_session
if maximum_capture_duration is not None:
self._values["maximum_capture_duration"] = maximum_capture_duration
if timeouts is not None:
self._values["timeouts"] = timeouts
@builtins.property
def connection(
self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]]:
'''
:stability: experimental
'''
result = self._values.get("connection")
return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]], result)
@builtins.property
def count(
    self,
) -> typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]]:
    '''The Terraform ``count`` meta-argument stored for this config, if any.

    :stability: experimental
    '''
    return typing.cast(
        typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]],
        self._values.get("count"),
    )
@builtins.property
def depends_on(
    self,
) -> typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]]:
    '''Explicit resource dependencies stored for this config, if any.

    :stability: experimental
    '''
    return typing.cast(
        typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]],
        self._values.get("depends_on"),
    )
@builtins.property
def for_each(self) -> typing.Optional[_cdktf_9a9027ec.ITerraformIterator]:
    '''The Terraform ``for_each`` iterator stored for this config, if any.

    :stability: experimental
    '''
    return typing.cast(
        typing.Optional[_cdktf_9a9027ec.ITerraformIterator],
        self._values.get("for_each"),
    )
@builtins.property
def lifecycle(self) -> typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle]:
    '''The resource lifecycle settings stored for this config, if any.

    :stability: experimental
    '''
    return typing.cast(
        typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle],
        self._values.get("lifecycle"),
    )
@builtins.property
def provider(self) -> typing.Optional[_cdktf_9a9027ec.TerraformProvider]:
    '''The Terraform provider instance stored for this config, if any.

    :stability: experimental
    '''
    return typing.cast(
        typing.Optional[_cdktf_9a9027ec.TerraformProvider],
        self._values.get("provider"),
    )
@builtins.property
def provisioners(
    self,
) -> typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]]:
    '''The list of provisioners stored for this config, if any.

    :stability: experimental
    '''
    return typing.cast(
        typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]],
        self._values.get("provisioners"),
    )
@builtins.property
def name(self) -> builtins.str:
    '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#name NetworkPacketCapture#name}.'''
    value = self._values.get("name")
    # Required argument: presence is guaranteed by __init__, so a missing key
    # indicates a corrupted _values mapping.
    assert value is not None, "Required property 'name' is missing"
    return typing.cast(builtins.str, value)
@builtins.property
def network_watcher_name(self) -> builtins.str:
    '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#network_watcher_name NetworkPacketCapture#network_watcher_name}.'''
    value = self._values.get("network_watcher_name")
    # Required argument: always populated by __init__.
    assert value is not None, "Required property 'network_watcher_name' is missing"
    return typing.cast(builtins.str, value)
@builtins.property
def resource_group_name(self) -> builtins.str:
    '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#resource_group_name NetworkPacketCapture#resource_group_name}.'''
    value = self._values.get("resource_group_name")
    # Required argument: always populated by __init__.
    assert value is not None, "Required property 'resource_group_name' is missing"
    return typing.cast(builtins.str, value)
@builtins.property
def storage_location(self) -> "NetworkPacketCaptureStorageLocation":
    '''storage_location block.

    Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#storage_location NetworkPacketCapture#storage_location}
    '''
    value = self._values.get("storage_location")
    # Required block: always populated by __init__.
    assert value is not None, "Required property 'storage_location' is missing"
    return typing.cast("NetworkPacketCaptureStorageLocation", value)
@builtins.property
def target_resource_id(self) -> builtins.str:
    '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#target_resource_id NetworkPacketCapture#target_resource_id}.'''
    value = self._values.get("target_resource_id")
    # Required argument: always populated by __init__.
    assert value is not None, "Required property 'target_resource_id' is missing"
    return typing.cast(builtins.str, value)
@builtins.property
def filter(
    self,
) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List["NetworkPacketCaptureFilter"]]]:
    '''filter block.

    Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#filter NetworkPacketCapture#filter}
    '''
    return typing.cast(
        typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List["NetworkPacketCaptureFilter"]]],
        self._values.get("filter"),
    )
@builtins.property
def id(self) -> typing.Optional[builtins.str]:
    '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#id NetworkPacketCapture#id}.

    Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2.
    If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
    '''
    return typing.cast(typing.Optional[builtins.str], self._values.get("id"))
@builtins.property
def maximum_bytes_per_packet(self) -> typing.Optional[jsii.Number]:
    '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#maximum_bytes_per_packet NetworkPacketCapture#maximum_bytes_per_packet}.'''
    return typing.cast(typing.Optional[jsii.Number], self._values.get("maximum_bytes_per_packet"))
@builtins.property
def maximum_bytes_per_session(self) -> typing.Optional[jsii.Number]:
    '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#maximum_bytes_per_session NetworkPacketCapture#maximum_bytes_per_session}.'''
    return typing.cast(typing.Optional[jsii.Number], self._values.get("maximum_bytes_per_session"))
@builtins.property
def maximum_capture_duration(self) -> typing.Optional[jsii.Number]:
    '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#maximum_capture_duration NetworkPacketCapture#maximum_capture_duration}.'''
    return typing.cast(typing.Optional[jsii.Number], self._values.get("maximum_capture_duration"))
@builtins.property
def timeouts(self) -> typing.Optional["NetworkPacketCaptureTimeouts"]:
    '''timeouts block.

    Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#timeouts NetworkPacketCapture#timeouts}
    '''
    return typing.cast(typing.Optional["NetworkPacketCaptureTimeouts"], self._values.get("timeouts"))
def __eq__(self, rhs: typing.Any) -> builtins.bool:
    # Equal only to another instance of the same class with identical values.
    if not isinstance(rhs, self.__class__):
        return False
    return rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
    # Defined as the negation of equality (delegates to rhs == self, matching
    # the original operand order).
    equal = rhs == self
    return not equal
def __repr__(self) -> str:
    # Render as ClassName(key1=value1, key2=value2, ...) using repr of each value.
    rendered = ", ".join(f"{key}={value!r}" for key, value in self._values.items())
    return "NetworkPacketCaptureConfig(%s)" % rendered
@jsii.data_type(
    jsii_type="@cdktf/provider-azurerm.networkPacketCapture.NetworkPacketCaptureFilter",
    jsii_struct_bases=[],
    name_mapping={
        "protocol": "protocol",
        "local_ip_address": "localIpAddress",
        "local_port": "localPort",
        "remote_ip_address": "remoteIpAddress",
        "remote_port": "remotePort",
    },
)
class NetworkPacketCaptureFilter:
    # Struct for one "filter" block of the network packet capture resource.
    # Values are held in the immutable-by-convention mapping self._values.

    def __init__(
        self,
        *,
        protocol: builtins.str,
        local_ip_address: typing.Optional[builtins.str] = None,
        local_port: typing.Optional[builtins.str] = None,
        remote_ip_address: typing.Optional[builtins.str] = None,
        remote_port: typing.Optional[builtins.str] = None,
    ) -> None:
        '''
        :param protocol: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#protocol NetworkPacketCapture#protocol}.
        :param local_ip_address: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#local_ip_address NetworkPacketCapture#local_ip_address}.
        :param local_port: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#local_port NetworkPacketCapture#local_port}.
        :param remote_ip_address: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#remote_ip_address NetworkPacketCapture#remote_ip_address}.
        :param remote_port: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#remote_port NetworkPacketCapture#remote_port}.
        '''
        if __debug__:
            # Runtime type validation only runs when assertions are enabled.
            type_hints = typing.get_type_hints(_typecheckingstub__da45bcfdcd79e1f418a2fa7505c82dbfb1abc265032bcee65775f04e92d3422b)
            check_type(argname="argument protocol", value=protocol, expected_type=type_hints["protocol"])
            check_type(argname="argument local_ip_address", value=local_ip_address, expected_type=type_hints["local_ip_address"])
            check_type(argname="argument local_port", value=local_port, expected_type=type_hints["local_port"])
            check_type(argname="argument remote_ip_address", value=remote_ip_address, expected_type=type_hints["remote_ip_address"])
            check_type(argname="argument remote_port", value=remote_port, expected_type=type_hints["remote_port"])
        # Required value first, then optional values in declaration order,
        # skipping any that were left as None.
        self._values: typing.Dict[builtins.str, typing.Any] = {"protocol": protocol}
        optionals = (
            ("local_ip_address", local_ip_address),
            ("local_port", local_port),
            ("remote_ip_address", remote_ip_address),
            ("remote_port", remote_port),
        )
        self._values.update({key: val for key, val in optionals if val is not None})

    @builtins.property
    def protocol(self) -> builtins.str:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#protocol NetworkPacketCapture#protocol}.'''
        value = self._values.get("protocol")
        assert value is not None, "Required property 'protocol' is missing"
        return typing.cast(builtins.str, value)

    @builtins.property
    def local_ip_address(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#local_ip_address NetworkPacketCapture#local_ip_address}.'''
        return typing.cast(typing.Optional[builtins.str], self._values.get("local_ip_address"))

    @builtins.property
    def local_port(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#local_port NetworkPacketCapture#local_port}.'''
        return typing.cast(typing.Optional[builtins.str], self._values.get("local_port"))

    @builtins.property
    def remote_ip_address(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#remote_ip_address NetworkPacketCapture#remote_ip_address}.'''
        return typing.cast(typing.Optional[builtins.str], self._values.get("remote_ip_address"))

    @builtins.property
    def remote_port(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#remote_port NetworkPacketCapture#remote_port}.'''
        return typing.cast(typing.Optional[builtins.str], self._values.get("remote_port"))

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Equal only to another instance of the same class with identical values.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        equal = rhs == self
        return not equal

    def __repr__(self) -> str:
        rendered = ", ".join(f"{key}={value!r}" for key, value in self._values.items())
        return "NetworkPacketCaptureFilter(%s)" % rendered
class NetworkPacketCaptureFilterList(
    _cdktf_9a9027ec.ComplexList,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-azurerm.networkPacketCapture.NetworkPacketCaptureFilterList",
):
    # jsii-backed view over the resource's list of "filter" blocks.  All state
    # lives in the jsii kernel; every accessor delegates via jsii.get/set/invoke,
    # so member names and jsii_name mappings must stay in sync with the JSII type.

    def __init__(
        self,
        terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
        terraform_attribute: builtins.str,
        wraps_set: builtins.bool,
    ) -> None:
        '''
        :param terraform_resource: The parent resource.
        :param terraform_attribute: The attribute on the parent resource this class is referencing.
        :param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
        '''
        if __debug__:
            # Runtime argument validation; skipped when Python runs with -O.
            type_hints = typing.get_type_hints(_typecheckingstub__4c91cdb38743ef46cdf132089a9266973146c4b908df11ca69506b5e924e6b81)
            check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
            check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
            check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"])
        jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set])

    @jsii.member(jsii_name="get")
    def get(self, index: jsii.Number) -> "NetworkPacketCaptureFilterOutputReference":
        '''Return an output reference for the filter block at *index*.

        :param index: the index of the item to return.
        '''
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__79b838064d7e316be2e6230d158c7e147d05dfa05034ce6e8c70da500138834e)
            check_type(argname="argument index", value=index, expected_type=type_hints["index"])
        return typing.cast("NetworkPacketCaptureFilterOutputReference", jsii.invoke(self, "get", [index]))

    @builtins.property
    @jsii.member(jsii_name="terraformAttribute")
    def _terraform_attribute(self) -> builtins.str:
        '''The attribute on the parent resource this class is referencing.'''
        return typing.cast(builtins.str, jsii.get(self, "terraformAttribute"))

    @_terraform_attribute.setter
    def _terraform_attribute(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__3a3512daa04257364aa75569bac8144a4685c014d33294caeb1a08c75f90c3d1)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "terraformAttribute", value)

    @builtins.property
    @jsii.member(jsii_name="terraformResource")
    def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent:
        '''The parent resource.'''
        return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource"))

    @_terraform_resource.setter
    def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__4f35107cd448983d59b071898ab9afd4d98255279203811bd0bdf958c6af081a)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "terraformResource", value)

    @builtins.property
    @jsii.member(jsii_name="wrapsSet")
    def _wraps_set(self) -> builtins.bool:
        '''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).'''
        return typing.cast(builtins.bool, jsii.get(self, "wrapsSet"))

    @_wraps_set.setter
    def _wraps_set(self, value: builtins.bool) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__9096916dd6149d057c0e800cdbd105afe1ba52f6640a328157617897df256bee)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "wrapsSet", value)

    @builtins.property
    @jsii.member(jsii_name="internalValue")
    def internal_value(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[NetworkPacketCaptureFilter]]]:
        '''The raw list value (resolvable token or concrete filter structs), if set.'''
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[NetworkPacketCaptureFilter]]], jsii.get(self, "internalValue"))

    @internal_value.setter
    def internal_value(
        self,
        value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[NetworkPacketCaptureFilter]]],
    ) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__62f0660b575e48a5b9112b879fe57a158eb1b391f5bc140e39bb2f6a967f076f)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "internalValue", value)
class NetworkPacketCaptureFilterOutputReference(
    _cdktf_9a9027ec.ComplexObject,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-azurerm.networkPacketCapture.NetworkPacketCaptureFilterOutputReference",
):
    # jsii-backed accessor for a single "filter" block.  Reads and writes go
    # through the jsii kernel (jsii.get/jsii.set/jsii.invoke); member names and
    # jsii_name mappings must stay in sync with the JSII type definition.

    def __init__(
        self,
        terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
        terraform_attribute: builtins.str,
        complex_object_index: jsii.Number,
        complex_object_is_from_set: builtins.bool,
    ) -> None:
        '''
        :param terraform_resource: The parent resource.
        :param terraform_attribute: The attribute on the parent resource this class is referencing.
        :param complex_object_index: the index of this item in the list.
        :param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index).
        '''
        if __debug__:
            # Runtime argument validation; skipped when Python runs with -O.
            type_hints = typing.get_type_hints(_typecheckingstub__a1e96b0fec5cda5787ea563ea9bab08ce5b06a4b650db14ac05fd6c579643d3a)
            check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
            check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
            check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"])
            check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"])
        jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set])

    # --- reset helpers: clear an optional attribute back to unset -------------

    @jsii.member(jsii_name="resetLocalIpAddress")
    def reset_local_ip_address(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetLocalIpAddress", []))

    @jsii.member(jsii_name="resetLocalPort")
    def reset_local_port(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetLocalPort", []))

    @jsii.member(jsii_name="resetRemoteIpAddress")
    def reset_remote_ip_address(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetRemoteIpAddress", []))

    @jsii.member(jsii_name="resetRemotePort")
    def reset_remote_port(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetRemotePort", []))

    # --- *Input properties: the currently configured (unresolved) values ------

    @builtins.property
    @jsii.member(jsii_name="localIpAddressInput")
    def local_ip_address_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "localIpAddressInput"))

    @builtins.property
    @jsii.member(jsii_name="localPortInput")
    def local_port_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "localPortInput"))

    @builtins.property
    @jsii.member(jsii_name="protocolInput")
    def protocol_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "protocolInput"))

    @builtins.property
    @jsii.member(jsii_name="remoteIpAddressInput")
    def remote_ip_address_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "remoteIpAddressInput"))

    @builtins.property
    @jsii.member(jsii_name="remotePortInput")
    def remote_port_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "remotePortInput"))

    # --- attribute accessors (get via jsii; set with debug-time validation) ---

    @builtins.property
    @jsii.member(jsii_name="localIpAddress")
    def local_ip_address(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "localIpAddress"))

    @local_ip_address.setter
    def local_ip_address(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__f08866833e3211ae715f252399d1714e0678492831227a17c73795978d2378ef)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "localIpAddress", value)

    @builtins.property
    @jsii.member(jsii_name="localPort")
    def local_port(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "localPort"))

    @local_port.setter
    def local_port(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__51ccb60af3887536dd228728f7f86fdbb3049b03686001183e975ddf19ab3527)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "localPort", value)

    @builtins.property
    @jsii.member(jsii_name="protocol")
    def protocol(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "protocol"))

    @protocol.setter
    def protocol(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__d18b360b592115d2bb9b1f1ebd0093ac85aa84f2bfc49541d9f9d9dbbc31d99d)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "protocol", value)

    @builtins.property
    @jsii.member(jsii_name="remoteIpAddress")
    def remote_ip_address(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "remoteIpAddress"))

    @remote_ip_address.setter
    def remote_ip_address(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__7598ede225c62e9616c880db4f3b6150793266318d1739fc6ba1072bb907c42d)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "remoteIpAddress", value)

    @builtins.property
    @jsii.member(jsii_name="remotePort")
    def remote_port(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "remotePort"))

    @remote_port.setter
    def remote_port(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__80bc2f5cb67a625ab30bb65d04eade7b2fa661d58174e113c15dc712f34a4eb6)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "remotePort", value)

    @builtins.property
    @jsii.member(jsii_name="internalValue")
    def internal_value(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, NetworkPacketCaptureFilter]]:
        # The underlying value: either an unresolved token or a concrete struct.
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, NetworkPacketCaptureFilter]], jsii.get(self, "internalValue"))

    @internal_value.setter
    def internal_value(
        self,
        value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, NetworkPacketCaptureFilter]],
    ) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__4f19eaddce51aa3f579af6615b4f84aa1572abd89a8afb6bea40ef8c2563c95d)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "internalValue", value)
@jsii.data_type(
    jsii_type="@cdktf/provider-azurerm.networkPacketCapture.NetworkPacketCaptureStorageLocation",
    jsii_struct_bases=[],
    name_mapping={"file_path": "filePath", "storage_account_id": "storageAccountId"},
)
class NetworkPacketCaptureStorageLocation:
    # Struct for the "storage_location" block; both fields are optional and are
    # stored in self._values only when provided.

    def __init__(
        self,
        *,
        file_path: typing.Optional[builtins.str] = None,
        storage_account_id: typing.Optional[builtins.str] = None,
    ) -> None:
        '''
        :param file_path: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#file_path NetworkPacketCapture#file_path}.
        :param storage_account_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#storage_account_id NetworkPacketCapture#storage_account_id}.
        '''
        if __debug__:
            # Runtime type validation only runs when assertions are enabled.
            type_hints = typing.get_type_hints(_typecheckingstub__8020dc0f78df468d7e674d0f07570fd939e966223c6b504a1da382ee374ede1f)
            check_type(argname="argument file_path", value=file_path, expected_type=type_hints["file_path"])
            check_type(argname="argument storage_account_id", value=storage_account_id, expected_type=type_hints["storage_account_id"])
        optionals = (
            ("file_path", file_path),
            ("storage_account_id", storage_account_id),
        )
        self._values: typing.Dict[builtins.str, typing.Any] = {
            key: val for key, val in optionals if val is not None
        }

    @builtins.property
    def file_path(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#file_path NetworkPacketCapture#file_path}.'''
        return typing.cast(typing.Optional[builtins.str], self._values.get("file_path"))

    @builtins.property
    def storage_account_id(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#storage_account_id NetworkPacketCapture#storage_account_id}.'''
        return typing.cast(typing.Optional[builtins.str], self._values.get("storage_account_id"))

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Equal only to another instance of the same class with identical values.
        if not isinstance(rhs, self.__class__):
            return False
        return rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        equal = rhs == self
        return not equal

    def __repr__(self) -> str:
        rendered = ", ".join(f"{key}={value!r}" for key, value in self._values.items())
        return "NetworkPacketCaptureStorageLocation(%s)" % rendered
class NetworkPacketCaptureStorageLocationOutputReference(
    _cdktf_9a9027ec.ComplexObject,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-azurerm.networkPacketCapture.NetworkPacketCaptureStorageLocationOutputReference",
):
    # jsii-backed accessor for the resource's "storage_location" block.  All
    # reads/writes delegate to the jsii kernel, so member names and jsii_name
    # mappings must stay in sync with the JSII type definition.

    def __init__(
        self,
        terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
        terraform_attribute: builtins.str,
    ) -> None:
        '''
        :param terraform_resource: The parent resource.
        :param terraform_attribute: The attribute on the parent resource this class is referencing.
        '''
        if __debug__:
            # Runtime argument validation; skipped when Python runs with -O.
            type_hints = typing.get_type_hints(_typecheckingstub__4e196c56aa5e6aa90476a6f23581336043155b894f471f10f1132fe9e6cf980b)
            check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
            check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
        jsii.create(self.__class__, self, [terraform_resource, terraform_attribute])

    @jsii.member(jsii_name="resetFilePath")
    def reset_file_path(self) -> None:
        # Clears the optional file_path attribute back to unset.
        return typing.cast(None, jsii.invoke(self, "resetFilePath", []))

    @jsii.member(jsii_name="resetStorageAccountId")
    def reset_storage_account_id(self) -> None:
        # Clears the optional storage_account_id attribute back to unset.
        return typing.cast(None, jsii.invoke(self, "resetStorageAccountId", []))

    @builtins.property
    @jsii.member(jsii_name="storagePath")
    def storage_path(self) -> builtins.str:
        # Read-only computed attribute exposed by the provider.
        return typing.cast(builtins.str, jsii.get(self, "storagePath"))

    @builtins.property
    @jsii.member(jsii_name="filePathInput")
    def file_path_input(self) -> typing.Optional[builtins.str]:
        # Currently configured (unresolved) value, if any.
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "filePathInput"))

    @builtins.property
    @jsii.member(jsii_name="storageAccountIdInput")
    def storage_account_id_input(self) -> typing.Optional[builtins.str]:
        # Currently configured (unresolved) value, if any.
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "storageAccountIdInput"))

    @builtins.property
    @jsii.member(jsii_name="filePath")
    def file_path(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "filePath"))

    @file_path.setter
    def file_path(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__ff208d361e82542ec7072d9a2536ec103ec462a9f1b997966d38436c855670e6)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "filePath", value)

    @builtins.property
    @jsii.member(jsii_name="storageAccountId")
    def storage_account_id(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "storageAccountId"))

    @storage_account_id.setter
    def storage_account_id(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__b3418f81bbb8993a3abe430463118a8defa1be1b4ffe6ea7ffe4a817d761225e)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "storageAccountId", value)

    @builtins.property
    @jsii.member(jsii_name="internalValue")
    def internal_value(self) -> typing.Optional[NetworkPacketCaptureStorageLocation]:
        # The underlying struct value, if set.
        return typing.cast(typing.Optional[NetworkPacketCaptureStorageLocation], jsii.get(self, "internalValue"))

    @internal_value.setter
    def internal_value(
        self,
        value: typing.Optional[NetworkPacketCaptureStorageLocation],
    ) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__ee20792259c82c88664c2c153e62bdf2cae1b92c87facfdb6c81777890d4f418)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "internalValue", value)
@jsii.data_type(
jsii_type="@cdktf/provider-azurerm.networkPacketCapture.NetworkPacketCaptureTimeouts",
jsii_struct_bases=[],
name_mapping={
"create": "create",
"delete": "delete",
"read": "read",
"update": "update",
},
)
class NetworkPacketCaptureTimeouts:
def __init__(
self,
*,
create: typing.Optional[builtins.str] = None,
delete: typing.Optional[builtins.str] = None,
read: typing.Optional[builtins.str] = None,
update: typing.Optional[builtins.str] = None,
) -> None:
'''
:param create: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#create NetworkPacketCapture#create}.
:param delete: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#delete NetworkPacketCapture#delete}.
:param read: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#read NetworkPacketCapture#read}.
:param update: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#update NetworkPacketCapture#update}.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__0647fbcbb6b70d279835b3c91c8164f2a9cf16a4a1e97ddbe7b07239dfd31b59)
check_type(argname="argument create", value=create, expected_type=type_hints["create"])
check_type(argname="argument delete", value=delete, expected_type=type_hints["delete"])
check_type(argname="argument read", value=read, expected_type=type_hints["read"])
check_type(argname="argument update", value=update, expected_type=type_hints["update"])
self._values: typing.Dict[builtins.str, typing.Any] = {}
if create is not None:
self._values["create"] = create
if delete is not None:
self._values["delete"] = delete
if read is not None:
self._values["read"] = read
if update is not None:
self._values["update"] = update
@builtins.property
def create(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#create NetworkPacketCapture#create}.'''
result = self._values.get("create")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def delete(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#delete NetworkPacketCapture#delete}.'''
result = self._values.get("delete")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def read(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#read NetworkPacketCapture#read}.'''
result = self._values.get("read")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def update(self) -> typing.Optional[builtins.str]:
'''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/azurerm/3.71.0/docs/resources/network_packet_capture#update NetworkPacketCapture#update}.'''
result = self._values.get("update")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
    # Equal only to another instance of the exact same class holding
    # an identical set of configured values.
    if not isinstance(rhs, self.__class__):
        return False
    return rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
    # Explicitly defined as the logical inverse of __eq__.
    equal = (rhs == self)
    return not equal
def __repr__(self) -> str:
    # Render as ClassName(key1=value1, key2=value2, ...) using repr() of each value.
    rendered = ", ".join(f"{key}={value!r}" for key, value in self._values.items())
    return f"NetworkPacketCaptureTimeouts({rendered})"
class NetworkPacketCaptureTimeoutsOutputReference(
    _cdktf_9a9027ec.ComplexObject,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-azurerm.networkPacketCapture.NetworkPacketCaptureTimeoutsOutputReference",
):
    # jsii proxy exposing the `timeouts` block of a NetworkPacketCapture
    # resource for reading and mutation. Every accessor delegates to the jsii
    # kernel (jsii.get / jsii.set / jsii.invoke); no state is held in Python.

    def __init__(
        self,
        terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
        terraform_attribute: builtins.str,
    ) -> None:
        '''
        :param terraform_resource: The parent resource.
        :param terraform_attribute: The attribute on the parent resource this class is referencing.
        '''
        if __debug__:
            # Runtime-validate the arguments against the matching stub signature.
            type_hints = typing.get_type_hints(_typecheckingstub__e995e77a473a49e0ade032ed29bc4e053767c1ac6278265f472f34c65cf0ea66)
            check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"])
            check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"])
        jsii.create(self.__class__, self, [terraform_resource, terraform_attribute])

    # --- reset helpers: clear a timeout so Terraform falls back to its default ---

    @jsii.member(jsii_name="resetCreate")
    def reset_create(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetCreate", []))

    @jsii.member(jsii_name="resetDelete")
    def reset_delete(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetDelete", []))

    @jsii.member(jsii_name="resetRead")
    def reset_read(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetRead", []))

    @jsii.member(jsii_name="resetUpdate")
    def reset_update(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetUpdate", []))

    # --- *Input properties: the raw configured value, None when unset ---

    @builtins.property
    @jsii.member(jsii_name="createInput")
    def create_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "createInput"))

    @builtins.property
    @jsii.member(jsii_name="deleteInput")
    def delete_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "deleteInput"))

    @builtins.property
    @jsii.member(jsii_name="readInput")
    def read_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "readInput"))

    @builtins.property
    @jsii.member(jsii_name="updateInput")
    def update_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "updateInput"))

    # --- read/write timeout properties; setters type-check in debug builds ---

    @builtins.property
    @jsii.member(jsii_name="create")
    def create(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "create"))

    @create.setter
    def create(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__11d6e4ef3e6985da5c8209af6043130b83b3e531dc3b5ee8f95796a809a39643)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "create", value)

    @builtins.property
    @jsii.member(jsii_name="delete")
    def delete(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "delete"))

    @delete.setter
    def delete(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__6474c23cf6eaaa6a6d27e160eeb87d3f56c1d28762ce44029d258e44e8e981a6)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "delete", value)

    @builtins.property
    @jsii.member(jsii_name="read")
    def read(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "read"))

    @read.setter
    def read(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__ca81b8f5ed4945ede1cb27b3ac3c5f7e402272700d6cca3c44e6d5fb3807fd1f)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "read", value)

    @builtins.property
    @jsii.member(jsii_name="update")
    def update(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "update"))

    @update.setter
    def update(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__35502a9922f08fab946b473a25e90c9e6485cbfbbb892da2ba50d5c6fab16e41)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "update", value)

    # internalValue: the whole timeouts struct backing this reference.

    @builtins.property
    @jsii.member(jsii_name="internalValue")
    def internal_value(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, NetworkPacketCaptureTimeouts]]:
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, NetworkPacketCaptureTimeouts]], jsii.get(self, "internalValue"))

    @internal_value.setter
    def internal_value(
        self,
        value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, NetworkPacketCaptureTimeouts]],
    ) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__b54e705e51220ff5514b630ed1e465781de05b2dbebe8352836d16697429761d)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "internalValue", value)
# Public names exported by this generated module.
__all__ = [
    "NetworkPacketCapture",
    "NetworkPacketCaptureConfig",
    "NetworkPacketCaptureFilter",
    "NetworkPacketCaptureFilterList",
    "NetworkPacketCaptureFilterOutputReference",
    "NetworkPacketCaptureStorageLocation",
    "NetworkPacketCaptureStorageLocationOutputReference",
    "NetworkPacketCaptureTimeouts",
    "NetworkPacketCaptureTimeoutsOutputReference",
]

# Resolve forward references in the annotations of the published classes.
publication.publish()
# The functions below are jsii type-checking stubs. They are never called for
# their bodies: typing.get_type_hints() is evaluated against their signatures
# at runtime (see the `if __debug__:` blocks above) so check_type() can
# validate arguments. Their bodies are intentionally empty.

def _typecheckingstub__b5e3c560147495e2d8f597dee2fe43afe5db4036c6bca2f7920f0360f303c119(
    scope: _constructs_77d1e7e8.Construct,
    id_: builtins.str,
    *,
    name: builtins.str,
    network_watcher_name: builtins.str,
    resource_group_name: builtins.str,
    storage_location: typing.Union[NetworkPacketCaptureStorageLocation, typing.Dict[builtins.str, typing.Any]],
    target_resource_id: builtins.str,
    filter: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union[NetworkPacketCaptureFilter, typing.Dict[builtins.str, typing.Any]]]]] = None,
    id: typing.Optional[builtins.str] = None,
    maximum_bytes_per_packet: typing.Optional[jsii.Number] = None,
    maximum_bytes_per_session: typing.Optional[jsii.Number] = None,
    maximum_capture_duration: typing.Optional[jsii.Number] = None,
    timeouts: typing.Optional[typing.Union[NetworkPacketCaptureTimeouts, typing.Dict[builtins.str, typing.Any]]] = None,
    connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
    count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
    depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
    for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
    lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
    provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
    provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__cb4ba221b9ca64b230a733aa5bfd6a28b7522ea446cc0789118802663ffdac5d(
    value: typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union[NetworkPacketCaptureFilter, typing.Dict[builtins.str, typing.Any]]]],
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__783e646ebd7eb73eecf6acb7fce7b21ee9b735453423f72e234851da341cc9d3(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__287f86a37cb9fc29e750d0a9d9edb64d455e57da69a7198d83951a5345c65b1a(
    value: jsii.Number,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__ac1100b4fb8058bc9a91b4970dd24478ffe1eeb699fefa69e255e8240a945aca(
    value: jsii.Number,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__8836dc30a0d04488def4d0f0573cb7b2b3d70695ba6c4c120fd6a3fee08c5777(
    value: jsii.Number,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__2dad01ceec066300f106ed24514b032df161bd9705dcbb3cf099b26647f1c5d7(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__6630cf64256211385e3c54e28577590e66daa2a4fd547ed3d861658e07d5dc0e(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__8b7efcf5f5e45aa602b310f2e5a41df8160625659edb649dd8d31bcdc785f8bb(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__ba1b06ae80fffa1b6f706d997c90792c2fcbf79fc71a61aecfa6e32d430776d2(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__f441f1d3a462367d5d95747872c257dbad946ddffbac2c01eadb5035fc3d0548(
    *,
    connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
    count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
    depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
    for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
    lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
    provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
    provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
    name: builtins.str,
    network_watcher_name: builtins.str,
    resource_group_name: builtins.str,
    storage_location: typing.Union[NetworkPacketCaptureStorageLocation, typing.Dict[builtins.str, typing.Any]],
    target_resource_id: builtins.str,
    filter: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.Sequence[typing.Union[NetworkPacketCaptureFilter, typing.Dict[builtins.str, typing.Any]]]]] = None,
    id: typing.Optional[builtins.str] = None,
    maximum_bytes_per_packet: typing.Optional[jsii.Number] = None,
    maximum_bytes_per_session: typing.Optional[jsii.Number] = None,
    maximum_capture_duration: typing.Optional[jsii.Number] = None,
    timeouts: typing.Optional[typing.Union[NetworkPacketCaptureTimeouts, typing.Dict[builtins.str, typing.Any]]] = None,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__da45bcfdcd79e1f418a2fa7505c82dbfb1abc265032bcee65775f04e92d3422b(
    *,
    protocol: builtins.str,
    local_ip_address: typing.Optional[builtins.str] = None,
    local_port: typing.Optional[builtins.str] = None,
    remote_ip_address: typing.Optional[builtins.str] = None,
    remote_port: typing.Optional[builtins.str] = None,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__4c91cdb38743ef46cdf132089a9266973146c4b908df11ca69506b5e924e6b81(
    terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
    terraform_attribute: builtins.str,
    wraps_set: builtins.bool,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__79b838064d7e316be2e6230d158c7e147d05dfa05034ce6e8c70da500138834e(
    index: jsii.Number,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__3a3512daa04257364aa75569bac8144a4685c014d33294caeb1a08c75f90c3d1(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__4f35107cd448983d59b071898ab9afd4d98255279203811bd0bdf958c6af081a(
    value: _cdktf_9a9027ec.IInterpolatingParent,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__9096916dd6149d057c0e800cdbd105afe1ba52f6640a328157617897df256bee(
    value: builtins.bool,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__62f0660b575e48a5b9112b879fe57a158eb1b391f5bc140e39bb2f6a967f076f(
    value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, typing.List[NetworkPacketCaptureFilter]]],
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__a1e96b0fec5cda5787ea563ea9bab08ce5b06a4b650db14ac05fd6c579643d3a(
    terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
    terraform_attribute: builtins.str,
    complex_object_index: jsii.Number,
    complex_object_is_from_set: builtins.bool,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__f08866833e3211ae715f252399d1714e0678492831227a17c73795978d2378ef(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__51ccb60af3887536dd228728f7f86fdbb3049b03686001183e975ddf19ab3527(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__d18b360b592115d2bb9b1f1ebd0093ac85aa84f2bfc49541d9f9d9dbbc31d99d(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__7598ede225c62e9616c880db4f3b6150793266318d1739fc6ba1072bb907c42d(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__80bc2f5cb67a625ab30bb65d04eade7b2fa661d58174e113c15dc712f34a4eb6(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__4f19eaddce51aa3f579af6615b4f84aa1572abd89a8afb6bea40ef8c2563c95d(
    value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, NetworkPacketCaptureFilter]],
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__8020dc0f78df468d7e674d0f07570fd939e966223c6b504a1da382ee374ede1f(
    *,
    file_path: typing.Optional[builtins.str] = None,
    storage_account_id: typing.Optional[builtins.str] = None,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__4e196c56aa5e6aa90476a6f23581336043155b894f471f10f1132fe9e6cf980b(
    terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
    terraform_attribute: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__ff208d361e82542ec7072d9a2536ec103ec462a9f1b997966d38436c855670e6(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__b3418f81bbb8993a3abe430463118a8defa1be1b4ffe6ea7ffe4a817d761225e(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__ee20792259c82c88664c2c153e62bdf2cae1b92c87facfdb6c81777890d4f418(
    value: typing.Optional[NetworkPacketCaptureStorageLocation],
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__0647fbcbb6b70d279835b3c91c8164f2a9cf16a4a1e97ddbe7b07239dfd31b59(
    *,
    create: typing.Optional[builtins.str] = None,
    delete: typing.Optional[builtins.str] = None,
    read: typing.Optional[builtins.str] = None,
    update: typing.Optional[builtins.str] = None,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__e995e77a473a49e0ade032ed29bc4e053767c1ac6278265f472f34c65cf0ea66(
    terraform_resource: _cdktf_9a9027ec.IInterpolatingParent,
    terraform_attribute: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__11d6e4ef3e6985da5c8209af6043130b83b3e531dc3b5ee8f95796a809a39643(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__6474c23cf6eaaa6a6d27e160eeb87d3f56c1d28762ce44029d258e44e8e981a6(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__ca81b8f5ed4945ede1cb27b3ac3c5f7e402272700d6cca3c44e6d5fb3807fd1f(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__35502a9922f08fab946b473a25e90c9e6485cbfbbb892da2ba50d5c6fab16e41(
    value: builtins.str,
) -> None:
    """Type checking stubs"""
    pass

def _typecheckingstub__b54e705e51220ff5514b630ed1e465781de05b2dbebe8352836d16697429761d(
    value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, NetworkPacketCaptureTimeouts]],
) -> None:
    """Type checking stubs"""
    pass
/gs2-cdk-1.0.22.tar.gz/gs2-cdk-1.0.22/src/gs2_cdk/lottery/model/Namespace.py | from __future__ import annotations
from typing import *
from ...core.model import CdkResource, Stack
from ...core.func import GetAttr
from ...core.model import TransactionSetting
from ...core.model import LogSetting
from ..ref.NamespaceRef import NamespaceRef
from .CurrentMasterData import CurrentMasterData
from .LotteryModel import LotteryModel
from .PrizeTable import PrizeTable
from .options.NamespaceOptions import NamespaceOptions
class Namespace(CdkResource):
    """CDK resource representing a GS2 Lottery namespace.

    On construction the resource registers itself with the given stack; its
    configured fields are serialized by ``properties()``.
    """

    stack: Stack
    name: str
    transaction_setting: TransactionSetting
    description: Optional[str] = None
    lottery_trigger_script_id: Optional[str] = None
    choice_prize_table_script_id: Optional[str] = None
    log_setting: Optional[LogSetting] = None

    def __init__(
        self,
        stack: Stack,
        name: str,
        transaction_setting: TransactionSetting,
        options: Optional[NamespaceOptions] = None,
    ):
        """Create the namespace and add it to *stack*.

        Args:
            stack: Stack this resource belongs to.
            name: Namespace name; also used as the resource id suffix.
            transaction_setting: Transaction configuration for the namespace.
            options: Optional extra settings. Defaults to a fresh
                ``NamespaceOptions()`` per call — the previous signature used a
                single shared default instance (mutable-default pitfall) and
                crashed on an explicit ``None`` despite the ``Optional`` hint.
        """
        super().__init__(
            "Lottery_Namespace_" + name
        )
        # Instantiate the default inside the call to avoid sharing one
        # NamespaceOptions instance across all constructions.
        if options is None:
            options = NamespaceOptions()

        self.stack = stack
        self.name = name
        self.transaction_setting = transaction_setting
        # Falsy option values (None / empty) are normalized to None so that
        # properties() omits them from the template.
        self.description = options.description if options.description else None
        self.lottery_trigger_script_id = options.lottery_trigger_script_id if options.lottery_trigger_script_id else None
        self.choice_prize_table_script_id = options.choice_prize_table_script_id if options.choice_prize_table_script_id else None
        self.log_setting = options.log_setting if options.log_setting else None

        stack.add_resource(
            self,
        )

    def alternate_keys(
        self,
    ):
        """Return the attribute name used as the alternate lookup key."""
        return "name"

    def resource_type(
        self,
    ) -> str:
        """Return the GS2 resource type identifier for this resource."""
        return "GS2::Lottery::Namespace"

    def properties(
        self,
    ) -> Dict[str, Any]:
        """Serialize configured fields into a template property dict.

        Fields left as ``None`` are omitted from the result.
        """
        properties: Dict[str, Any] = {}
        if self.name is not None:
            properties["Name"] = self.name
        if self.description is not None:
            properties["Description"] = self.description
        if self.transaction_setting is not None:
            properties["TransactionSetting"] = self.transaction_setting.properties(
            )
        if self.lottery_trigger_script_id is not None:
            properties["LotteryTriggerScriptId"] = self.lottery_trigger_script_id
        if self.choice_prize_table_script_id is not None:
            properties["ChoicePrizeTableScriptId"] = self.choice_prize_table_script_id
        if self.log_setting is not None:
            properties["LogSetting"] = self.log_setting.properties(
            )
        return properties

    def ref(
        self,
    ) -> NamespaceRef:
        """Return a reference handle to this namespace by name."""
        return NamespaceRef(
            self.name,
        )

    def get_attr_namespace_id(
        self,
    ) -> GetAttr:
        """Return a GetAttr expression resolving to the namespace id."""
        return GetAttr(
            self,
            "Item.NamespaceId",
            None,
        )

    def master_data(
        self,
        lottery_models: List[LotteryModel],
        prize_tables: List[PrizeTable],
    ) -> Namespace:
        """Attach master data to this namespace and return self for chaining.

        The created CurrentMasterData resource depends on this namespace.
        """
        CurrentMasterData(
            self.stack,
            self.name,
            lottery_models,
            prize_tables,
        ).add_depends_on(
            self,
        )
        return self
/ciefunctions-1.0.2.tar.gz/ciefunctions-1.0.2/tc1_97/MathJax-2.7.5/jax/output/SVG/fonts/TeX/Typewriter/Regular/CombDiacritMarks.js | MathJax.Hub.Insert(MathJax.OutputJax.SVG.FONTDATA.FONTS.MathJax_Typewriter,{768:[611,-485,0,-409,-195,"-409 569Q-409 586 -399 596T-377 610Q-376 610 -372 610T-365 611Q-355 610 -284 588T-210 563Q-195 556 -195 537Q-195 533 -197 522T-208 498T-229 485Q-238 485 -312 508T-388 533Q-400 538 -405 552Q-409 559 -409 569"],769:[611,-485,0,-331,-117,"-297 485Q-315 485 -323 505T-331 537Q-331 556 -316 563Q-307 569 -170 610Q-169 610 -165 610T-157 611Q-141 609 -131 600T-119 584T-117 569Q-117 555 -124 545T-138 533Q-140 531 -214 508T-297 485"],770:[611,-460,0,-429,-97,"-387 460Q-404 460 -416 479T-429 512Q-429 527 -419 534Q-416 536 -347 571T-272 609Q-269 611 -261 611Q-254 610 -182 574Q-168 567 -156 561T-136 550T-123 543T-114 538T-109 535T-105 532T-103 529T-100 525Q-97 518 -97 512Q-97 498 -109 479T-139 460H-141Q-148 460 -209 496L-263 526L-317 496Q-378 460 -387 460"],771:[611,-466,0,-438,-88,"-400 467Q-412 467 -425 480T-438 509Q-437 520 -414 543Q-353 602 -316 609Q-306 611 -301 611Q-279 611 -262 596T-235 566T-221 551Q-206 551 -158 594Q-142 610 -129 610H-125Q-114 610 -101 597T-88 568Q-89 557 -112 534Q-177 469 -220 466Q-247 466 -265 481T-291 511T-305 526Q-320 526 -368 483Q-384 467 -396 467H-400"],772:[578,-500,0,-452,-74,"-429 500Q-440 504 -445 511T-450 522T-452 536Q-452 552 -451 556Q-445 571 -434 574T-379 578Q-369 578 -330 578T-261 577H-96Q-94 575 -90 573T-85 569T-81 564T-77 558T-75 550T-74 538Q-74 522 -78 515T-96 500H-429"],774:[611,-504,0,-447,-79,"-446 579Q-446 611 -412 611H-407Q-383 609 -378 599T-358 587Q-340 583 -263 583H-235Q-159 583 -152 593Q-145 611 -120 611H-117H-115Q-79 611 -79 577Q-80 552 -95 536T-140 514T-191 506T-251 504H-263H-274Q-311 504 -334 505T-386 513T-431 536T-446 579"],776:[612,-519,0,-421,-104,"-421 565Q-421 590 -405 600T-370 611Q-350 611 -345 610Q-308 599 -308 565Q-308 545 -323 532T-359 519H-366H-370Q-405 519 -418 547Q-421 553 -421 
565ZM-218 565Q-218 580 -208 593T-179 610Q-177 610 -175 610T-171 611Q-170 612 -158 612Q-130 611 -117 597T-104 565T-116 534T-160 519H-167Q-189 519 -203 532T-218 565"],778:[619,-499,0,-344,-182,"-344 558Q-344 583 -321 601T-262 619Q-225 619 -204 600T-182 560Q-182 536 -205 518T-264 499Q-301 499 -322 519T-344 558ZM-223 559Q-223 570 -234 579T-261 588T-289 580T-303 559Q-303 549 -293 540T-263 530T-234 539T-223 559"],780:[577,-449,0,-427,-99,"-427 525Q-427 542 -417 559T-392 577Q-385 577 -323 553L-263 530L-203 553Q-143 576 -136 576Q-118 576 -109 559T-99 525Q-99 508 -107 502T-161 481Q-177 475 -186 472Q-256 449 -263 449Q-272 449 -339 472T-412 498Q-420 501 -423 508T-427 520V525"]});MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/Typewriter/Regular/CombDiacritMarks.js"); | PypiClean |
/monk_pytorch_cuda90_test-0.0.1-py3-none-any.whl/monk/pytorch/finetune/level_14_master_main.py | from monk.pytorch.finetune.imports import *
from monk.system.imports import *
from monk.pytorch.finetune.level_13_updates_main import prototype_updates
class prototype_master(prototype_updates):
    '''
    Main class for all functions in expert mode

    Args:
        verbose (int): Set verbosity levels
                        0 - Print Nothing
                        1 - Print desired details
    '''
    @accepts("self", verbose=int, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def __init__(self, verbose=1):
        # All state lives in self.system_dict, initialised by the base class chain.
        super().__init__(verbose=verbose);
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Dataset(self):
    '''
    Load transforms, set the dataloader(s) and print a dataset summary.

    Args:
        None

    Returns:
        None
    '''
    # In eval/infer mode only the test split is prepared; otherwise train (+val).
    self.set_dataset_final(test=self.system_dict["states"]["eval_infer"]);
    save(self.system_dict);

    if(self.system_dict["states"]["eval_infer"]):
        # Summary for external-evaluation / inference mode.
        self.custom_print("Pre-Composed Test Transforms");
        self.custom_print(self.system_dict["dataset"]["transforms"]["test"]);
        self.custom_print("");
        self.custom_print("Dataset Numbers");
        self.custom_print("    Num test images: {}".format(self.system_dict["dataset"]["params"]["num_test_images"]));
        self.custom_print("    Num classes:      {}".format(self.system_dict["dataset"]["params"]["num_classes"]))
        self.custom_print("");
    else:
        # Summary for training mode (train + validation splits).
        self.custom_print("Pre-Composed Train Transforms");
        self.custom_print(self.system_dict["dataset"]["transforms"]["train"]);
        self.custom_print("");
        self.custom_print("Pre-Composed Val Transforms");
        self.custom_print(self.system_dict["dataset"]["transforms"]["val"]);
        self.custom_print("");
        self.custom_print("Dataset Numbers");
        self.custom_print("    Num train images: {}".format(self.system_dict["dataset"]["params"]["num_train_images"]));
        self.custom_print("    Num val images:   {}".format(self.system_dict["dataset"]["params"]["num_val_images"]));
        self.custom_print("    Num classes:      {}".format(self.system_dict["dataset"]["params"]["num_classes"]))
        self.custom_print("");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", [int, float], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Dataset_Percent(self, percent):
    '''
    Sample a sub-dataset and write it out as CSV file(s) in the working
    directory: "sampled_dataset_train.csv" and, for the *-val dataset types,
    "sampled_dataset_val.csv".

    Args:
        percent (int, float): percentage of the dataset to keep

    Returns:
        None
    '''
    def sample_folder_dataset(class_root, data_root, out_csv):
        # Keep the first `percent`% of images from every class sub-folder of
        # data_root. Class names are listed from class_root: the original
        # implementation deliberately used the *train* folder's class list
        # when sampling the val split, which is preserved here.
        label_list = [];
        image_list = [];
        classes = os.listdir(class_root);
        for label in classes:
            tmp_image_list = os.listdir(data_root + "/" + label);
            subset_image_list = tmp_image_list[:int(len(tmp_image_list)*percent/100.0)];
            image_list += [label + "/" + fname for fname in subset_image_list];
            label_list += [label]*len(subset_image_list);
        df = pd.DataFrame({'ID': image_list, 'Label': label_list});
        df.to_csv(out_csv, index=False);

    def sample_csv_dataset(csv_in, out_csv):
        # Shuffle rows, then keep the first `percent`% of them.
        df = pd.read_csv(csv_in);
        df = df.iloc[np.random.permutation(len(df))];
        df.iloc[:int(len(df)*percent/100.0)].to_csv(out_csv, index=False);

    dataset_type = self.system_dict["dataset"]["dataset_type"];
    dataset_train_path = self.system_dict["dataset"]["train_path"];
    dataset_val_path = self.system_dict["dataset"]["val_path"];
    csv_train = self.system_dict["dataset"]["csv_train"];
    csv_val = self.system_dict["dataset"]["csv_val"];

    if(dataset_type == "train"):
        sample_folder_dataset(dataset_train_path, dataset_train_path, "sampled_dataset_train.csv");
    elif(dataset_type == "train-val"):
        sample_folder_dataset(dataset_train_path, dataset_train_path, "sampled_dataset_train.csv");
        sample_folder_dataset(dataset_train_path, dataset_val_path, "sampled_dataset_val.csv");
    elif(dataset_type == "csv_train"):
        sample_csv_dataset(csv_train, "sampled_dataset_train.csv");
    elif(dataset_type == "csv_train-val"):
        sample_csv_dataset(csv_train, "sampled_dataset_train.csv");
        sample_csv_dataset(csv_val, "sampled_dataset_val.csv");
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Model(self):
    '''
    Load the model as per the parameters already set on this experiment.

    Args:
        None

    Returns:
        None

    Raises:
        ConstraintError: when the experiment was created in Copy-From mode,
            where the model is inherited and must not be set again.
    '''
    if(self.system_dict["states"]["copy_from"]):
        msg = "Cannot set model in Copy-From mode.\n";
        raise ConstraintError(msg)
    self.set_model_final();
    save(self.system_dict)
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Train(self):
    '''
    Master function for training; delegates to set_training_final()
    and persists the updated experiment state.

    Args:
        None

    Returns:
        None
    '''
    self.set_training_final();
    save(self.system_dict);
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Evaluate(self):
    '''
    Master function for external validation.

    Args:
        None

    Returns:
        tuple: overall accuracy and class-based accuracy as produced by the
            underlying evaluation routine.
    '''
    # Multi-label datasets need the dedicated evaluation path; single-label
    # (or unset, stored as False) uses the default one.
    if(self.system_dict["dataset"]["label_type"] == "single" or self.system_dict["dataset"]["label_type"] == False):
        accuracy, class_based_accuracy = self.set_evaluation_final();
    else:
        accuracy, class_based_accuracy = self.set_evaluation_final_multiple();
    # Persist the updated experiment state once, common to both paths.
    save(self.system_dict);
    return accuracy, class_based_accuracy;
###############################################################################################################################################
###############################################################################################################################################
@error_checks(None, img_name=["file", "r"], img_dir=["folder", "r"], return_raw=False, img_thresh=["gte", 0.0, "lte", 1.0], post_trace=False)
@accepts("self", img_name=[str, bool], img_dir=[str, bool], return_raw=bool, img_thresh=float, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Infer(self, img_name=False, img_dir=False, return_raw=False, img_thresh=0.5):
    '''
    Master function for inference.

    Args:
        img_name (str): path to a single image
        img_dir (str): path to a folder containing images (optional; takes
            precedence over img_name when provided)
        return_raw (bool): If True, the output dictionary contains the image
            probability for every class in the set. Else, only the most
            probable class score is returned back.
        img_thresh (float): Thresholding for multi-label image classification.

    Returns:
        dict: Dictionary containing details on predictions.
    '''
    # Build the common keyword set once; img_dir wins over img_name, matching
    # the original branch order (img_name is only used when img_dir is falsy).
    kwargs = {"return_raw": return_raw};
    if(not img_dir):
        kwargs["img_name"] = img_name;
    else:
        kwargs["img_dir"] = img_dir;

    label_type = self.system_dict["dataset"]["label_type"];
    if(label_type == "single" or label_type == False):
        return self.set_prediction_final(**kwargs);

    # Multi-label classification additionally needs the probability threshold.
    kwargs["img_thresh"] = img_thresh;
    return self.set_prediction_final_multiple(**kwargs);
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", network=list, data_shape=[tuple, int], use_gpu=bool, network_initializer=str, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Compile_Network(self, network, data_shape=(3, 224, 224), use_gpu=True, network_initializer="xavier_normal"):
    '''
    Master function for compiling a custom network and initializing it.

    Args:
        network: Network stacked as list of lists
        data_shape (tuple): Input shape of data in format C, H, W
        use_gpu (bool): If True, model loaded on gpu
        network_initializer (str): Initialize network with random weights. Select the random generator type function.

    Returns:
        None
    '''
    # Record the custom architecture and mark the model type accordingly,
    # then resolve the compute device and build the model.
    self.system_dict["custom_model"]["network_stack"] = network;
    self.system_dict["custom_model"]["network_initializer"] = network_initializer;
    self.system_dict["model"]["type"] = "custom";
    self.system_dict["dataset"]["params"]["data_shape"] = data_shape;
    self.system_dict = set_device(use_gpu, self.system_dict);
    save(self.system_dict);
    self.set_model_final();
###############################################################################################################################################
###############################################################################################################################################
@accepts("self", data_shape=tuple, port=int, post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True)
def Visualize_With_Netron(self, data_shape=None, port=None):
    '''
    Visualize the current network with the netron library.

    Exports the loaded model to ONNX ("model.onnx") and opens it in netron.
    Works only in Jupyter Notebooks (not on Kaggle).

    Args:
        data_shape (tuple): Input shape of the data in (C, H, W) order.
        port (int): Free localhost port for the netron server.

    Returns:
        None
    '''
    self.custom_print("Using Netron To Visualize")
    self.custom_print("Not compatible on kaggle")
    self.custom_print("Compatible only for Jupyter Notebooks")
    if not data_shape:
        self.custom_print("Provide data_shape argument")
        return
    c, h, w = data_shape
    # Dummy input used both to trace the model and to fix the ONNX input shape.
    dummy = torch.randn(1, c, h, w, requires_grad=True)
    dummy = dummy.to(self.system_dict["local"]["device"])
    # Run the model once so the trace has concrete outputs.
    torch_out = self.system_dict["local"]["model"](dummy)
    # Export the model
    torch.onnx.export(self.system_dict["local"]["model"],    # model being run
                      dummy,                                 # model input (or a tuple for multiple inputs)
                      "model.onnx",                          # where to save the model
                      export_params=True,                    # store the trained parameter weights inside the model file
                      opset_version=10,                      # the ONNX version to export the model to
                      do_constant_folding=True,              # whether to execute constant folding for optimization
                      input_names = ['input'],               # the model's input names
                      output_names = ['output'],             # the model's output names
                      dynamic_axes={'input' : {0 : 'batch_size'},    # variable length axes
                                    'output' : {0 : 'batch_size'}})
    import netron
    if port:
        netron.start('model.onnx', port=port)
    else:
        netron.start('model.onnx')
############################################################################################################################################### | PypiClean |
/sympy.keras-1.0.23.tar.gz/sympy.keras-1.0.23/sympy/stats/matrix_distributions.py | from sympy import S, Basic, exp, multigamma, pi
from sympy.core.sympify import sympify, _sympify
from sympy.matrices import (ImmutableMatrix, Inverse, Trace, Determinant,
MatrixSymbol, MatrixBase, Transpose, MatrixSet,
matrix2numpy)
from sympy.stats.rv import (_value_check, RandomMatrixSymbol, NamedArgsMixin, PSpace,
_symbol_converter, MatrixDomain)
from sympy.external import import_module
################################################################################
#------------------------Matrix Probability Space------------------------------#
################################################################################
class MatrixPSpace(PSpace):
    """Probability space backing a matrix-valued random symbol.

    Stores ``(sym, distribution, dim_n, dim_m)`` as the Basic args.
    """
    def __new__(cls, sym, distribution, dim_n, dim_m):
        sym = _symbol_converter(sym)
        dim_n, dim_m = _sympify(dim_n), _sympify(dim_m)
        if not (dim_n.is_integer and dim_m.is_integer):
            raise ValueError("Dimensions should be integers")
        return Basic.__new__(cls, sym, distribution, dim_n, dim_m)

    @property
    def symbol(self):
        return self.args[0]

    @property
    def distribution(self):
        return self.args[1]

    @property
    def domain(self):
        return MatrixDomain(self.symbol, self.distribution.set)

    @property
    def value(self):
        # The dimensions stored at construction time become the symbol's shape.
        return RandomMatrixSymbol(self.symbol, self.args[2], self.args[3], self)

    @property
    def values(self):
        return {self.value}

    def compute_density(self, expr, *args):
        matrix_symbols = expr.atoms(RandomMatrixSymbol)
        # Only a bare RandomMatrixSymbol is supported; anything composite
        # (or mixing several matrix distributions) has no algorithm yet.
        if not isinstance(expr, RandomMatrixSymbol) or len(matrix_symbols) > 1:
            raise NotImplementedError("Currently, no algorithm has been "
                 "implemented to handle general expressions containing "
                 "multiple matrix distributions.")
        return self.distribution.pdf(expr)

    def sample(self, size=(), library='scipy'):
        """
        Internal sample method

        Returns dictionary mapping RandomMatrixSymbol to realization value.
        """
        return {self.value: self.distribution.sample(size, library=library)}
def rv(symbol, cls, args):
    """Build a RandomMatrixSymbol for distribution class *cls* with *args*."""
    dist_args = [sympify(a) for a in args]
    dist = cls(*dist_args)
    dist.check(*dist_args)
    rows, cols = dist.dimension
    return MatrixPSpace(symbol, dist, rows, cols).value
class SampleMatrixScipy:
    """Returns the sample from scipy of the given distribution"""
    def __new__(cls, dist, size):
        return cls._sample_scipy(dist, size)

    @classmethod
    def _sample_scipy(cls, dist, size):
        """Sample from SciPy; None when the distribution is unsupported."""
        from scipy import stats as scipy_stats
        scipy_rv_map = {
            'WishartDistribution': lambda dist, size: scipy_stats.wishart.rvs(
                df=int(dist.n), scale=matrix2numpy(dist.scale_matrix, float), size=size),
            'MatrixNormalDistribution': lambda dist, size: scipy_stats.matrix_normal.rvs(
                mean=matrix2numpy(dist.location_matrix, float),
                rowcov=matrix2numpy(dist.scale_matrix_1, float),
                colcov=matrix2numpy(dist.scale_matrix_2, float), size=size)
        }
        sampler = scipy_rv_map.get(dist.__class__.__name__)
        if sampler is None:
            return None
        return sampler(dist, size)
class SampleMatrixNumpy:
    """Returns the sample from numpy of the given distribution"""
    ### TODO: Add tests after adding matrix distributions in numpy_rv_map
    def __new__(cls, dist, size):
        return cls._sample_numpy(dist, size)

    @classmethod
    def _sample_numpy(cls, dist, size):
        """Sample from NumPy; no matrix distributions are mapped yet."""
        numpy_rv_map = {}
        sampler = numpy_rv_map.get(dist.__class__.__name__)
        if sampler is None:
            return None
        return sampler(dist, size)
class SampleMatrixPymc:
    """Returns the sample from pymc3 of the given distribution"""
    def __new__(cls, dist, size):
        return cls._sample_pymc3(dist, size)

    @classmethod
    def _sample_pymc3(cls, dist, size):
        """Sample from PyMC3; None when the distribution is unsupported."""
        import pymc3
        pymc3_rv_map = {
            'MatrixNormalDistribution': lambda dist: pymc3.MatrixNormal('X',
                mu=matrix2numpy(dist.location_matrix, float),
                rowcov=matrix2numpy(dist.scale_matrix_1, float),
                colcov=matrix2numpy(dist.scale_matrix_2, float),
                shape=dist.location_matrix.shape),
            'WishartDistribution': lambda dist: pymc3.WishartBartlett('X',
                nu=int(dist.n), S=matrix2numpy(dist.scale_matrix, float))
        }
        builder = pymc3_rv_map.get(dist.__class__.__name__)
        if builder is None:
            return None
        # The RV must be declared and sampled inside a model context.
        with pymc3.Model():
            builder(dist)
            return pymc3.sample(size, chains=1, progressbar=False)[:]['X']
# Dispatch table mapping a sampling-library name (the ``library=`` argument
# of ``sample``) to the helper class that knows how to draw from it.
_get_sample_class_matrixrv = {
    'scipy': SampleMatrixScipy,
    'pymc3': SampleMatrixPymc,
    'numpy': SampleMatrixNumpy
}
################################################################################
#-------------------------Matrix Distribution----------------------------------#
################################################################################
class MatrixDistribution(Basic, NamedArgsMixin):
    """
    Abstract class for Matrix Distribution
    """
    def __new__(cls, *args):
        return Basic.__new__(cls, *[sympify(a) for a in args])

    @staticmethod
    def check(*args):
        # Subclasses override this to validate their parameters.
        pass

    def __call__(self, expr):
        # Plain nested lists are accepted as a convenience.
        if isinstance(expr, list):
            expr = ImmutableMatrix(expr)
        return self.pdf(expr)

    def sample(self, size=(), library='scipy'):
        """
        Internal sample method

        Returns dictionary mapping RandomSymbol to realization value.
        """
        libraries = ('scipy', 'numpy', 'pymc3')
        if library not in libraries:
            raise NotImplementedError("Sampling from %s is not supported yet."
                                      % str(library))
        if not import_module(library):
            raise ValueError("Failed to import %s" % library)
        samps = _get_sample_class_matrixrv[library](self, size)
        if samps is not None:
            return samps
        raise NotImplementedError(
            "Sampling for %s is not currently implemented from %s"
            % (self.__class__.__name__, library)
        )
################################################################################
#------------------------Matrix Distribution Types-----------------------------#
################################################################################
#-------------------------------------------------------------------------------
# Matrix Gamma distribution ----------------------------------------------------
class MatrixGammaDistribution(MatrixDistribution):
    """Matrix Gamma distribution with shape ``alpha``, scale ``beta`` and a
    positive definite square ``scale_matrix``."""
    _argnames = ('alpha', 'beta', 'scale_matrix')

    @staticmethod
    def check(alpha, beta, scale_matrix):
        # Positive definiteness can only be verified on explicit matrices,
        # not on symbolic MatrixSymbol placeholders.
        if not isinstance(scale_matrix, MatrixSymbol):
            _value_check(scale_matrix.is_positive_definite, "The shape "
                "matrix must be positive definite.")
        _value_check(scale_matrix.is_square, "Should "
            "be square matrix")
        _value_check(alpha.is_positive, "Shape parameter should be positive.")
        _value_check(beta.is_positive, "Scale parameter should be positive.")

    @property
    def set(self):
        k = self.scale_matrix.shape[0]
        # BUG FIX: use S.Reals -- the bare name `Reals` is not imported
        # anywhere in this module, so accessing this property raised NameError.
        return MatrixSet(k, k, S.Reals)

    @property
    def dimension(self):
        return self.scale_matrix.shape

    def pdf(self, x):
        """Probability density of the distribution at matrix ``x``."""
        alpha, beta, scale_matrix = self.alpha, self.beta, self.scale_matrix
        p = scale_matrix.shape[0]
        if isinstance(x, list):
            x = ImmutableMatrix(x)
        if not isinstance(x, (MatrixBase, MatrixSymbol)):
            raise ValueError("%s should be an isinstance of Matrix "
                    "or MatrixSymbol" % str(x))
        sigma_inv_x = - Inverse(scale_matrix)*x / beta
        term1 = exp(Trace(sigma_inv_x))/((beta**(p*alpha)) * multigamma(alpha, p))
        term2 = (Determinant(scale_matrix))**(-alpha)
        term3 = (Determinant(x))**(alpha - S(p + 1)/2)
        return term1 * term2 * term3
def MatrixGamma(symbol, alpha, beta, scale_matrix):
    """
    Creates a random variable with Matrix Gamma Distribution.

    The density of the said distribution can be found at [1].

    Parameters
    ==========

    alpha: Positive Real number
        Shape Parameter
    beta: Positive Real number
        Scale Parameter
    scale_matrix: Positive definite real square matrix
        Scale Matrix (a plain nested list is converted to ImmutableMatrix)

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density, MatrixGamma
    >>> from sympy import MatrixSymbol, symbols
    >>> a, b = symbols('a b', positive=True)
    >>> M = MatrixGamma('M', a, b, [[2, 1], [1, 2]])
    >>> X = MatrixSymbol('X', 2, 2)
    >>> density(M)(X).doit()
    3**(-a)*b**(-2*a)*exp(Trace(Matrix([
    [-2/3,  1/3],
    [ 1/3, -2/3]])*X)/b)*Determinant(X)**(a - 3/2)/(sqrt(pi)*gamma(a)*gamma(a - 1/2))
    >>> density(M)([[1, 0], [0, 1]]).doit()
    3**(-a)*b**(-2*a)*exp(-4/(3*b))/(sqrt(pi)*gamma(a)*gamma(a - 1/2))

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Matrix_gamma_distribution

    """
    # Accept a plain nested list for convenience.
    if isinstance(scale_matrix, list):
        scale_matrix = ImmutableMatrix(scale_matrix)
    return rv(symbol, MatrixGammaDistribution, (alpha, beta, scale_matrix))
#-------------------------------------------------------------------------------
# Wishart Distribution ---------------------------------------------------------
class WishartDistribution(MatrixDistribution):
    """Wishart distribution with ``n`` degrees of freedom and a positive
    definite square ``scale_matrix``."""
    _argnames = ('n', 'scale_matrix')

    @staticmethod
    def check(n, scale_matrix):
        # Positive definiteness can only be verified on explicit matrices.
        if not isinstance(scale_matrix, MatrixSymbol):
            _value_check(scale_matrix.is_positive_definite, "The shape "
                "matrix must be positive definite.")
        _value_check(scale_matrix.is_square, "Should "
            "be square matrix")
        _value_check(n.is_positive, "Shape parameter should be positive.")

    @property
    def set(self):
        k = self.scale_matrix.shape[0]
        # BUG FIX: use S.Reals -- the bare name `Reals` is not imported
        # anywhere in this module, so accessing this property raised NameError.
        return MatrixSet(k, k, S.Reals)

    @property
    def dimension(self):
        return self.scale_matrix.shape

    def pdf(self, x):
        """Probability density of the distribution at matrix ``x``."""
        n, scale_matrix = self.n, self.scale_matrix
        p = scale_matrix.shape[0]
        if isinstance(x, list):
            x = ImmutableMatrix(x)
        if not isinstance(x, (MatrixBase, MatrixSymbol)):
            raise ValueError("%s should be an isinstance of Matrix "
                    "or MatrixSymbol" % str(x))
        sigma_inv_x = - Inverse(scale_matrix)*x / S(2)
        term1 = exp(Trace(sigma_inv_x))/((2**(p*n/S(2))) * multigamma(n/S(2), p))
        term2 = (Determinant(scale_matrix))**(-n/S(2))
        term3 = (Determinant(x))**(S(n - p - 1)/2)
        return term1 * term2 * term3
def Wishart(symbol, n, scale_matrix):
    """
    Creates a random variable with Wishart Distribution.

    The density of the said distribution can be found at [1].

    Parameters
    ==========

    n: Positive Real number
        Represents degrees of freedom
    scale_matrix: Positive definite real square matrix
        Scale Matrix (a plain nested list is converted to ImmutableMatrix)

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density, Wishart
    >>> from sympy import MatrixSymbol, symbols
    >>> n = symbols('n', positive=True)
    >>> W = Wishart('W', n, [[2, 1], [1, 2]])
    >>> X = MatrixSymbol('X', 2, 2)
    >>> density(W)(X).doit()
    2**(-n)*3**(-n/2)*exp(Trace(Matrix([
    [-1/3,  1/6],
    [ 1/6, -1/3]])*X))*Determinant(X)**(n/2 - 3/2)/(sqrt(pi)*gamma(n/2)*gamma(n/2 - 1/2))
    >>> density(W)([[1, 0], [0, 1]]).doit()
    2**(-n)*3**(-n/2)*exp(-2/3)/(sqrt(pi)*gamma(n/2)*gamma(n/2 - 1/2))

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Wishart_distribution

    """
    # Accept a plain nested list for convenience.
    if isinstance(scale_matrix, list):
        scale_matrix = ImmutableMatrix(scale_matrix)
    return rv(symbol, WishartDistribution, (n, scale_matrix))
#-------------------------------------------------------------------------------
# Matrix Normal distribution ---------------------------------------------------
class MatrixNormalDistribution(MatrixDistribution):
    """Matrix Normal distribution with an ``n x p`` location matrix and two
    positive definite scale matrices: ``scale_matrix_1`` (``n x n``, among-row
    covariance) and ``scale_matrix_2`` (``p x p``, among-column covariance)."""
    _argnames = ('location_matrix', 'scale_matrix_1', 'scale_matrix_2')

    @staticmethod
    def check(location_matrix, scale_matrix_1, scale_matrix_2):
        # Positive definiteness can only be verified on explicit matrices.
        if not isinstance(scale_matrix_1, MatrixSymbol):
            _value_check(scale_matrix_1.is_positive_definite, "The shape "
                "matrix must be positive definite.")
        if not isinstance(scale_matrix_2, MatrixSymbol):
            _value_check(scale_matrix_2.is_positive_definite, "The shape "
                "matrix must be positive definite.")
        _value_check(scale_matrix_1.is_square, "Scale matrix 1 should be "
            "be square matrix")
        _value_check(scale_matrix_2.is_square, "Scale matrix 2 should be "
            "be square matrix")
        n = location_matrix.shape[0]
        p = location_matrix.shape[1]
        _value_check(scale_matrix_1.shape[0] == n, "Scale matrix 1 should be"
            " of shape %s x %s"% (str(n), str(n)))
        _value_check(scale_matrix_2.shape[0] == p, "Scale matrix 2 should be"
            " of shape %s x %s"% (str(p), str(p)))

    @property
    def set(self):
        n, p = self.location_matrix.shape
        # BUG FIX: use S.Reals -- the bare name `Reals` is not imported
        # anywhere in this module, so accessing this property raised NameError.
        return MatrixSet(n, p, S.Reals)

    @property
    def dimension(self):
        return self.location_matrix.shape

    def pdf(self, x):
        """Density: exp(-tr(V^-1 (x-M)^T U^-1 (x-M))/2) divided by
        (2*pi)**(n*p/2) * |U|**(p/2) * |V|**(n/2)."""
        M, U, V = self.location_matrix, self.scale_matrix_1, self.scale_matrix_2
        n, p = M.shape
        if isinstance(x, list):
            x = ImmutableMatrix(x)
        if not isinstance(x, (MatrixBase, MatrixSymbol)):
            raise ValueError("%s should be an isinstance of Matrix "
                    "or MatrixSymbol" % str(x))
        term1 = Inverse(V)*Transpose(x - M)*Inverse(U)*(x - M)
        num = exp(-Trace(term1)/S(2))
        # BUG FIX: the determinant factors need *fractional* exponents.
        # The original wrote Determinant(U)**S(p)/2, which parses as
        # (|U|**p)/2 because ** binds tighter than /, giving a wrong
        # normalization constant.
        den = (2*pi)**(S(n*p)/2) * Determinant(U)**(S(p)/2) * Determinant(V)**(S(n)/2)
        return num/den


def MatrixNormal(symbol, location_matrix, scale_matrix_1, scale_matrix_2):
    """
    Creates a random variable with Matrix Normal Distribution.

    The density of the said distribution can be found at [1].

    Parameters
    ==========

    location_matrix: Real ``n x p`` matrix
        Represents degrees of freedom
    scale_matrix_1: Positive definite matrix
        Scale Matrix of shape ``n x n``
    scale_matrix_2: Positive definite matrix
        Scale Matrix of shape ``p x p``

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy import MatrixSymbol
    >>> from sympy.stats import density, MatrixNormal
    >>> M = MatrixNormal('M', [[1, 2]], [1], [[1, 0], [0, 1]])
    >>> X = MatrixSymbol('X', 1, 2)
    >>> density(M)(X).doit()
    exp(-Trace((Matrix([
    [-1],
    [-2]]) + X.T)*(Matrix([[-1, -2]]) + X))/2)/(2*pi)
    >>> density(M)([[3, 4]]).doit()
    exp(-4)/(2*pi)

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Matrix_normal_distribution

    """
    # Accept plain nested lists for all three matrix arguments.
    if isinstance(location_matrix, list):
        location_matrix = ImmutableMatrix(location_matrix)
    if isinstance(scale_matrix_1, list):
        scale_matrix_1 = ImmutableMatrix(scale_matrix_1)
    if isinstance(scale_matrix_2, list):
        scale_matrix_2 = ImmutableMatrix(scale_matrix_2)
    args = (location_matrix, scale_matrix_1, scale_matrix_2)
    return rv(symbol, MatrixNormalDistribution, args)
/xadrpy-0.6.3.tar.gz/xadrpy-0.6.3/src/ckeditor/static/ckeditor/ckeditor/_source/plugins/forms/dialogs/checkbox.js | /*
Copyright (c) 2003-2011, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
// Dialog definition for editing/inserting an <input type="checkbox"> element.
CKEDITOR.dialog.add( 'checkbox', function( editor )
	{
		return {
			title : editor.lang.checkboxAndRadio.checkboxTitle,
			minWidth : 350,
			minHeight : 140,
			// Populate the dialog from the currently selected checkbox, if any.
			onShow : function()
			{
				delete this.checkbox;

				var element = this.getParentEditor().getSelection().getSelectedElement();
				if ( element && element.getAttribute( 'type' ) == 'checkbox' )
				{
					this.checkbox = element;
					this.setupContent( element );
				}
			},
			// Create a new checkbox element when none was selected, then let
			// each field's commit() write its value onto the element.
			onOk : function()
			{
				var editor,
					element = this.checkbox,
					isInsertMode = !element;

				if ( isInsertMode )
				{
					editor = this.getParentEditor();
					element = editor.document.createElement( 'input' );
					element.setAttribute( 'type', 'checkbox' );
					editor.insertElement( element );
				}
				// NOTE: commit handlers may swap data.element for a fresh
				// element (IE workarounds below).
				this.commitContent( { element : element } );
			},
			contents : [
				{
					id : 'info',
					label : editor.lang.checkboxAndRadio.checkboxTitle,
					title : editor.lang.checkboxAndRadio.checkboxTitle,
					startupFocus : 'txtName',
					elements : [
						{
							// The checkbox's form "name" attribute.
							id : 'txtName',
							type : 'text',
							label : editor.lang.common.name,
							'default' : '',
							accessKey : 'N',
							setup : function( element )
							{
								this.setValue(
									element.data( 'cke-saved-name' ) ||
									element.getAttribute( 'name' ) ||
									'' );
							},
							commit : function( data )
							{
								var element = data.element;

								// IE failed to update 'name' property on input elements, protect it now.
								if ( this.getValue() )
									element.data( 'cke-saved-name', this.getValue() );
								else
								{
									element.data( 'cke-saved-name', false );
									element.removeAttribute( 'name' );
								}
							}
						},
						{
							// The checkbox's submitted "value" attribute.
							id : 'txtValue',
							type : 'text',
							label : editor.lang.checkboxAndRadio.value,
							'default' : '',
							accessKey : 'V',
							setup : function( element )
							{
								var value = element.getAttribute( 'value' );
								// IE Return 'on' as default attr value.
								this.setValue( CKEDITOR.env.ie && value == 'on' ? '' : value );
							},
							commit : function( data )
							{
								var element = data.element,
									value = this.getValue();

								if ( value && !( CKEDITOR.env.ie && value == 'on' ) )
									element.setAttribute( 'value', value );
								else
								{
									if ( CKEDITOR.env.ie )
									{
										// Remove attribute 'value' of checkbox (#4721).
										// IE cannot drop the attribute in place, so a
										// replacement element is created without it.
										var checkbox = new CKEDITOR.dom.element( 'input', element.getDocument() );
										element.copyAttributes( checkbox, { value: 1 } );
										checkbox.replace( element );
										editor.getSelection().selectElement( checkbox );
										data.element = checkbox;
									}
									else
										element.removeAttribute( 'value' );
								}
							}
						},
						{
							// Whether the checkbox starts out checked.
							id : 'cmbSelected',
							type : 'checkbox',
							label : editor.lang.checkboxAndRadio.selected,
							'default' : '',
							accessKey : 'S',
							value : "checked",
							setup : function( element )
							{
								this.setValue( element.getAttribute( 'checked' ) );
							},
							commit : function( data )
							{
								var element = data.element;

								if ( CKEDITOR.env.ie )
								{
									var isElementChecked = !!element.getAttribute( 'checked' ),
										isChecked = !!this.getValue();

									// IE cannot toggle 'checked' reliably in place;
									// rebuild the element when the state changed.
									if ( isElementChecked != isChecked )
									{
										var replace = CKEDITOR.dom.element.createFromHtml( '<input type="checkbox"'
											+ ( isChecked ? ' checked="checked"' : '' )
											+ '/>', editor.document );
										element.copyAttributes( replace, { type : 1, checked : 1 } );
										replace.replace( element );
										editor.getSelection().selectElement( replace );
										data.element = replace;
									}
								}
								else
								{
									var value = this.getValue();
									if ( value )
										element.setAttribute( 'checked', 'checked' );
									else
										element.removeAttribute( 'checked' );
								}
							}
						}
					]
				}
			]
		};
	});
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/peering/v20221001/get_registered_prefix.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetRegisteredPrefixResult',
'AwaitableGetRegisteredPrefixResult',
'get_registered_prefix',
'get_registered_prefix_output',
]
@pulumi.output_type
class GetRegisteredPrefixResult:
    """
    The customer's prefix that is registered by the peering service provider.
    """
    def __init__(__self__, error_message=None, id=None, name=None, peering_service_prefix_key=None, prefix=None, prefix_validation_state=None, provisioning_state=None, type=None):
        # Validate and register every field in declaration order; all fields
        # are plain strings when set.
        for attr, value in (
                ('error_message', error_message),
                ('id', id),
                ('name', name),
                ('peering_service_prefix_key', peering_service_prefix_key),
                ('prefix', prefix),
                ('prefix_validation_state', prefix_validation_state),
                ('provisioning_state', provisioning_state),
                ('type', type)):
            if value and not isinstance(value, str):
                raise TypeError("Expected argument '%s' to be a str" % attr)
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="errorMessage")
    def error_message(self) -> str:
        """
        The error message associated with the validation state, if any.
        """
        return pulumi.get(self, "error_message")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="peeringServicePrefixKey")
    def peering_service_prefix_key(self) -> str:
        """
        The peering service prefix key that is to be shared with the customer.
        """
        return pulumi.get(self, "peering_service_prefix_key")

    @property
    @pulumi.getter
    def prefix(self) -> Optional[str]:
        """
        The customer's prefix from which traffic originates.
        """
        return pulumi.get(self, "prefix")

    @property
    @pulumi.getter(name="prefixValidationState")
    def prefix_validation_state(self) -> str:
        """
        The prefix validation state.
        """
        return pulumi.get(self, "prefix_validation_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetRegisteredPrefixResult(GetRegisteredPrefixResult):
    # Awaitable wrapper: the unreachable ``yield`` turns __await__ into a
    # generator, so ``await`` resolves immediately to a plain
    # GetRegisteredPrefixResult built from the already-materialized fields.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetRegisteredPrefixResult(
            error_message=self.error_message,
            id=self.id,
            name=self.name,
            peering_service_prefix_key=self.peering_service_prefix_key,
            prefix=self.prefix,
            prefix_validation_state=self.prefix_validation_state,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_registered_prefix(peering_name: Optional[str] = None,
                          registered_prefix_name: Optional[str] = None,
                          resource_group_name: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegisteredPrefixResult:
    """
    Gets an existing registered prefix with the specified name under the given subscription, resource group and peering.

    :param str peering_name: The name of the peering.
    :param str registered_prefix_name: The name of the registered prefix.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = {
        'peeringName': peering_name,
        'registeredPrefixName': registered_prefix_name,
        'resourceGroupName': resource_group_name,
    }
    opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
    __ret__ = pulumi.runtime.invoke('azure-native:peering/v20221001:getRegisteredPrefix', __args__, opts=opts, typ=GetRegisteredPrefixResult).value

    # Re-wrap the raw invoke result into the awaitable result type.
    fields = ('error_message', 'id', 'name', 'peering_service_prefix_key',
              'prefix', 'prefix_validation_state', 'provisioning_state', 'type')
    return AwaitableGetRegisteredPrefixResult(
        **{field: pulumi.get(__ret__, field) for field in fields})
# Output-lifted variant: lift_output_func wraps get_registered_prefix so each
# argument may be a pulumi.Input/Output; the ``...`` body is never executed.
@_utilities.lift_output_func(get_registered_prefix)
def get_registered_prefix_output(peering_name: Optional[pulumi.Input[str]] = None,
                                 registered_prefix_name: Optional[pulumi.Input[str]] = None,
                                 resource_group_name: Optional[pulumi.Input[str]] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRegisteredPrefixResult]:
    """
    Gets an existing registered prefix with the specified name under the given subscription, resource group and peering.

    :param str peering_name: The name of the peering.
    :param str registered_prefix_name: The name of the registered prefix.
    :param str resource_group_name: The name of the resource group.
    """
    ...
/django_xblog-0.1.0-py3-none-any.whl/xblog/metaWeblog.py | import string
import xmlrpclib
import urllib
import re
import time
import datetime
import os
import urlparse
import sys
from django.conf import settings
try:
from django.contrib.auth import get_user_model
User = get_user_model() # settings.AUTH_USER_MODEL
except ImportError:
from django.contrib.auth.models import User
import django
# from django.contrib.comments.models import FreeComment
from django.conf import settings
from .models import Tag, Post, Blog, Author, Category, FILTER_CHOICES
import BeautifulSoup
from .ping_modes import send_pings
try:
from xmlrpc.client import Fault
from xmlrpc.client import DateTime
from urllib.parse import urljoin
except ImportError: # Python 2
from xmlrpclib import Fault
from xmlrpclib import DateTime
from urlparse import urljoin
# import config
# import xmlrpclib.DateTime
# I guess it's time to fix that upload issue...
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
# this is for getting the URL of xmlrpc endpoing
try:
from django.urls import reverse
except ImportError: # django < 2
from django.core.urlresolvers import reverse
import logging
logger = logging.getLogger(__name__)
# WordPress-compatible XML-RPC fault codes. Register the RPC methods defined
# below in settings.XMLRPC_METHODS so the dispatcher can find them.
LOGIN_ERROR = 801
PERMISSION_DENIED = 803
def authenticated(pos=1):
    """Decorator factory: extract username/password from the wrapped call's
    positional arguments (starting at index *pos*), authenticate them as a
    superuser, and pass the resolved User in their place."""
    logger.debug("authenticated entered")
    def _decorate(func):
        def _wrapper(*args, **kwargs):
            username, password = args[pos], args[pos + 1]
            remaining = args[:pos] + args[pos + 2:]
            try:
                logger.info("Username: %s" % username)
                user = User.objects.get(username__exact=username)
            except User.DoesNotExist:
                logger.debug("username %s, password %s, args %s" % (username, "password", remaining))
                logger.warn("User.DoesNotExist")
                raise ValueError("Authentication Failure")
            if not user.check_password(password):
                logger.warn("User.check_password")
                raise ValueError("Authentication Failure")
            if not user.is_superuser:
                logger.warn("user.is_superuser")
                raise ValueError("Authorization Failure")
            return func(user, *remaining, **kwargs)
        return _wrapper
    return _decorate
def full_url(url):
    """Return *url* resolved against the configured SITE_URL."""
    base = settings.SITE_URL
    return urljoin(base, url)
# @public
# @authenticated()
def get_user(username, apikey, blogid=None):
    """Resolve *username* and validate its remote-access *apikey*.

    Raises an XML-RPC Fault (LOGIN_ERROR on bad credentials,
    PERMISSION_DENIED when remote access is disabled); returns the User
    on success. *blogid* is accepted for API compatibility only.
    """
    logger.debug("%s.get_user entered" % __name__)
    logger.debug("user: %s" % username)
    logger.debug("apikey: %s" % apikey)
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        raise Fault(LOGIN_ERROR, 'Username is incorrect.')
    if apikey != user.author.remote_access_key:
        raise Fault(LOGIN_ERROR, 'Password is invalid.')
    if not user.author.remote_access_enabled:
        raise Fault(PERMISSION_DENIED, 'Remote access not enabled for this user.')
    return user
def is_user_blog(user, blogid):
    """True when the blog with primary key *blogid* is owned by *user*."""
    return Blog.objects.get(pk=blogid).owner == user
def blogger_getRecentPosts(appkey, blogid, username, password, num_posts=50):
    """Return up to *num_posts* most recent posts of blog *blogid*,
    newest first, as metaWeblog post structs."""
    logger.debug("blogger.getRecentPosts called...")
    user = get_user(username, password)
    if not is_user_blog(user, blogid):
        raise Fault(PERMISSION_DENIED, 'Permission denied for %s on blogid %s' % (user, blogid))
    recent = Blog.objects.get(id=blogid).post_set.order_by('-pub_date')[:num_posts]
    return [post_struct(p) for p in recent]
# @public
def blogger_getUserInfo(appkey, username, password):
    """Return the blogger-API user-info struct for *username*."""
    logger.debug("blogger.getUserInfo called")
    user = get_user(username, password)
    return {
        'username': user.username,
        'firstname': user.first_name,
        'lastname': user.last_name,
        'nickname': user.author.fullname,
        'url': user.author.url,
        'email': user.email,
        'userid': str(user.id),
    }
# @public
# @authenticated()
def blogger_getUsersBlogs(appkey, username, password):
    """Return the blogs owned by the authenticated user.

    Each entry is a struct with string ``blogid``, ``blogName`` and the
    blog's homepage ``url`` (appkey is ignored, per the blogger API).
    """
    logger.debug("blogger.getUsersBlogs called")
    user = get_user(username, password)
    owned = Blog.objects.filter(owner=user)
    logger.debug("%s blogs for %s" % (owned, user))
    res = [
        {
            'blogid': str(blog.id),
            'blogName': blog.title,
            'url': blog.get_url(),
        }
        for blog in owned
    ]
    logger.debug(res)
    return res
def mt_publishPost(postid, username, password):
    """No-op publish hook kept for MovableType API compatibility.

    Posts are published as part of editing, so this always reports success.
    """
    return True
# @public
def blogger_deletePost(appkey, post_id, username, password, publish=False):
    """Delete the post identified by *post_id*.

    Raises Fault PERMISSION_DENIED when the post is not owned by the
    authenticated user; returns True on success.
    """
    logger.debug("blogger.deletePost called")
    user = get_user(username, password)
    post = Post.objects.get(pk=post_id)
    if post.author.user != user:
        # BUG FIX: the fault message previously interpolated the undefined
        # name `postid` (the parameter is `post_id`), so this branch itself
        # raised NameError instead of the intended Fault.
        raise Fault(PERMISSION_DENIED, 'Permission denied for %s on post %s' % (user, post_id))
    logger.warn("Deleting post %s by user %s" % (post.id, user))
    post.delete()
    return True
def mt_getCategoryList(blogid, username, password):
    """Return the user's categories as [{'categoryId', 'categoryName'}, ...]."""
    logger.debug("mt_getCategoryList called")
    logger.warn("Categories no longer supported")
    user = get_user(username, password)
    return [
        {'categoryId': str(c.id), 'categoryName': c.title}
        for c in Category.objects.filter(blog__owner=user.id)
    ]
def post_struct(post):
    """Build the metaWeblog struct describing *post*."""
    logger.debug("post_struct called")
    permalink = post.get_absolute_url()
    cats = [c.title for c in post.categories.all()]
    # An explicit <!--more--> marker splits the body into teaser + extended
    # text; otherwise the whole body is the description.
    if post.body.find('<!--more-->') > -1:
        description, mt_text_more = post.body.split('<!--more-->')
    else:
        description, mt_text_more = post.body, ""
    # metaWeblog encodes comment policy as "1" (open) / "2" (closed).
    mt_allow_comments = 1 if post.enable_comments else 2
    struct = {
        'postid': post.id,
        'title': post.title,
        'permaLink': permalink,
        'description': description,
        'mt_text_more': mt_text_more,
        'mt_convert_breaks': post.text_filter,
        'categories': cats,
        'userid': post.author.id,
        'mt_allow_comments': str(mt_allow_comments),
    }
    if post.pub_date:
        struct['dateCreated'] = format_date(post.pub_date)
    logger.debug("Returning from post_struct")
    logger.debug(struct)
    return struct
def format_date(d):
    """Convert *d* into an XML-RPC DateTime.

    Args:
        d: a datetime/date object exposing isoformat().
    Returns:
        An xmlrpc DateTime wrapping ``d.isoformat()``.
    """
    logging.getLogger(__name__).debug("format_date called with %s", d)
    # BUG FIX: use the DateTime name imported portably at the top of the
    # module (xmlrpc.client on py3, xmlrpclib on py2) instead of referencing
    # the py2-only xmlrpclib module directly; dead commented code removed.
    return DateTime(d.isoformat())
def setTags(post, struct, key="tags"):
    # Synchronize post.tags from the XML-RPC struct's tag list.
    # Returns True unconditionally.
    logger.debug( "setTags entered")
    tags = struct.get(key, None)
    if tags is None:
        logger.info("No tags set")
        # Clearing the M2M by assignment; works on the old Django this
        # module targets (newer Django requires .set()/.clear()).
        post.tags = []
    else:
        # post.categories = [Category.objects.get(title__iexact=name) for name in tags]
        logger.info("Setting tags")
        for tag in tags:
            logger.debug("setting tag '%s'" % tag)
            # Tags are normalized to lowercase before lookup/creation.
            t, created = Tag.objects.get_or_create(title=tag.lower())
            if created:
                logger.info("Adding new tag: %s" % t)
            else:
                logger.info("Found tag: %s" % t)
            t.save()
            post.tags.add(t)
    # NOTE(review): post is saved twice below; presumably the first save
    # persists the M2M changes before logging -- confirm whether one save
    # suffices.
    post.save()
    logger.debug(tags)
    logger.debug("Post Tags: %s" % str(post.tags))
    post.save()
    return True
# @public
def mt_supportedMethods(*args):
    """Return the XML-RPC server's list of supported method names."""
    logger.debug( "mt.listSupportedMethods called...")
    # settings.XMLRPC_METHODS holds (callable_path, public_name) pairs.
    return [method[1] for method in settings.XMLRPC_METHODS]
# @public
def mt_getPostCategories(postid, username, password):
    """
    Return the list of category structs for post *postid*.

    Each entry is {categoryName, categoryId, isPrimary}. Returns None on
    unexpected errors (pre-existing contract, preserved).
    """
    logger.debug( "mt_getPostCategories called...")
    logger.warn("Categories no longer supported")
    user = get_user(username, password)
    res = []
    try:
        # Fetch the post *before* the permission check; the original
        # referenced an undefined `post` variable here (NameError).
        p = Post.objects.get(pk=postid)
        if p.author.user != user:
            raise Fault(PERMISSION_DENIED, 'Permission denied for %s on post %s' % (user, postid))
        for c in p.categories.all():
            primary = p.primary_category_name == c
            res.append(
                dict(categoryName=c.title, categoryId=str(c.id), isPrimary=primary)
            )
    except Fault:
        # Permission problems must propagate to the XML-RPC layer.
        raise
    except Exception:
        import traceback
        # print_exc's first positional parameter is `limit`; the stream must
        # be passed as the `file` keyword (the original passed sys.stderr
        # positionally).
        traceback.print_exc(file=sys.stderr)
        res = None
    return res
# @public
def mt_supportedTextFilters():
    """Return the text filters supported by the blog as {label, key} dicts."""
    logger.debug( "Called mt_supportedTextFilters")
    return [dict(label=label, key=key) for key, label in FILTER_CHOICES]
# @public
def mt_setPostCategories(postid, username, password, cats):
    """
    MovableType variant of setPostCategories.

    *cats* is a list of {categoryId, isPrimary?} dicts; the primary flag
    also sets the post's primary category name.
    """
    logger.debug( "mt_setPostCategories called...")
    logger.info("Submitted with %s" % cats)
    user = get_user(username, password)
    post = Post.objects.get(pk=postid)
    if post.author.user != user:
        raise Fault(PERMISSION_DENIED, 'Permission denied for %s on post %s' % (user, postid))
    logger.debug("Old cats: %s" % post.categories.all())
    # Replace the existing category set with the submitted one.
    post.categories.clear()
    for cat in cats:
        category = Category.objects.get(pk=cat['categoryId'])
        logger.debug("Got %s" % category)
        if cat.get('isPrimary'):
            logger.debug("Got primary category '%s'" % cat)
            post.primary_category_name = category
        post.categories.add(category)
    logger.debug("New cats: %s" % post.categories.all())
    post.save()
    logger.debug(" mt_setPostCategories Done.")
    return True
/site24x7_openai_observability-1.0.0-py3-none-any.whl/site24x7_openai_observability/instrumentation.py | import time
import platform
from importlib import import_module
def check_module():
    """Probe once for the optional apminsight APM package.

    Caches the outcome in the module-level ``apm_module_status`` flag and
    returns the cached value on subsequent calls.
    """
    global apm_module_status
    if apm_module_status is not None:
        # Already probed; reuse the cached answer.
        return apm_module_status
    try:
        imported = import_module("apminsight")
        if imported is not None:
            apm_module_status = True
    except Exception:
        apm_module_status = False
    return apm_module_status
# Tri-state flag: None = not yet probed, True/False = apminsight availability.
apm_module_status = None
# Probe for apminsight once at import time.
check_module()
def get_message(return_value):
    """Extract the completion text from an OpenAI response object.

    Supports both the completions shape (``choices[0]['text']``) and the
    chat shape (``choices[0]['message']['content']``). Returns "" when
    *return_value* is falsy or has neither field.
    """
    if not return_value:
        return ""
    first_choice = return_value['choices'][0]
    if "text" in first_choice:
        return first_choice['text']
    if "message" in first_choice:
        return first_choice['message']['content']
    return ""
def get_prompt(kwargs):
    """Return the prompt text from completion kwargs.

    For chat calls the content of the *last* message is used. Returns None
    when neither ``prompt`` nor ``messages`` is present.
    """
    if "prompt" in kwargs:
        return kwargs['prompt']
    if "messages" in kwargs:
        return kwargs['messages'][-1]['content']
    return None
def get_system_message(kwargs):
    """Return the concatenated content of all ``system``-role messages.

    The original accessed ``messages.role`` (attribute access on a dict)
    and called the non-existent ``str.appned``; both raised at runtime.
    Messages are dicts elsewhere in this module (see get_prompt), so dict
    access is used here.
    """
    parts = []
    for message in kwargs.get('messages', []):
        if message.get('role') == 'system':
            parts.append(message.get('content', ''))
    return "".join(parts)
def get_error_details(err):
    """Map an exception (or None) to an {error, response_code} dict."""
    if not err:
        # No exception: report success.
        return {"error": "-", "response_code": 200}
    try:
        import openai
        if isinstance(err, openai.error.OpenAIError):
            # Use the richer details carried by OpenAI errors.
            return {"error": err._message, "response_code": err.http_status}
    except Exception:
        pass
    return {"error": str(err), "response_code": 500}
def get_openai_key():
    """Return a redacted form ("abc...wxyz") of the configured OpenAI API key.

    Returns None when no key is configured or the openai package is missing
    or incompatible. The import is inside the try block: the original
    imported openai unconditionally, so a missing package raised instead of
    degrading gracefully.
    """
    try:
        import openai
        api_key = openai.api_key or openai.util.default_api_key()
        if api_key:
            return api_key[:3] + "..." + api_key[-4:]
    except Exception:
        pass
    return None
def extract_info(tracker, args=(), kwargs={}, return_value=None, error=None, starttime=None):
    """Assemble an API-call info record and hand it to the openai tracker.

    Never raises: any failure is swallowed and printed so instrumentation
    cannot break the wrapped OpenAI call.
    """
    try:
        from site24x7_openai_observability import openai_tracker, payload_print
        now_ms = int(round(time.time() * 1000))
        usage = return_value.usage if return_value else None
        api_info = {
            'starttime': starttime,
            'model': kwargs.get('model', kwargs.get('engine', "")),
            'requesttime': now_ms - starttime,
            'total_token': usage.total_tokens if usage else 0,
            "prompt_token": usage.prompt_tokens if usage else 0,
            "response_token": usage.completion_tokens if usage else 0,
            'api_key': get_openai_key(),
            'host': platform.node(),
        }
        api_info.update(get_error_details(error))
        # Prompt/response bodies are only captured when explicitly enabled.
        if openai_tracker.record_message():
            api_info.update({
                'prompt': get_prompt(kwargs),
                'message': get_message(return_value),
            })
        if payload_print:
            print("openai call info", api_info)
        openai_tracker.increment_call()
        openai_tracker.record_request(api_info)
    except Exception as exc:
        print("Exception in openai instumentation", str(exc))
def create_apm_tracker(module, method_info):
    """Create a child apminsight tracker, or None when APM is unavailable.

    Any failure inside the APM integration is swallowed so instrumentation
    never breaks the wrapped call.
    """
    try:
        if apm_module_status:
            from apminsight import get_agent
            from apminsight.context import get_cur_tracker
            from apminsight.instrumentation.util import create_tracker_info
            parent_tracker = get_cur_tracker()
            if parent_tracker:
                tracker_info = create_tracker_info(module, method_info, parent_tracker)
                return get_agent().check_and_create_tracker(tracker_info)
    except Exception:
        pass
    return None
def close_apm_tracker(tracker, method_info, args, kwargs, res, err, starttime):
    """Finish an apminsight tracker and restore its parent as current."""
    if not (apm_module_status and tracker):
        return
    from apminsight.context import set_cur_tracker
    from apminsight.instrumentation.wrapper import handle_tracker_end
    handle_tracker_end(tracker, method_info, args, kwargs, res, err)
    set_cur_tracker(tracker.get_parent())
def default_openai_wrapper(original, module, method_info):
    """Wrap an OpenAI API method so each call is timed and recorded.

    The wrapper re-raises any exception from the wrapped call after the
    telemetry has been captured.
    """
    def wrapper(*args, **kwargs):
        res = None
        err = None
        # Bind these *before* the try block so the finally clause can never
        # hit an unbound local if tracker creation itself were to fail
        # (in the original both lived inside the try).
        cur_tracker = create_apm_tracker(module, method_info)
        starttime = int(round(time.time() * 1000))
        try:
            res = original(*args, **kwargs)
        except Exception as exc:
            err = exc
            raise
        finally:
            close_apm_tracker(cur_tracker, method_info, args, kwargs, res, err, starttime)
            extract_info(cur_tracker, args, kwargs, res, err, starttime)
        return res
    # Preserve the wrapped function's name for introspection/logging.
    wrapper.__name__ = original.__name__
    return wrapper
def check_and_instrument(module_info):
    """Instrument every configured module/method that can be imported.

    Missing modules are skipped with a message; modules already carrying the
    ``apminsight_instrumented`` marker are not instrumented twice.
    """
    for module_name in module_info:
        try:
            act_module = import_module(module_name)
            if hasattr(act_module, 'apminsight_instrumented'):
                # `continue`, not `return`: one already-instrumented module
                # must not stop the remaining modules from being processed.
                continue
            for method_info in module_info.get(module_name):
                instrument_method(module_name, act_module, method_info)
            setattr(act_module, 'apminsight_instrumented', True)
        except Exception:
            print(module_name + " not present")
def instrument_method(module_name, act_module, method_info):
parent_ref = act_module
class_name = ''
if type(method_info) is not dict:
return
if "class" in method_info:
class_name = method_info.get("class")
if hasattr(act_module, class_name):
parent_ref = getattr(act_module, class_name)
module_name = module_name+'.'+class_name
method_name = method_info.get("method", '')
if hasattr(parent_ref, method_name):
original = getattr(parent_ref, method_name)
# use default wrapper if there is no wrapper attribute
wrapper_factory = method_info.get("wrapper") if "wrapper" in method_info else default_openai_wrapper
wrapper = wrapper_factory(original, module_name, method_info)
setattr(parent_ref, method_name, wrapper)
# Patch registry consumed by check_and_instrument(): maps the module path of
# each OpenAI entry point to descriptors of the methods to wrap.
module_info = {
    'openai.api_resources.completion' : [
        {
            "class" : 'Completion',
            "method" : 'create',
            "component" : "OPENAI",
            "wrapper" : default_openai_wrapper,
        }
    ],
    'openai.api_resources.chat_completion' : [
        {
            "class" : 'ChatCompletion',
            "method" : 'create',
            "component" : "OPENAI",
            "wrapper" : default_openai_wrapper,
        }
    ],
}
/jupyterlab_remote_contents-0.1.1.tar.gz/jupyterlab_remote_contents-0.1.1/node_modules/@typescript-eslint/eslint-plugin/dist/rules/no-for-in-array.js | "use strict";
// TypeScript-emitted interop helpers for `import * as ns` over CommonJS
// modules. This file is compiler output; edit the .ts source instead.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
      desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
// Copies every own property of a CommonJS module onto a fresh namespace
// object and exposes the module itself as the `default` export.
var __importStar = (this && this.__importStar) || function (mod) {
    if (mod && mod.__esModule) return mod;
    var result = {};
    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
    __setModuleDefault(result, mod);
    return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
const ts = __importStar(require("typescript"));
const util = __importStar(require("../util"));
// ESLint rule "no-for-in-array": reports `for...in` loops whose right-hand
// side is an array or string type, since for-in iterates index keys (as
// strings) and inherited enumerable properties rather than elements.
exports.default = util.createRule({
    name: 'no-for-in-array',
    meta: {
        docs: {
            description: 'Disallow iterating over an array with a for-in loop',
            recommended: 'error',
            requiresTypeChecking: true,
        },
        messages: {
            forInViolation: 'For-in loops over arrays are forbidden. Use for-of or array.forEach instead.',
        },
        schema: [],
        type: 'problem',
    },
    defaultOptions: [],
    create(context) {
        return {
            ForInStatement(node) {
                const parserServices = util.getParserServices(context);
                const checker = parserServices.program.getTypeChecker();
                const originalNode = parserServices.esTreeNodeToTSNodeMap.get(node);
                // Resolve the iterated expression's type (through any type
                // parameter constraints) before testing it.
                const type = util.getConstrainedTypeAtLocation(checker, originalNode.expression);
                // Flag arrays, unions of arrays, and string-like types.
                if (util.isTypeArrayTypeOrUnionOfArrayTypes(type, checker) ||
                    (type.flags & ts.TypeFlags.StringLike) !== 0) {
                    context.report({
                        node,
                        messageId: 'forInViolation',
                    });
                }
            },
        };
    },
});
//# sourceMappingURL=no-for-in-array.js.map | PypiClean |
/runml_checks-1.0.0-py3-none-any.whl/runml_checks/core/serialization/suite_result/json.py | """Module containing JSON serializer for the SuiteResult type."""
import typing as t
from runml_checks.core import check_result as check_types
from runml_checks.core import suite
from runml_checks.core.serialization.abc import JsonSerializer
from runml_checks.core.serialization.check_failure.json import CheckFailureSerializer
from runml_checks.core.serialization.check_result.json import CheckResultSerializer
__all__ = ['SuiteResultSerializer']
class SuiteResultSerializer(JsonSerializer['suite.SuiteResult']):
    """Serializes any SuiteResult instance into JSON format.

    Parameters
    ----------
    value : SuiteResult
        SuiteResult instance that needed to be serialized.
    """

    def __init__(self, value: 'suite.SuiteResult', **kwargs):
        if not isinstance(value, suite.SuiteResult):
            raise TypeError(
                f'Expected "SuiteResult" but got "{type(value).__name__}"'
            )
        super().__init__(value=value)

    def serialize(
        self,
        with_display: bool = True,
        **kwargs
    ) -> t.Union[t.Dict[t.Any, t.Any], t.List[t.Any]]:
        """Serialize a SuiteResult instance into JSON format.

        Parameters
        ----------
        with_display : bool, default True
            whether to include serialized `CheckResult.display` items into
            the output or not
        **kwargs :
            all other key-value arguments will be passed to the CheckResult/CheckFailure
            serializers

        Returns
        -------
        Union[Dict[Any, Any], List[Any]]
        """
        serialized = [
            self._serialize_result(result, with_display)
            for result in self.value.results
        ]
        return {'name': self.value.name, 'results': serialized, 'type': 'SuiteResult'}

    @staticmethod
    def _serialize_result(result, with_display: bool):
        """Serialize a single CheckResult/CheckFailure entry."""
        if isinstance(result, check_types.CheckResult):
            return CheckResultSerializer(result).serialize(with_display=with_display)
        if isinstance(result, check_types.CheckFailure):
            return CheckFailureSerializer(result).serialize()
        raise TypeError(f'Unknown result type - {type(result)}')
/cis_checks_2023_u1_3-2.1.2-py3-none-any.whl/cis_checks_2023_u1_3/utils.py | import csv
import json
import logging.config
import os
import re
import tempfile
import time
from datetime import datetime
import botocore.exceptions
# Module-wide logger; all helpers below log entry/exit through it.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("Simple Logger")

# File-based logging configuration, kept commented for reference:
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) LOG_CONF_PATH = os.path.join( BASE_DIR,'..',
# 'logging.conf') LOG_FILE_PATH = os.path.join(BASE_DIR, '..', 'logs', 'cis_automation_'+ datetime.now().strftime(
# '%Y-%m-%d_%H-%M-%S')+ '.log') logging.config.fileConfig(LOG_CONF_PATH, defaults={'logfilename': LOG_FILE_PATH})


# --- Script controls ---

# CIS Benchmark version referenced. Only used in web report.
AWS_CIS_BENCHMARK_VERSION = "1.1"

# Would you like an HTML file generated with the result?
# This file will be delivered using a signed URL.
S3_WEB_REPORT = True

# Where should the report be delivered to?
# Make sure to update permissions for the Lambda role if you change bucket name.
# S3_WEB_REPORT_BUCKET = "CHANGE_ME_TO_YOUR_S3_BUCKET"
S3_WEB_REPORT_BUCKET = "dkumar-cicd"

# Create separate report files?
# This will add date and account number than prefix. Example: cis_report_111111111111_161220_1213.html
S3_WEB_REPORT_NAME_DETAILS = True

# How many hours should the report be available? Default = 168h/7days
S3_WEB_REPORT_EXPIRE = "168"

# Set to true if you wish to anonymize the account number in the report.
# This is mostly used for demo/sharing purposes.
S3_WEB_REPORT_OBFUSCATE_ACCOUNT = False

# Would you like to send the report signedURL to an SNS topic
SEND_REPORT_URL_TO_SNS = False
SNS_TOPIC_ARN = "CHANGE_ME_TO_YOUR_TOPIC_ARN"

# Would you like to print the results as JSON to output?
SCRIPT_OUTPUT_JSON = True

# Would you like to supress all output except JSON result?
# Can be used when you want to pipe result to another system.
# If using S3 reporting, please enable SNS integration to get S3 signed URL
OUTPUT_ONLY_JSON = False
class utils:
    def get_cred_report(self):
        """Generate and return the IAM credential report as a list of row dicts.

        Polls generate_credential_report() every 2s until the report state is
        COMPLETE (up to 50 retries, ~100s). NOTE(review): on failure this
        returns a *string* (error text or "Fail: ..." status) instead of a
        list -- callers must handle both return types.
        """
        logger.info(" ---Inside utils :: get_cred_report()--- ")
        x = 0
        status = ""
        cred_report_obj = {}
        try:
            cred_report_obj = self.session.client('iam').generate_credential_report()
            logger.info(f" cred_report_obj: {cred_report_obj} ")
        except botocore.exceptions.ClientError as error:
            logger.error(f" Exception while generate_credential_report(): {error}")
            return str(error)
        try:
            while cred_report_obj['State'] != "COMPLETE":
                logger.info(" State of self.session.client('iam').generate_credential_report() is not complete")
                time.sleep(2)
                x += 1
                cred_report_obj = self.session.client('iam').generate_credential_report()
                # If no credential report is delivered within this time fail the check.
                if x > 50:
                    status = "Fail: rootUse - no CredentialReport available."
                    break
            if "Fail" in status:
                return status
        except KeyError as e:
            # A response without a 'State' key is treated as "no report".
            status = "Fail: rootUse - no CredentialReport available."
            return status
        response = self.session.client('iam').get_credential_report()
        report = []
        # The report content is CSV; parse each row into a dict.
        reader = csv.DictReader(response['Content'].decode().splitlines(), delimiter=',')
        for row in reader:
            report.append(row)
        # Verify if root key's never been used, if so add N/A
        try:
            if report[0]['access_key_1_last_used_date']:
                pass
        except:
            report[0]['access_key_1_last_used_date'] = "N/A"
        try:
            if report[0]['access_key_2_last_used_date']:
                pass
        except:
            report[0]['access_key_2_last_used_date'] = "N/A"
        return report
    def get_account_password_policy(self):
        """Return the account's IAM password policy dict, or False if none is set.

        NOTE(review): any exception other than "cannot be found" is swallowed
        and the method falls through to return None implicitly -- confirm
        callers treat None and False the same way.
        """
        logger.info(" ---Inside utils :: get_account_password_policy()--- ")
        try:
            response = self.session.client('iam').get_account_password_policy()
            return response['PasswordPolicy']
        except Exception as e:
            # IAM raises NoSuchEntity ("... cannot be found") when no policy exists.
            if "cannot be found" in str(e):
                return False
    def get_regions(self):
        """Return the names of all EC2 regions visible to this account.

        Uses us-east-1 as the bootstrap endpoint for describe_regions().
        """
        logger.info(" ---Inside utils :: get_regions()--- ")
        client = self.session.client('ec2', region_name='us-east-1')
        region_response = {}
        # NOTE(review): the commented-out block below used to convert auth
        # failures into a control-style failure dict; restore or delete it
        # deliberately.
        # try:
        region_response = client.describe_regions()
        # except botocore.exceptions.ClientError as error:
        #     if error.response['Error']['Code'] == 'AuthFailure':
        #         logger.error(f" AccessKey credentails not found here: {error}")
        #         return {
        #             'Result': 'Auth Failure',
        #             'failReason': 'Auth Failure',
        #             'Offenders': [],
        #             'ScoredControl': False,
        #             'Description': 'Auth Failure',
        #             'ControlId': 'Auth Failure'
        #         }
        # except botocore.exceptions.NoCredentialsError as e:
        #     logger.error(f" Unable to locate credentials: {e} ")
        #     return {
        #         'Result': 'Auth Failure',
        #         'failReason': 'Auth Failure',
        #         'Offenders': [],
        #         'ScoredControl': False,
        #         'Description': 'Auth Failure',
        #         'ControlId': 'Auth Failure'
        #     }
        logger.debug(region_response)
        regions = [region['RegionName'] for region in region_response['Regions']]
        return regions
def get_cloudtrails(self, regions):
logger.info(" ---Inside utils :: get_cloudtrails()--- ")
"""Summary
Returns:
TYPE: Description
"""
trails = dict()
for n in regions:
client = self.session.client('cloudtrail', region_name=n)
response = client.describe_trails()
temp = []
for m in response['trailList']:
if m['IsMultiRegionTrail'] is True:
if m['HomeRegion'] == n:
temp.append(m)
else:
temp.append(m)
if len(temp) > 0:
trails[n] = temp
return trails
def find_in_string(self, pattern, target):
logger.info(" ---Inside utils :: find_in_string()--- ")
"""Summary
Returns:
TYPE: Description
"""
result = True
for n in pattern:
if not re.search(n, target):
result = False
break
return result
def get_account_number(self):
logger.info(" ---Inside utils :: get_account_number()--- ")
"""Summary
Returns:
TYPE: Description
"""
if S3_WEB_REPORT_OBFUSCATE_ACCOUNT is False:
client = self.session.client("sts")
account = client.get_caller_identity()["Account"]
else:
account = "111111111111"
return account
def set_evaluation(self, invokeEvent, mainEvent, annotation):
logger.info(" ---Inside utils :: set_evaluation()--- ")
"""Summary
Args:
event (TYPE): Description
annotation (TYPE): Description
Returns:
TYPE: Description
"""
configClient = self.session.client('config')
if len(annotation) > 0:
configClient.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': 'AWS::::Account',
'ComplianceResourceId': mainEvent['accountId'],
'ComplianceType': 'NON_COMPLIANT',
'Annotation': str(annotation),
'OrderingTimestamp': invokeEvent['notificationCreationTime']
},
],
ResultToken=mainEvent['resultToken']
)
else:
configClient.put_evaluations(
Evaluations=[
{
'ComplianceResourceType': 'AWS::::Account',
'ComplianceResourceId': mainEvent['accountId'],
'ComplianceType': 'COMPLIANT',
'OrderingTimestamp': invokeEvent['notificationCreationTime']
},
],
ResultToken=mainEvent['resultToken']
)
    def json2html(self, controlResult, account):
        """Render the nested control results as an HTML report.

        Args:
            controlResult: list of control sections, each a list of result dicts.
            account: account number string shown in the report header.

        Returns:
            list of HTML fragments in document order (caller writes them out).
        """
        logger.info(" ---Inside utils :: json2html()--- ")
        table = []
        shortReport = self.shortAnnotation(controlResult)
        # Page skeleton: embedded CSS plus the report title.
        table.append(
            "<html>\n<head>\n<style>\n\n.table-outer {\n background-color: #eaeaea;\n border: 3px solid "
            "darkgrey;\n}\n\n.table-inner {\n background-color: white;\n border: 3px solid "
            "darkgrey;\n}\n\n.table-hover tr{\nbackground: transparent;\n}\n\n.table-hover tr:hover {"
            "\nbackground-color: lightgrey;\n}\n\ntable, tr, td, th{\n line-height: 1.42857143;\n "
            "vertical-align: top;\n border: 1px solid darkgrey;\n border-spacing: 0;\n border-collapse: "
            "collapse;\n width: auto;\n max-width: auto;\n background-color: transparent;\n padding: "
            "5px;\n}\n\ntable th {\n padding-right: 20px;\n text-align: left;\n}\n\ntd {\n "
            "width:100%;\n}\n\ndiv.centered\n{\n position: absolute;\n width: auto;\n height: auto;\n z-index: "
            "15;\n top: 10%;\n left: 20%;\n right: 20%;\n background: white;\n}\n\ndiv.centered table\n{\n "
            "margin: auto;\n text-align: left;\n}\n</style>\n</head>\n<body>\n<h1 style=\"text-align: "
            "center;\">AWS CIS Foundation Framework</h1>\n<div class=\"centered\">")
        # Report metadata header.
        table.append("<table class=\"table table-inner\">")
        table.append("<tr><td>Account: " + account + "</td></tr>")
        table.append("<tr><td>Report date: " + time.strftime("%c") + "</td></tr>")
        table.append("<tr><td>Benchmark version: " + AWS_CIS_BENCHMARK_VERSION + "</td></tr>")
        table.append(
            "<tr><td>Whitepaper location: <a href=\"https://d0.awsstatic.com/whitepapers/compliance"
            "/AWS_CIS_Foundations_Benchmark.pdf\" "
            "target=\"_blank\">https://d0.awsstatic.com/whitepapers/compliance/AWS_CIS_Foundations_Benchmark.pdf</a"
            "></td></tr>")
        table.append("<tr><td>" + shortReport + "</td></tr></table><br><br>")
        tableHeadOuter = "<table class=\"table table-outer\">"
        tableHeadInner = "<table class=\"table table-inner\">"
        tableHeadHover = "<table class=\"table table-hover\">"
        table.append(tableHeadOuter)  # Outer table
        # One inner table per control section, one hover table per control.
        for m, _ in enumerate(controlResult):
            table.append("<tr><th>" + controlResult[m][0]['ControlId'].split('.')[0] + "</th><td>" + tableHeadInner)
            for n in range(len(controlResult[m])):
                # Colour-code controls: red = failed, yellow = manual, green = passed.
                if str(controlResult[m][n]['Result']) == "False":
                    resultStyle = " style=\"background-color:#ef3d47;\""
                elif str(controlResult[m][n]['Result']) == "Manual":
                    resultStyle = " style=\"background-color:#ffff99;\""
                else:
                    resultStyle = " style=\"background-color:lightgreen;\""
                table.append("<tr><th" + resultStyle + ">" + controlResult[m][n]['ControlId'].split('.')[
                    1] + "</th><td>" + tableHeadHover)
                table.append("<tr><th>ControlId</th><td>" + controlResult[m][n]['ControlId'] + "</td></tr>")
                table.append("<tr><th>Description</th><td>" + controlResult[m][n]['Description'] + "</td></tr>")
                table.append("<tr><th>failReason</th><td>" + controlResult[m][n]['failReason'] + "</td></tr>")
                table.append("<tr><th>Offenders</th><td><ul>" + str(controlResult[m][n]['Offenders']).replace("', ",
                                                                                                              "',<br>")
                             + "</ul></td></tr>")
                table.append("<tr><th>Result</th><td>" + str(controlResult[m][n]['Result']) + "</td></tr>")
                table.append(
                    "<tr><th>ScoredControl</th><td>" + str(controlResult[m][n]['ScoredControl']) + "</td></tr>")
                table.append("</table></td></tr>")
            table.append("</table></td></tr>")
        table.append("</table>")
        table.append("</div>\n</body>\n</html>")
        return table
    def s3report(self, htmlReport, account):
        """Write the HTML report locally and upload it to S3; return a signed URL.

        The fragments in *htmlReport* are written both to a persistent copy
        under data/<account>/ and to a temp file used for the S3 upload.
        NOTE(review): on upload failure an error *string* is returned instead
        of a URL, and the temp file is never unlinked (leak).
        """
        logger.info(" ---Inside utils :: s3report()--- ")
        if S3_WEB_REPORT_NAME_DETAILS is True:
            reportName = "cis_report_" + str(account) + "_" + str(datetime.now().strftime('%Y%m%d_%H%M')) + ".html"
        else:
            reportName = "cis_report.html"
        ACCOUNT_NUM = str(account)
        USER_DATA_DIR = os.path.join('data', ACCOUNT_NUM)
        html_path = os.path.join(USER_DATA_DIR, reportName)
        if not os.path.exists(USER_DATA_DIR):
            os.makedirs(USER_DATA_DIR)
            logger.info(" Directory '%s' created" % USER_DATA_DIR)
        logger.info(" Creating HTML report file...")
        # Write the report twice: temp file for the upload, local file to keep.
        with tempfile.NamedTemporaryFile(delete=False) as f, open(html_path, 'a+b') as fp:
            for item in htmlReport:
                f.write(item.encode())
                fp.write(item.encode())
            f.flush()
            fp.flush()
        try:
            f.close()
            fp.close()
            self.session.client('s3').upload_file(f.name, S3_WEB_REPORT_BUCKET, reportName)
            os.unlink(f.name)
        except Exception as e:
            return "Failed to upload report to S3 because: " + str(e)
        # Signed URL valid for S3_WEB_REPORT_EXPIRE hours (value is in hours,
        # multiplied by 60 here -- NOTE(review): that yields minutes, not
        # seconds; confirm the intended TTL).
        ttl = int(S3_WEB_REPORT_EXPIRE) * 60
        signedURL = self.session.client('s3').generate_presigned_url(
            'get_object',
            Params={
                'Bucket': S3_WEB_REPORT_BUCKET,
                'Key': reportName
            },
            ExpiresIn=ttl)
        return signedURL
def json_output(self, controlResult, account):
logger.info(" ---Inside utils :: json_output()--- ")
"""Summary
Args:
controlResult (TYPE): Description
Returns:
TYPE: Description
"""
inner = dict()
outer = dict()
for m in range(len(controlResult)):
inner = dict()
for n in range(len(controlResult[m])):
x = int(controlResult[m][n]['ControlId'].split('.')[1])
inner[x] = controlResult[m][n]
y = controlResult[m][0]['ControlId'].split('.')[0]
outer[y] = inner
if OUTPUT_ONLY_JSON is True:
logger.debug(json.dumps(outer, sort_keys=True, indent=4, separators=(',', ': ')))
else:
logger.debug("JSON output:")
logger.debug("---------")
logger.debug(json.dumps(outer, sort_keys=True, indent=4, separators=(',', ': ')))
logger.debug("---------")
logger.debug("\n")
logger.debug("Summary:")
logger.debug(self.shortAnnotation(controlResult))
logger.debug("\n")
ACCOUNT_NUM = str(account)
USER_DATA_DIR = os.path.join('data', ACCOUNT_NUM)
json_path = os.path.join(USER_DATA_DIR, "aws_cis_json_" + ACCOUNT_NUM + ".json")
if not os.path.exists(USER_DATA_DIR):
os.makedirs(USER_DATA_DIR)
logger.info(" Directory '%s' created" % USER_DATA_DIR)
logger.info(" Creating json report file...")
try:
with open(json_path, "w") as fp:
json.dump(outer, fp, sort_keys=True, indent=4, separators=(',', ': '))
except Exception as e:
logger.error(f" Error while writing to JSON file: {e} ")
exit(1)
return 0
def shortAnnotation(self, controlResult):
logger.info(" ---Inside utils :: shortAnnotation()--- ")
"""Summary
Args:
controlResult (TYPE): Description
Returns:
TYPE: Description
"""
annotation = []
longAnnotation = False
for m, _ in enumerate(controlResult):
for n in range(len(controlResult[m])):
if controlResult[m][n]['Result'] is False:
if len(str(annotation)) < 220:
annotation.append(controlResult[m][n]['ControlId'])
else:
longAnnotation = True
if longAnnotation:
annotation.append("etc")
return "{\"Failed\":" + json.dumps(annotation) + "}"
else:
return "{\"Failed\":" + json.dumps(annotation) + "}"
    def send_results_to_sns(self, url):
        """Publish the report's signed URL to the configured SNS topic.

        Args:
            url: signed URL created by the S3 upload function.
        """
        logger.info(" ---Inside utils :: send_results_to_sns()--- ")
        # Extract the region embedded in the topic ARN so the client talks to
        # the topic's home region.
        region = (SNS_TOPIC_ARN.split("sns:", 1)[1]).split(":", 1)[0]
        client = self.session.client('sns', region_name=region)
        client.publish(
            TopicArn=SNS_TOPIC_ARN,
            Subject="AWS CIS Benchmark report - " + str(time.strftime("%c")),
            Message=json.dumps({'default': url}),
            MessageStructure='json'
        )
# returns the list of redShift clusters
def list_redshift_clusters(self, region: str) -> list:
logger.info(" ---Inside utils :: list_redshift_clusters()---")
"""Summary
Returns:
TYPE: list
"""
redshift_clusters = []
client = self.session.client('redshift', region_name=region)
marker = ''
while True:
if marker == '' or marker is None:
response = client.describe_clusters()
else:
response = client.describe_clusters(
Marker=marker
)
for cluster in response['Clusters']:
redshift_clusters.append(cluster['ClusterIdentifier'])
try:
marker = response['Marker']
if marker == '':
break
except:
break
return redshift_clusters
# returns the list of elastic load balancers
def list_elb(self, region: str) -> list:
logger.info(" ---Inside utils :: list_elb()---")
"""Summary
Returns:
TYPE: list
"""
elb_lst = []
client = self.session.client('elb', region_name=region)
marker = ''
while True:
if marker == '' or marker is None:
response = client.describe_load_balancers()
else:
response = client.describe_load_balancers(
Marker=marker
)
for lb in response['LoadBalancerDescriptions']:
elb_lst.append(lb['LoadBalancerName'])
try:
marker = response['Marker']
if marker == '':
break
except:
break
return elb_lst
# list s3 buckets
def list_s3_buckets(self) -> list:
"""
:return:
"""
logger.info(" ---Inside utils :: list_s3_buckets")
buckets = []
client = self.session.client('s3')
response = client.list_buckets()
return response['Buckets']
# list rds instances
def list_rds_instances(self, regions) -> dict:
"""
:param regions:
:return:
"""
logger.info(" ---Inside utils :: list_rds_instances()--- ")
rds_instance_lst = {}
for region in regions:
client = self.session.client('rds', region_name=region)
marker = ''
while True:
response = client.describe_db_instances(
MaxRecords=100,
Marker=marker
)
rds_instance_lst.setdefault(region, []).extend(response['DBInstances'])
try:
marker = response['Marker']
if marker == '':
break
except KeyError:
break
return rds_instance_lst | PypiClean |
/squirrel_datasets_core-0.3.1-py3-none-any.whl/squirrel_datasets_core/datasets/adult_dataset/driver.py | from __future__ import annotations
import os
from typing import TYPE_CHECKING
import pandas as pd
from squirrel.driver import IterDriver
from squirrel.iterstream import IterableSource
from squirrel_datasets_core.datasets.utils import proportionate_sample_df
if TYPE_CHECKING:
from squirrel.iterstream import Composable
# Remote location of the OpenML "adult" census-income CSV.
META_DATA = dict(
    filename="adult.csv",
    url="https://datahub.io/machine-learning/adult/r",
)

# Column names of the adult dataset.
# NOTE(review): not referenced by the code visible in this module --
# presumably kept for consumers; confirm before removing.
_FEATURE_NAMES = [
    "age",
    "workclass",
    "fnlwgt",
    "education",
    "education-num",
    "marital-status",
    "occupation",
    "relationship",
    "race",
    "sex",
    "capitalgain",
    "capitalloss",
    "hoursperweek",
    "native-country",
    "class",
]
class AdultIncome(IterDriver):
    """Squirrel iter-driver over the OpenML "adult" census-income dataset."""

    # Registry name used by squirrel to look this driver up.
    name = "adult_income"

    def __init__(self, **kwargs) -> None:
        """Initialize the adult income dataset driver (downloads and splits the data)."""
        super().__init__(**kwargs)
        self._train_df, self._test_df = self._init()
        # Keep the splits as lists of plain row dicts for cheap iteration.
        self._train_df = self._train_df.to_dict(orient="records")
        self._test_df = self._test_df.to_dict(orient="records")

    def _init(self) -> tuple[pd.DataFrame, pd.DataFrame]:
        """Download the dataset and split it into (train, test) DataFrames.

        Uses a proportionate (stratified on "class") 80/20 split with a fixed
        seed. The original annotation claimed a single DataFrame was returned.
        """
        df = pd.read_csv(
            os.path.join(META_DATA["url"], META_DATA["filename"]),
            index_col=None,
        )
        # Represent missing values as the literal string "NAN" so they
        # survive the dict conversion.
        df = df.fillna("NAN")
        train_df, test_df = proportionate_sample_df(df, "class", 0.2, seed=42)
        return train_df, test_df

    def get_iter(self, split: str = "train", shuffle_item_buffer: int = 100, **kwargs) -> Composable:
        """
        Get an iterator over samples.

        Args:
            split (str): can be `train` or `test`.
            shuffle_item_buffer (int): the size of the buffer used to shuffle samples after being fetched. Please note
                the memory footprint of samples
        """
        assert split in ["train", "test"]
        if split == "train":
            return IterableSource(self._train_df).shuffle(size=shuffle_item_buffer)
        else:
            return IterableSource(self._test_df).shuffle(size=shuffle_item_buffer)
/aiolirc-0.1.2.tar.gz/aiolirc-0.1.2/README.rst |
aiolirc
=======
.. image:: http://img.shields.io/pypi/v/aiolirc.svg
:target: https://pypi.python.org/pypi/aiolirc
.. image:: https://img.shields.io/badge/license-GPLv3-brightgreen.svg
:target: https://github.com/pylover/aiolirc/blob/master/LICENSE
Jump To
-------
* `Documentation <http://aiolirc.dobisel.com>`_
* `Python package index <https://pypi.python.org/pypi/aiolirc>`_
* `Source on github <https://github.com/pylover/aiolirc>`_
* `Downloads <https://pypi.python.org/pypi/aiolirc#downloads>`_
About
-----
Asynchronous messaging using Python's new async/await syntax, introduced in version 3.5, is a lot of fun!
So, I decided to provide an asynchronous context manager and iterator wrapper for
`Linux Infra-Red Remote Control(LIRC) <http://www.lirc.org/>`_.
Happily, Cython works well with asyncio, so the `lirc_client` C extension has been implemented as a Cython extension
type.
In addition, an `IRCDispatcher` type and a `listen_for` decorator have been provided.
Install
-------
::
$ apt-get install liblircclient-dev python3.5-dev build-essential
$ pip install cython
$ pip install aiolirc
Quick Start
-----------
The simplest way to use this library is the `very_quickstart` function, as follows::
from aiolirc import very_quickstart, listen_for
@listen_for('play')
async def do_play(loop):
...
# Do play stuff
very_quickstart('my-prog') # my-prog is configured in your lircrc file.
Another coroutine function named `quickstart` is also available. This lets you control the event loop's
life-cycle::
import asyncio
from aiolirc import quickstart
main_loop = asyncio.get_event_loop()
try:
main_loop.run_until_complete(quickstart(loop=main_loop))
except KeyboardInterrupt:
print('CTRL+C detected. terminating...')
raise SystemExit(1)
finally:
if not main_loop.is_closed():
main_loop.close()
The `IRCDispatcher`
-------------------
Constructor
^^^^^^^^^^^
::
def __init__(self, source: LIRCClient, loop: asyncio.BaseEventLoop=None):
Example of usage
^^^^^^^^^^^^^^^^
::
import asyncio
from aiolirc.lirc_client import LIRCClient
from aiolirc.dispatcher import IRCDispatcher, listen_for
@listen_for('amp power', repeat=5)
async def amp_power(loop):
...
# Do your stuff
@listen_for('amp source')
async def amp_source(loop):
...
# Do your stuff
async with LIRCClient('my-prog') as client:
dispatcher = IRCDispatcher(client)
await dispatcher.listen()
The `LIRCClient`
----------------
Constructor
^^^^^^^^^^^
::
def __cinit__(self, lircrc_prog, *, lircrc_file='~/.config/lircrc', loop=None, check_interval=.05, verbose=False,
blocking=False):
For more advanced control over the messages received from lirc, asynchronously iterate over an instance of the `LIRCClient` after
calling `LIRCClient.lirc_init()`, and make sure `LIRCClient.lirc_deinit()` is called after finishing your work
with `LIRCClient`::
from aiolirc.lirc_client import LIRCClient
client = LIRCClient('my-prog')
try:
client.lirc_init()
async for cmd in client:
print(cmd)
finally:
client.lirc_deinit()
You may use the `LIRCClient` as an asynchronous context manager as described as follows, to automatically call the
`LIRCClient.lirc_init()` and `LIRCClient.lirc_deinit()` functions, and also acquiring a lock to prevent multiple
instances of the `LIRCClient` from reading messages from lirc_client wrapper::
from aiolirc.lirc_client import LIRCClient
async with LIRCClient('my-prog') as client:
async for cmd in client:
print(cmd)
Systemd
-------
Create a main.py::
import sys
import asyncio
from aiolirc import IRCDispatcher, LIRCClient
async def launch() -> int:
async with LIRCClient('my-prog', lircrc_file='path/to/lircrc', check_interval=.06) as client:
dispatcher = IRCDispatcher(client)
result = (await asyncio.gather(dispatcher.listen(), return_exceptions=True))[0]
if isinstance(result, Exception):
raise result
return 0
def main():
main_loop = asyncio.get_event_loop()
try:
return main_loop.run_until_complete(launch())
except KeyboardInterrupt:
print('CTRL+C detected.')
return 1
finally:
if not main_loop.is_closed():
main_loop.close()
if __name__ == '__main__':
sys.exit(main())
`/etc/systemd/system/aiolirc.service` file::
[Unit]
Description=aiolirc
[Service]
ExecStart=python3.5 /path/to/main.py
User=user
Group=group
[Install]
WantedBy=multi-user.target
systemctl::
$ systemctl enable aiolirc
$ systemctl start aiolirc
$ systemctl restart aiolirc
$ ps -Af | grep 'main.py'
$ systemctl stop aiolirc
Change Log
----------
**0.1.0**
- README.rst
| PypiClean |
/dnv_bladed_models-0.3.44.tar.gz/dnv_bladed_models-0.3.44/src/dnv_bladed_models/midpoint_method_fixed_step.py |
from __future__ import annotations
from datetime import date, datetime # noqa: F401
from enum import Enum, IntEnum
import re # noqa: F401
from typing import Any, Dict, List, Optional, Type, Union, Callable # noqa: F401
from pathlib import Path
from typing import TypeVar
Model = TypeVar('Model', bound='BaseModel')
StrBytes = Union[str, bytes]
from pydantic import AnyUrl, BaseModel, EmailStr, Field, validator, root_validator, Extra # noqa: F401
from dnv_bladed_models.fixed_step_integrator import FixedStepIntegrator
class MidpointMethodFixedStep(FixedStepIntegrator, IntegratorType='MidpointMethodFixedStep'):
    """MidpointMethodFixedStep - Settings for the Midpoint Method Fixed Step integrator.

    Attributes:
    ----------
    IntegratorType : str, readonly, default='MidpointMethodFixedStep'
        Allows the schema to identify the type of the object. For this type of object, this must always be set to 'MidpointMethodFixedStep'
    """

    # Schema discriminator: always 'MidpointMethodFixedStep', immutable
    # (allow_mutation=False) and serialised under the 'IntegratorType' alias.
    IntegratorType: Optional[str] = Field(alias="IntegratorType", default='MidpointMethodFixedStep', allow_mutation=False)

    class Config:
        # Pydantic model configuration: reject unknown fields, re-validate on
        # attribute assignment, and allow population by field name or alias.
        extra = Extra.forbid
        validate_assignment = True
        allow_population_by_field_name = True
        pass

    @root_validator(pre=True)
    def _parsing_ignores_underscore_properties(cls, values: dict[str, any]):
        # Pre-validation hook: drop underscore-prefixed keys at the top level
        # (and one level down inside dict values) so metadata-style entries in
        # input documents do not trip Extra.forbid.
        allowed_vals = {}
        for key, val in values.items():
            if not key.startswith('_'):
                if isinstance(val, dict):
                    allowed_child_vals = {}
                    for child_key, child_val in val.items():
                        if not child_key.startswith('_'):
                            allowed_child_vals[child_key] = child_val
                    allowed_vals[key] = allowed_child_vals
                else:
                    allowed_vals[key] = val
        return allowed_vals

    def to_json(
        self,
        *,
        include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
        exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None,
        by_alias: bool = True,
        skip_defaults: Optional[bool] = None,
        exclude_unset: bool = False,
        exclude_defaults: bool = False,
        exclude_none: bool = True,
        encoder: Optional[Callable[[Any], Any]] = None,
        models_as_dict: bool = True,
        **dumps_kwargs: Any) -> str:
        r"""
        Generates a JSON string representation of the model.

        Notes
        -----
        `include` and `exclude` arguments as per `dict()`.
        `encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.

        Examples
        --------
        >>> model.to_json()
        Renders the full JSON representation of the model object.
        """
        # Default to pretty-printed (2-space indented) output unless the
        # caller explicitly chose an indent.
        if dumps_kwargs.get('indent') is None:
            dumps_kwargs.update(indent=2)
        return super().json(
            include=include,
            exclude=exclude,
            by_alias=by_alias,
            skip_defaults=skip_defaults,
            exclude_unset=exclude_unset,
            exclude_defaults=exclude_defaults,
            exclude_none=exclude_none,
            encoder=encoder,
            models_as_dict=models_as_dict,
            **dumps_kwargs)

    @classmethod
    def from_file(
        cls: Type['Model'],
        path: Union[str, Path]) -> 'Model':
        r"""
        Loads a model from a given file path.

        Parameters
        ----------
        path : string
            The file path to the model.

        Returns
        -------
        MidpointMethodFixedStep
            The model object.

        Raises
        ------
        ValueError, ValidationError
            If the JSON document does not correctly describe the model according to the model schema.

        Examples
        --------
        >>> model = MidpointMethodFixedStep.from_file('/path/to/file')
        """
        return super().parse_file(path=path)

    @classmethod
    def from_json(
        cls: Type['Model'],
        b: StrBytes) -> 'Model':
        r"""
        Creates a model object from a JSON string.

        Parameters
        ----------
        b: StrBytes
            The JSON string describing the model.

        Returns
        -------
        MidpointMethodFixedStep
            The model object.

        Raises
        ------
        ValueError, ValidationError
            If the JSON document does not correctly describe the model according to the model schema.

        Examples
        --------
        >>> model = MidpointMethodFixedStep.from_json('{ ... }')
        """
        return super().parse_raw(
            b=b,
            content_type='application/json')

    @classmethod
    def from_dict(
        cls: Type['Model'],
        obj: Any) -> 'Model':
        r"""
        Creates a model object from a dict.

        Parameters
        ----------
        obj : Any
            The dictionary object describing the model.

        Returns
        -------
        MidpointMethodFixedStep
            The model object.

        Raises
        ------
        ValueError, ValidationError
            If the JSON document does not correctly describe the model according to the model schema.
        """
        return super().parse_obj(obj=obj)

    def to_file(
        self,
        path: Union[str, Path]):
        r"""
        Writes the model as a JSON document to a file with UTF8 encoding.

        Parameters
        ----------
        path : string
            The file path to which the model will be written.

        Examples
        --------
        >>> model.to_file('/path/to/file')
        """
        with open(file=path, mode='w', encoding="utf8") as output_file:
            output_file.write(self.to_json())
MidpointMethodFixedStep.update_forward_refs() | PypiClean |
/columbia-discord-bot-0.2.1.tar.gz/columbia-discord-bot-0.2.1/docs/_build/html/_static/aiohttp/web.py | import asyncio
import logging
import socket
import sys
from argparse import ArgumentParser
from collections.abc import Iterable
from importlib import import_module
from typing import (
Any,
Awaitable,
Callable,
Iterable as TypingIterable,
List,
Optional,
Set,
Type,
Union,
cast,
)
from .abc import AbstractAccessLogger
from .helpers import all_tasks
from .log import access_logger
from .web_app import Application as Application, CleanupError as CleanupError
from .web_exceptions import (
HTTPAccepted as HTTPAccepted,
HTTPBadGateway as HTTPBadGateway,
HTTPBadRequest as HTTPBadRequest,
HTTPClientError as HTTPClientError,
HTTPConflict as HTTPConflict,
HTTPCreated as HTTPCreated,
HTTPError as HTTPError,
HTTPException as HTTPException,
HTTPExpectationFailed as HTTPExpectationFailed,
HTTPFailedDependency as HTTPFailedDependency,
HTTPForbidden as HTTPForbidden,
HTTPFound as HTTPFound,
HTTPGatewayTimeout as HTTPGatewayTimeout,
HTTPGone as HTTPGone,
HTTPInsufficientStorage as HTTPInsufficientStorage,
HTTPInternalServerError as HTTPInternalServerError,
HTTPLengthRequired as HTTPLengthRequired,
HTTPMethodNotAllowed as HTTPMethodNotAllowed,
HTTPMisdirectedRequest as HTTPMisdirectedRequest,
HTTPMovedPermanently as HTTPMovedPermanently,
HTTPMultipleChoices as HTTPMultipleChoices,
HTTPNetworkAuthenticationRequired as HTTPNetworkAuthenticationRequired,
HTTPNoContent as HTTPNoContent,
HTTPNonAuthoritativeInformation as HTTPNonAuthoritativeInformation,
HTTPNotAcceptable as HTTPNotAcceptable,
HTTPNotExtended as HTTPNotExtended,
HTTPNotFound as HTTPNotFound,
HTTPNotImplemented as HTTPNotImplemented,
HTTPNotModified as HTTPNotModified,
HTTPOk as HTTPOk,
HTTPPartialContent as HTTPPartialContent,
HTTPPaymentRequired as HTTPPaymentRequired,
HTTPPermanentRedirect as HTTPPermanentRedirect,
HTTPPreconditionFailed as HTTPPreconditionFailed,
HTTPPreconditionRequired as HTTPPreconditionRequired,
HTTPProxyAuthenticationRequired as HTTPProxyAuthenticationRequired,
HTTPRedirection as HTTPRedirection,
HTTPRequestEntityTooLarge as HTTPRequestEntityTooLarge,
HTTPRequestHeaderFieldsTooLarge as HTTPRequestHeaderFieldsTooLarge,
HTTPRequestRangeNotSatisfiable as HTTPRequestRangeNotSatisfiable,
HTTPRequestTimeout as HTTPRequestTimeout,
HTTPRequestURITooLong as HTTPRequestURITooLong,
HTTPResetContent as HTTPResetContent,
HTTPSeeOther as HTTPSeeOther,
HTTPServerError as HTTPServerError,
HTTPServiceUnavailable as HTTPServiceUnavailable,
HTTPSuccessful as HTTPSuccessful,
HTTPTemporaryRedirect as HTTPTemporaryRedirect,
HTTPTooManyRequests as HTTPTooManyRequests,
HTTPUnauthorized as HTTPUnauthorized,
HTTPUnavailableForLegalReasons as HTTPUnavailableForLegalReasons,
HTTPUnprocessableEntity as HTTPUnprocessableEntity,
HTTPUnsupportedMediaType as HTTPUnsupportedMediaType,
HTTPUpgradeRequired as HTTPUpgradeRequired,
HTTPUseProxy as HTTPUseProxy,
HTTPVariantAlsoNegotiates as HTTPVariantAlsoNegotiates,
HTTPVersionNotSupported as HTTPVersionNotSupported,
)
from .web_fileresponse import FileResponse as FileResponse
from .web_log import AccessLogger
from .web_middlewares import (
middleware as middleware,
normalize_path_middleware as normalize_path_middleware,
)
from .web_protocol import (
PayloadAccessError as PayloadAccessError,
RequestHandler as RequestHandler,
RequestPayloadError as RequestPayloadError,
)
from .web_request import (
BaseRequest as BaseRequest,
FileField as FileField,
Request as Request,
)
from .web_response import (
ContentCoding as ContentCoding,
Response as Response,
StreamResponse as StreamResponse,
json_response as json_response,
)
from .web_routedef import (
AbstractRouteDef as AbstractRouteDef,
RouteDef as RouteDef,
RouteTableDef as RouteTableDef,
StaticDef as StaticDef,
delete as delete,
get as get,
head as head,
options as options,
patch as patch,
post as post,
put as put,
route as route,
static as static,
view as view,
)
from .web_runner import (
AppRunner as AppRunner,
BaseRunner as BaseRunner,
BaseSite as BaseSite,
GracefulExit as GracefulExit,
NamedPipeSite as NamedPipeSite,
ServerRunner as ServerRunner,
SockSite as SockSite,
TCPSite as TCPSite,
UnixSite as UnixSite,
)
from .web_server import Server as Server
from .web_urldispatcher import (
AbstractResource as AbstractResource,
AbstractRoute as AbstractRoute,
DynamicResource as DynamicResource,
PlainResource as PlainResource,
PrefixedSubAppResource as PrefixedSubAppResource,
Resource as Resource,
ResourceRoute as ResourceRoute,
StaticResource as StaticResource,
UrlDispatcher as UrlDispatcher,
UrlMappingMatchInfo as UrlMappingMatchInfo,
View as View,
)
from .web_ws import (
WebSocketReady as WebSocketReady,
WebSocketResponse as WebSocketResponse,
WSMsgType as WSMsgType,
)
# Public re-export surface of aiohttp.web: everything importable via
# ``from aiohttp import web`` is enumerated here, grouped by source module.
__all__ = (
    # web_app
    "Application",
    "CleanupError",
    # web_exceptions
    "HTTPAccepted",
    "HTTPBadGateway",
    "HTTPBadRequest",
    "HTTPClientError",
    "HTTPConflict",
    "HTTPCreated",
    "HTTPError",
    "HTTPException",
    "HTTPExpectationFailed",
    "HTTPFailedDependency",
    "HTTPForbidden",
    "HTTPFound",
    "HTTPGatewayTimeout",
    "HTTPGone",
    "HTTPInsufficientStorage",
    "HTTPInternalServerError",
    "HTTPLengthRequired",
    "HTTPMethodNotAllowed",
    "HTTPMisdirectedRequest",
    "HTTPMovedPermanently",
    "HTTPMultipleChoices",
    "HTTPNetworkAuthenticationRequired",
    "HTTPNoContent",
    "HTTPNonAuthoritativeInformation",
    "HTTPNotAcceptable",
    "HTTPNotExtended",
    "HTTPNotFound",
    "HTTPNotImplemented",
    "HTTPNotModified",
    "HTTPOk",
    "HTTPPartialContent",
    "HTTPPaymentRequired",
    "HTTPPermanentRedirect",
    "HTTPPreconditionFailed",
    "HTTPPreconditionRequired",
    "HTTPProxyAuthenticationRequired",
    "HTTPRedirection",
    "HTTPRequestEntityTooLarge",
    "HTTPRequestHeaderFieldsTooLarge",
    "HTTPRequestRangeNotSatisfiable",
    "HTTPRequestTimeout",
    "HTTPRequestURITooLong",
    "HTTPResetContent",
    "HTTPSeeOther",
    "HTTPServerError",
    "HTTPServiceUnavailable",
    "HTTPSuccessful",
    "HTTPTemporaryRedirect",
    "HTTPTooManyRequests",
    "HTTPUnauthorized",
    "HTTPUnavailableForLegalReasons",
    "HTTPUnprocessableEntity",
    "HTTPUnsupportedMediaType",
    "HTTPUpgradeRequired",
    "HTTPUseProxy",
    "HTTPVariantAlsoNegotiates",
    "HTTPVersionNotSupported",
    # web_fileresponse
    "FileResponse",
    # web_middlewares
    "middleware",
    "normalize_path_middleware",
    # web_protocol
    "PayloadAccessError",
    "RequestHandler",
    "RequestPayloadError",
    # web_request
    "BaseRequest",
    "FileField",
    "Request",
    # web_response
    "ContentCoding",
    "Response",
    "StreamResponse",
    "json_response",
    # web_routedef
    "AbstractRouteDef",
    "RouteDef",
    "RouteTableDef",
    "StaticDef",
    "delete",
    "get",
    "head",
    "options",
    "patch",
    "post",
    "put",
    "route",
    "static",
    "view",
    # web_runner
    "AppRunner",
    "BaseRunner",
    "BaseSite",
    "GracefulExit",
    "ServerRunner",
    "SockSite",
    "TCPSite",
    "UnixSite",
    "NamedPipeSite",
    # web_server
    "Server",
    # web_urldispatcher
    "AbstractResource",
    "AbstractRoute",
    "DynamicResource",
    "PlainResource",
    "PrefixedSubAppResource",
    "Resource",
    "ResourceRoute",
    "StaticResource",
    "UrlDispatcher",
    "UrlMappingMatchInfo",
    "View",
    # web_ws
    "WebSocketReady",
    "WebSocketResponse",
    "WSMsgType",
    # web
    "run_app",
)
# ``ssl`` may be unavailable on Python builds compiled without SSL support;
# fall back to ``Any`` so the annotations below still resolve.
try:
    from ssl import SSLContext
except ImportError:  # pragma: no cover
    SSLContext = Any  # type: ignore[misc,assignment]

# One or more hostnames/addresses to bind the server to.
HostSequence = TypingIterable[str]
async def _run_app(
    app: Union[Application, Awaitable[Application]],
    *,
    host: Optional[Union[str, HostSequence]] = None,
    port: Optional[int] = None,
    path: Optional[str] = None,
    sock: Optional[Union[socket.socket, TypingIterable[socket.socket]]] = None,
    shutdown_timeout: float = 60.0,
    keepalive_timeout: float = 75.0,
    ssl_context: Optional[SSLContext] = None,
    print: Callable[..., None] = print,
    backlog: int = 128,
    access_log_class: Type[AbstractAccessLogger] = AccessLogger,
    access_log_format: str = AccessLogger.LOG_FORMAT,
    access_log: Optional[logging.Logger] = access_logger,
    handle_signals: bool = True,
    reuse_address: Optional[bool] = None,
    reuse_port: Optional[bool] = None,
) -> None:
    """Internal helper that does the actual work of running the application.

    Resolves ``app`` (awaiting it if a coroutine was passed), sets up an
    ``AppRunner``, binds every requested site (TCP host(s), Unix path(s),
    and/or pre-made socket(s)), then sleeps until cancelled; the runner is
    always cleaned up on exit.
    """
    if asyncio.iscoroutine(app):
        app = await app  # type: ignore[misc]

    app = cast(Application, app)

    runner = AppRunner(
        app,
        handle_signals=handle_signals,
        access_log_class=access_log_class,
        access_log_format=access_log_format,
        access_log=access_log,
        keepalive_timeout=keepalive_timeout,
    )

    await runner.setup()

    sites: List[BaseSite] = []

    try:
        # A single host string (or bytes-like) gets one TCP site; an iterable
        # of hosts gets one site each, all sharing the same port.
        if host is not None:
            if isinstance(host, (str, bytes, bytearray, memoryview)):
                sites.append(
                    TCPSite(
                        runner,
                        host,
                        port,
                        shutdown_timeout=shutdown_timeout,
                        ssl_context=ssl_context,
                        backlog=backlog,
                        reuse_address=reuse_address,
                        reuse_port=reuse_port,
                    )
                )
            else:
                for h in host:
                    sites.append(
                        TCPSite(
                            runner,
                            h,
                            port,
                            shutdown_timeout=shutdown_timeout,
                            ssl_context=ssl_context,
                            backlog=backlog,
                            reuse_address=reuse_address,
                            reuse_port=reuse_port,
                        )
                    )
        elif path is None and sock is None or port is not None:
            # No explicit binding requested (or an explicit port was):
            # bind a default TCP site.
            sites.append(
                TCPSite(
                    runner,
                    port=port,
                    shutdown_timeout=shutdown_timeout,
                    ssl_context=ssl_context,
                    backlog=backlog,
                    reuse_address=reuse_address,
                    reuse_port=reuse_port,
                )
            )

        if path is not None:
            if isinstance(path, (str, bytes, bytearray, memoryview)):
                sites.append(
                    UnixSite(
                        runner,
                        path,
                        shutdown_timeout=shutdown_timeout,
                        ssl_context=ssl_context,
                        backlog=backlog,
                    )
                )
            else:
                for p in path:
                    sites.append(
                        UnixSite(
                            runner,
                            p,
                            shutdown_timeout=shutdown_timeout,
                            ssl_context=ssl_context,
                            backlog=backlog,
                        )
                    )

        if sock is not None:
            if not isinstance(sock, Iterable):
                sites.append(
                    SockSite(
                        runner,
                        sock,
                        shutdown_timeout=shutdown_timeout,
                        ssl_context=ssl_context,
                        backlog=backlog,
                    )
                )
            else:
                for s in sock:
                    sites.append(
                        SockSite(
                            runner,
                            s,
                            shutdown_timeout=shutdown_timeout,
                            ssl_context=ssl_context,
                            backlog=backlog,
                        )
                    )
        for site in sites:
            await site.start()

        if print:  # pragma: no branch
            names = sorted(str(s.name) for s in runner.sites)
            print(
                "======== Running on {} ========\n"
                "(Press CTRL+C to quit)".format(", ".join(names))
            )

        # sleep forever by 1 hour intervals,
        # on Windows before Python 3.8 wake up every 1 second to handle
        # Ctrl+C smoothly
        if sys.platform == "win32" and sys.version_info < (3, 8):
            delay = 1
        else:
            delay = 3600

        while True:
            await asyncio.sleep(delay)
    finally:
        await runner.cleanup()
def _cancel_tasks(
to_cancel: Set["asyncio.Task[Any]"], loop: asyncio.AbstractEventLoop
) -> None:
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler(
{
"message": "unhandled exception during asyncio.run() shutdown",
"exception": task.exception(),
"task": task,
}
)
def run_app(
    app: Union[Application, Awaitable[Application]],
    *,
    host: Optional[Union[str, HostSequence]] = None,
    port: Optional[int] = None,
    path: Optional[str] = None,
    sock: Optional[Union[socket.socket, TypingIterable[socket.socket]]] = None,
    shutdown_timeout: float = 60.0,
    keepalive_timeout: float = 75.0,
    ssl_context: Optional[SSLContext] = None,
    print: Callable[..., None] = print,
    backlog: int = 128,
    access_log_class: Type[AbstractAccessLogger] = AccessLogger,
    access_log_format: str = AccessLogger.LOG_FORMAT,
    access_log: Optional[logging.Logger] = access_logger,
    handle_signals: bool = True,
    reuse_address: Optional[bool] = None,
    reuse_port: Optional[bool] = None,
    loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
    """Run an app locally.

    Blocking wrapper around :func:`_run_app`: creates an event loop (unless
    one is supplied), runs the application until ``GracefulExit`` or Ctrl+C,
    then cancels outstanding tasks and closes the loop.
    """
    if loop is None:
        loop = asyncio.new_event_loop()

    # Configure if and only if in debugging mode and using the default logger
    if loop.get_debug() and access_log and access_log.name == "aiohttp.access":
        if access_log.level == logging.NOTSET:
            access_log.setLevel(logging.DEBUG)
        if not access_log.hasHandlers():
            access_log.addHandler(logging.StreamHandler())

    main_task = loop.create_task(
        _run_app(
            app,
            host=host,
            port=port,
            path=path,
            sock=sock,
            shutdown_timeout=shutdown_timeout,
            keepalive_timeout=keepalive_timeout,
            ssl_context=ssl_context,
            print=print,
            backlog=backlog,
            access_log_class=access_log_class,
            access_log_format=access_log_format,
            access_log=access_log,
            handle_signals=handle_signals,
            reuse_address=reuse_address,
            reuse_port=reuse_port,
        )
    )

    try:
        asyncio.set_event_loop(loop)
        loop.run_until_complete(main_task)
    except (GracefulExit, KeyboardInterrupt):  # pragma: no cover
        pass
    finally:
        # Cancel the main task first, then anything it spawned, then shut
        # down async generators before closing the loop.
        _cancel_tasks({main_task}, loop)
        _cancel_tasks(all_tasks(loop), loop)
        loop.run_until_complete(loop.shutdown_asyncgens())
        loop.close()
def main(argv: List[str]) -> None:
    """Command-line entry point.

    Parses ``module:function`` from ``argv``, imports the module, calls the
    factory with the remaining arguments to obtain the Application, and
    serves it on the requested host/port or Unix path.
    """
    arg_parser = ArgumentParser(
        description="aiohttp.web Application server", prog="aiohttp.web"
    )
    arg_parser.add_argument(
        "entry_func",
        help=(
            "Callable returning the `aiohttp.web.Application` instance to "
            "run. Should be specified in the 'module:function' syntax."
        ),
        metavar="entry-func",
    )
    arg_parser.add_argument(
        "-H",
        "--hostname",
        help="TCP/IP hostname to serve on (default: %(default)r)",
        default="localhost",
    )
    arg_parser.add_argument(
        "-P",
        "--port",
        help="TCP/IP port to serve on (default: %(default)r)",
        type=int,
        default="8080",
    )
    arg_parser.add_argument(
        "-U",
        "--path",
        help="Unix file system path to serve on. Specifying a path will cause "
        "hostname and port arguments to be ignored.",
    )
    args, extra_argv = arg_parser.parse_known_args(argv)

    # Import logic
    mod_str, _, func_str = args.entry_func.partition(":")
    if not func_str or not mod_str:
        arg_parser.error("'entry-func' not in 'module:function' syntax")
    if mod_str.startswith("."):
        arg_parser.error("relative module names not supported")
    try:
        module = import_module(mod_str)
    except ImportError as ex:
        arg_parser.error(f"unable to import {mod_str}: {ex}")
    try:
        func = getattr(module, func_str)
    except AttributeError:
        arg_parser.error(f"module {mod_str!r} has no attribute {func_str!r}")

    # Compatibility logic
    if args.path is not None and not hasattr(socket, "AF_UNIX"):
        arg_parser.error(
            "file system paths not supported by your operating" " environment"
        )

    logging.basicConfig(level=logging.DEBUG)

    app = func(extra_argv)
    run_app(app, host=args.hostname, port=args.port, path=args.path)
    arg_parser.exit(message="Stopped\n")
# Allow `python -m aiohttp.web module:factory` style invocation.
if __name__ == "__main__":  # pragma: no branch
    main(sys.argv[1:])  # pragma: no cover
/habitat-lab-0.2.520230802.tar.gz/habitat-lab-0.2.520230802/habitat/tasks/rearrange/actions/actions.py |
# Copyright (c) Meta Platforms, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import magnum as mn
import numpy as np
from gym import spaces
import habitat_sim
from habitat.core.embodied_task import SimulatorTaskAction
from habitat.core.registry import registry
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.tasks.rearrange.actions.articulated_agent_action import (
ArticulatedAgentAction,
)
# flake8: noqa
# These actions need to be imported since there is a Python evaluation
# statement which dynamically creates the desired grip controller.
from habitat.tasks.rearrange.actions.grip_actions import (
GazeGraspAction,
GripSimulatorTaskAction,
MagicGraspAction,
SuctionGraspAction,
)
from habitat.tasks.rearrange.rearrange_sim import RearrangeSim
from habitat.tasks.rearrange.utils import rearrange_collision, rearrange_logger
@registry.register_task_action
class EmptyAction(ArticulatedAgentAction):
    """A no-op action: consumes a dummy scalar command and does nothing.

    Useful for testing and for controllers that need to wait before the next
    operation.
    """

    @property
    def action_space(self):
        dummy_box = spaces.Box(low=-1, high=1, shape=(1,), dtype=np.float32)
        return spaces.Dict({"empty_action": dummy_box})

    def step(self, *args, **kwargs):
        # Intentionally does nothing.
        pass
@registry.register_task_action
class RearrangeStopAction(SimulatorTaskAction):
    """Latches the agent's request to end the episode.

    `does_want_terminate` starts False each episode and flips to True once a
    positive stop signal is received; it never flips back within an episode.
    """

    def reset(self, *args, **kwargs):
        super().reset(*args, **kwargs)
        self.does_want_terminate = False

    def step(self, task, *args, **kwargs):
        stop_signal = kwargs.get("rearrange_stop", [1.0])
        if stop_signal[0] > 0.0:
            self.does_want_terminate = True
@registry.register_task_action
class ArmAction(ArticulatedAgentAction):
    """An arm control and grip control into one action space."""

    def __init__(self, *args, config, sim: RearrangeSim, **kwargs):
        super().__init__(*args, config=config, sim=sim, **kwargs)
        # NOTE(review): the controller classes are instantiated by `eval` on a
        # config-supplied string (e.g. "ArmRelPosAction"). Any Python
        # expression placed in the config would be executed here, so configs
        # must come from a trusted source.
        arm_controller_cls = eval(self._config.arm_controller)
        self._sim: RearrangeSim = sim
        self.arm_ctrlr = arm_controller_cls(
            *args, config=config, sim=sim, **kwargs
        )
        if self._config.grip_controller is not None:
            grip_controller_cls = eval(self._config.grip_controller)
            self.grip_ctrlr: Optional[
                GripSimulatorTaskAction
            ] = grip_controller_cls(*args, config=config, sim=sim, **kwargs)
        else:
            self.grip_ctrlr = None
        # Optionally ignore grip commands entirely (config key "disable_grip").
        self.disable_grip = False
        if "disable_grip" in config:
            self.disable_grip = config["disable_grip"]

    def reset(self, *args, **kwargs):
        # Reset both sub-controllers at episode start.
        self.arm_ctrlr.reset(*args, **kwargs)
        if self.grip_ctrlr is not None:
            self.grip_ctrlr.reset(*args, **kwargs)

    @property
    def action_space(self):
        # Compose the arm sub-space with the grip sub-space (only when the
        # grip controller actually consumes an action input).
        action_spaces = {
            self._action_arg_prefix
            + "arm_action": self.arm_ctrlr.action_space,
        }
        if self.grip_ctrlr is not None and self.grip_ctrlr.requires_action:
            action_spaces[
                self._action_arg_prefix + "grip_action"
            ] = self.grip_ctrlr.action_space
        return spaces.Dict(action_spaces)

    def step(self, *args, **kwargs):
        # Forward the arm command, then the grip command unless disabled.
        arm_action = kwargs[self._action_arg_prefix + "arm_action"]
        self.arm_ctrlr.step(arm_action)
        if self.grip_ctrlr is not None and not self.disable_grip:
            grip_action = kwargs[self._action_arg_prefix + "grip_action"]
            self.grip_ctrlr.step(grip_action)
@registry.register_task_action
class ArmRelPosAction(ArticulatedAgentAction):
    """Offsets the arm motor targets by the commanded joint deltas.

    The normalized [-1, 1] action is scaled by `delta_pos_limit` before being
    added to the current motor targets.
    """

    def __init__(self, *args, config, sim: RearrangeSim, **kwargs):
        super().__init__(*args, config=config, sim=sim, **kwargs)
        self._delta_pos_limit = self._config.delta_pos_limit

    @property
    def action_space(self):
        n_joints = self._config.arm_joint_dimensionality
        return spaces.Box(low=-1, high=1, shape=(n_joints,), dtype=np.float32)

    def step(self, delta_pos, should_step=True, *args, **kwargs):
        # Scale the normalized command into joint-space deltas.
        scaled_delta = np.clip(delta_pos, -1, 1) * self._delta_pos_limit
        self.cur_articulated_agent.arm_motor_pos = (
            self.cur_articulated_agent.arm_motor_pos + scaled_delta
        )
@registry.register_task_action
class ArmRelPosMaskAction(ArticulatedAgentAction):
    """
    The arm motor targets are offset by the delta joint values specified by the
    action
    """

    def __init__(self, *args, config, sim: RearrangeSim, **kwargs):
        super().__init__(*args, config=config, sim=sim, **kwargs)
        self._delta_pos_limit = self._config.delta_pos_limit
        # Binary mask over joints; joints with mask == 0 receive a zero delta.
        self._arm_joint_mask = self._config.arm_joint_mask

    @property
    def action_space(self):
        return spaces.Box(
            shape=(self._config.arm_joint_dimensionality,),
            low=-1,
            high=1,
            dtype=np.float32,
        )

    def step(self, delta_pos, should_step=True, *args, **kwargs):
        # clip from -1 to 1
        delta_pos = np.clip(delta_pos, -1, 1)
        delta_pos *= self._delta_pos_limit
        # Scatter the command into the full joint vector; masked-out joints
        # keep a zero delta.
        mask_delta_pos = np.zeros(len(self._arm_joint_mask))
        src_idx = 0
        tgt_idx = 0
        for mask in self._arm_joint_mask:
            if mask == 0:
                # NOTE(review): src_idx is advanced for masked-out joints too,
                # so `delta_pos` must be full-dimensional and its masked
                # entries are simply discarded — confirm this matches the
                # intended action layout.
                tgt_idx += 1
                src_idx += 1
                continue
            mask_delta_pos[tgt_idx] = delta_pos[src_idx]
            tgt_idx += 1
            src_idx += 1

        # Although habitat_sim will prevent the motor from exceeding limits,
        # clip the motor joints first here to prevent the arm from being unstable.
        min_limit, max_limit = self.cur_articulated_agent.arm_joint_limits
        target_arm_pos = (
            mask_delta_pos + self.cur_articulated_agent.arm_motor_pos
        )
        set_arm_pos = np.clip(target_arm_pos, min_limit, max_limit)

        # The actual joint positions
        self._sim: RearrangeSim
        self.cur_articulated_agent.arm_motor_pos = set_arm_pos
@registry.register_task_action
class ArmRelPosKinematicAction(ArticulatedAgentAction):
    """Kinematically offsets the arm joints by the commanded deltas.

    Unlike the motor-based variant, this writes joint positions directly and
    records them as fixed joint values.
    """

    def __init__(self, *args, config, sim: RearrangeSim, **kwargs):
        super().__init__(*args, config=config, sim=sim, **kwargs)
        self._delta_pos_limit = self._config.delta_pos_limit
        self._should_clip = self._config.get("should_clip", True)

    @property
    def action_space(self):
        n_joints = self._config.arm_joint_dimensionality
        return spaces.Box(low=0, high=1, shape=(n_joints,), dtype=np.float32)

    def step(self, delta_pos, *args, **kwargs):
        if self._should_clip:
            # Restrict the normalized command to [-1, 1].
            delta_pos = np.clip(delta_pos, -1, 1)
        delta_pos *= self._delta_pos_limit
        target_joints = delta_pos + self.cur_articulated_agent.arm_joint_pos
        self.cur_articulated_agent.arm_joint_pos = target_joints
        self.cur_articulated_agent.fix_joint_values = target_joints
@registry.register_task_action
class ArmAbsPosAction(ArticulatedAgentAction):
    """Drives the arm motors directly to the commanded joint configuration."""

    @property
    def action_space(self):
        n_joints = self._config.arm_joint_dimensionality
        return spaces.Box(low=0, high=1, shape=(n_joints,), dtype=np.float32)

    def step(self, set_pos, *args, **kwargs):
        # The command already is the desired motor target, so no clipping.
        self.cur_articulated_agent.arm_motor_pos = set_pos
@registry.register_task_action
class ArmAbsPosKinematicAction(ArticulatedAgentAction):
    """Kinematically sets the arm joints to the commanded configuration."""

    @property
    def action_space(self):
        n_joints = self._config.arm_joint_dimensionality
        return spaces.Box(low=0, high=1, shape=(n_joints,), dtype=np.float32)

    def step(self, set_pos, *args, **kwargs):
        # The command already is the desired joint configuration, so no clipping.
        self.cur_articulated_agent.arm_joint_pos = set_pos
@registry.register_task_action
class ArmRelPosKinematicReducedActionStretch(ArticulatedAgentAction):
    """
    The arm motor targets are offset by the delta joint values specified by the
    action and the mask. This function is used for Stretch.
    """

    def __init__(self, *args, config, sim: RearrangeSim, **kwargs):
        super().__init__(*args, config=config, sim=sim, **kwargs)
        self.last_arm_action = None
        self._delta_pos_limit = self._config.delta_pos_limit
        self._should_clip = self._config.get("should_clip", True)
        self._arm_joint_mask = self._config.arm_joint_mask

    def reset(self, *args, **kwargs):
        super().reset(*args, **kwargs)
        self.last_arm_action = None

    @property
    def action_space(self):
        # NOTE(review): initializing `step_c` inside a property getter is a
        # side effect; it appears unused in this file — confirm before removing.
        self.step_c = 0
        return spaces.Box(
            shape=(self._config.arm_joint_dimensionality,),
            low=-1,
            high=1,
            dtype=np.float32,
        )

    def step(self, delta_pos, *args, **kwargs):
        if self._should_clip:
            # clip from -1 to 1
            delta_pos = np.clip(delta_pos, -1, 1)
        delta_pos *= self._delta_pos_limit
        self._sim: RearrangeSim

        # Expand delta_pos based on mask
        expanded_delta_pos = np.zeros(len(self._arm_joint_mask))
        src_idx = 0
        tgt_idx = 0
        for mask in self._arm_joint_mask:
            if mask == 0:
                # NOTE(review): src_idx advances for masked-out joints as well,
                # so the corresponding `delta_pos` entries are discarded —
                # confirm this matches the intended action layout.
                tgt_idx += 1
                src_idx += 1
                continue
            expanded_delta_pos[tgt_idx] = delta_pos[src_idx]
            tgt_idx += 1
            src_idx += 1

        min_limit, max_limit = self.cur_articulated_agent.arm_joint_limits
        set_arm_pos = (
            expanded_delta_pos + self.cur_articulated_agent.arm_motor_pos
        )

        # Perform roll over to the joints so that the user cannot control
        # the motor 2, 3, 4 for the arm.
        if expanded_delta_pos[0] >= 0:
            # Extending: overflow above a joint's max spills into the next joint.
            for i in range(3):
                if set_arm_pos[i] > max_limit[i]:
                    set_arm_pos[i + 1] += set_arm_pos[i] - max_limit[i]
                    set_arm_pos[i] = max_limit[i]
        else:
            # Retracting: underflow below a joint's min is taken from the next joint.
            for i in range(3):
                if set_arm_pos[i] < min_limit[i]:
                    set_arm_pos[i + 1] -= min_limit[i] - set_arm_pos[i]
                    set_arm_pos[i] = min_limit[i]
        set_arm_pos = np.clip(set_arm_pos, min_limit, max_limit)
        self.cur_articulated_agent.arm_motor_pos = set_arm_pos
@registry.register_task_action
class BaseVelAction(ArticulatedAgentAction):
"""
The articulated agent base motion is constrained to the NavMesh and controlled with velocity commands integrated with the VelocityControl interface.
Optionally cull states with active collisions if config parameter `allow_dyn_slide` is True
"""
    def __init__(self, *args, config, sim: RearrangeSim, **kwargs):
        super().__init__(*args, config=config, sim=sim, **kwargs)
        self._sim: RearrangeSim = sim
        # Velocity controller: integrates local-frame linear/angular velocity
        # commands into a rigid-body transform each control step.
        self.base_vel_ctrl = habitat_sim.physics.VelocityControl()
        self.base_vel_ctrl.controlling_lin_vel = True
        self.base_vel_ctrl.lin_vel_is_local = True
        self.base_vel_ctrl.controlling_ang_vel = True
        self.base_vel_ctrl.ang_vel_is_local = True
        # When False, base motions that produce collisions are reverted.
        self._allow_dyn_slide = self._config.get("allow_dyn_slide", True)
        self._lin_speed = self._config.lin_speed
        self._ang_speed = self._config.ang_speed
        self._allow_back = self._config.allow_back
@property
def action_space(self):
lim = 20
return spaces.Dict(
{
self._action_arg_prefix
+ "base_vel": spaces.Box(
shape=(2,), low=-lim, high=lim, dtype=np.float32
)
}
)
def _capture_articulated_agent_state(self):
return {
"forces": self.cur_articulated_agent.sim_obj.joint_forces,
"vel": self.cur_articulated_agent.sim_obj.joint_velocities,
"pos": self.cur_articulated_agent.sim_obj.joint_positions,
}
def _set_articulated_agent_state(self, set_dat):
self.cur_articulated_agent.sim_obj.joint_positions = set_dat["forces"]
self.cur_articulated_agent.sim_obj.joint_velocities = set_dat["vel"]
self.cur_articulated_agent.sim_obj.joint_forces = set_dat["pos"]
def update_base(self):
ctrl_freq = self._sim.ctrl_freq
before_trans_state = self._capture_articulated_agent_state()
trans = self.cur_articulated_agent.sim_obj.transformation
rigid_state = habitat_sim.RigidState(
mn.Quaternion.from_matrix(trans.rotation()), trans.translation
)
target_rigid_state = self.base_vel_ctrl.integrate_transform(
1 / ctrl_freq, rigid_state
)
end_pos = self._sim.step_filter(
rigid_state.translation, target_rigid_state.translation
)
# Offset the base
end_pos -= self.cur_articulated_agent.params.base_offset
target_trans = mn.Matrix4.from_(
target_rigid_state.rotation.to_matrix(), end_pos
)
self.cur_articulated_agent.sim_obj.transformation = target_trans
if not self._allow_dyn_slide:
# Check if in the new articulated_agent state the arm collides with anything.
# If so we have to revert back to the previous transform
self._sim.internal_step(-1)
colls = self._sim.get_collisions()
did_coll, _ = rearrange_collision(
colls, self._sim.snapped_obj_id, False
)
if did_coll:
# Don't allow the step, revert back.
self._set_articulated_agent_state(before_trans_state)
self.cur_articulated_agent.sim_obj.transformation = trans
if self.cur_grasp_mgr.snap_idx is not None:
# Holding onto an object, also kinematically update the object.
# object.
self.cur_grasp_mgr.update_object_to_grasp()
if self.cur_articulated_agent._base_type == "leg":
# Fix the leg joints
self.cur_articulated_agent.leg_joint_pos = (
self.cur_articulated_agent.params.leg_init_params
)
def step(self, *args, **kwargs):
lin_vel, ang_vel = kwargs[self._action_arg_prefix + "base_vel"]
lin_vel = np.clip(lin_vel, -1, 1) * self._lin_speed
ang_vel = np.clip(ang_vel, -1, 1) * self._ang_speed
if not self._allow_back:
lin_vel = np.maximum(lin_vel, 0)
self.base_vel_ctrl.linear_velocity = mn.Vector3(lin_vel, 0, 0)
self.base_vel_ctrl.angular_velocity = mn.Vector3(0, ang_vel, 0)
if lin_vel != 0.0 or ang_vel != 0.0:
self.update_base()
@registry.register_task_action
class BaseVelNonCylinderAction(ArticulatedAgentAction):
    """
    The articulated agent base motion is constrained to the NavMesh and controlled with velocity commands integrated with the VelocityControl interface.
    Optionally cull states with active collisions if config parameter `allow_dyn_slide` is True.

    Unlike ``BaseVelAction``, the robot footprint is approximated by several
    NavMesh test points (``navmesh_offset``) instead of a single cylinder,
    and an optional lateral velocity command is supported.
    """

    def __init__(self, *args, config, sim: RearrangeSim, **kwargs):
        super().__init__(*args, config=config, sim=sim, **kwargs)
        self._sim: RearrangeSim = sim
        # Velocity integrator; both linear and angular velocities are
        # expressed in the agent's local frame.
        self.base_vel_ctrl = habitat_sim.physics.VelocityControl()
        self.base_vel_ctrl.controlling_lin_vel = True
        self.base_vel_ctrl.lin_vel_is_local = True
        self.base_vel_ctrl.controlling_ang_vel = True
        self.base_vel_ctrl.ang_vel_is_local = True
        self._allow_dyn_slide = self._config.get("allow_dyn_slide", True)
        self._allow_back = self._config.allow_back
        # Max deviation (per test point) between the NavMesh-clamped and the
        # requested position before the move counts as a collision.
        self._collision_threshold = self._config.collision_threshold
        self._longitudinal_lin_speed = self._config.longitudinal_lin_speed
        self._lateral_lin_speed = self._config.lateral_lin_speed
        self._ang_speed = self._config.ang_speed
        # XZ offsets of the test points approximating the robot footprint.
        self._navmesh_offset = self._config.navmesh_offset
        self._enable_lateral_move = self._config.enable_lateral_move

    @property
    def action_space(self):
        lim = 20
        if self._enable_lateral_move:
            # (longitudinal, lateral, angular) velocity command.
            return spaces.Dict(
                {
                    self._action_arg_prefix
                    + "base_vel": spaces.Box(
                        shape=(3,), low=-lim, high=lim, dtype=np.float32
                    )
                }
            )
        else:
            # (longitudinal, angular) velocity command.
            return spaces.Dict(
                {
                    self._action_arg_prefix
                    + "base_vel": spaces.Box(
                        shape=(2,), low=-lim, high=lim, dtype=np.float32
                    )
                }
            )

    def collision_check(
        self, trans, target_trans, target_rigid_state, compute_sliding
    ):
        """
        Check whether moving the robot from ``trans`` to ``target_trans``
        collides with the NavMesh boundary at any footprint test point.

        trans: the transformation of the current location of the robot
        target_trans: the transformation of the target location of the robot given the center original Navmesh
        target_rigid_state: the target state of the robot given the center original Navmesh
        compute_sliding: if we want to compute sliding or not

        Returns a pair ``(did_collide, transform_to_apply)``.
        """
        # Get the offset positions
        num_check_cylinder = len(self._navmesh_offset)
        nav_pos_3d = [
            np.array([xz[0], 0.0, xz[1]]) for xz in self._navmesh_offset
        ]
        cur_pos = [trans.transform_point(xyz) for xyz in nav_pos_3d]
        goal_pos = [target_trans.transform_point(xyz) for xyz in nav_pos_3d]
        # For step filter of offset positions
        end_pos = []
        for i in range(num_check_cylinder):
            pos = self._sim.step_filter(cur_pos[i], goal_pos[i])
            # Sanitize the height so distances below are planar (XZ only).
            pos[1] = 0.0
            cur_pos[i][1] = 0.0
            goal_pos[i][1] = 0.0
            end_pos.append(pos)
        # Planar move distance clamped by NavMesh
        move = []
        for i in range(num_check_cylinder):
            move.append((end_pos[i] - goal_pos[i]).length())
        # For detection of linear or angular velocities
        # There is a collision if the difference between the clamped NavMesh position and target position is too great for any point.
        diff = len([v for v in move if v > self._collision_threshold])
        if diff > 0:
            # Wrap the move direction if we use sliding
            # Find the largest diff moving direction, which means that there is a collision in that cylinder
            if compute_sliding:
                max_idx = np.argmax(move)
                move_vec = end_pos[max_idx] - cur_pos[max_idx]
                new_end_pos = trans.translation + move_vec
                return True, mn.Matrix4.from_(
                    target_rigid_state.rotation.to_matrix(), new_end_pos
                )
            return True, trans
        else:
            return False, target_trans

    def update_base(self, if_rotation):
        """
        Update the base of the robot.
        if_rotation: if the robot is rotating or not
        """
        # Get the control frequency
        ctrl_freq = self._sim.ctrl_freq
        # Get the current transformation
        trans = self.cur_articulated_agent.sim_obj.transformation
        # Get the current rigid state
        rigid_state = habitat_sim.RigidState(
            mn.Quaternion.from_matrix(trans.rotation()), trans.translation
        )
        # Integrate to get target rigid state
        target_rigid_state = self.base_vel_ctrl.integrate_transform(
            1 / ctrl_freq, rigid_state
        )
        # Get the target transformation based on the target rigid state
        target_trans = mn.Matrix4.from_(
            target_rigid_state.rotation.to_matrix(),
            target_rigid_state.translation,
        )
        # We do sliding only if we allow the robot to do sliding and current
        # robot is not rotating
        compute_sliding = self._allow_dyn_slide and not if_rotation
        # Check if there is a collision
        did_coll, new_target_trans = self.collision_check(
            trans, target_trans, target_rigid_state, compute_sliding
        )
        # Update the base
        self.cur_articulated_agent.sim_obj.transformation = new_target_trans
        if self.cur_grasp_mgr.snap_idx is not None:
            # Holding onto an object, also kinematically update the object.
            self.cur_grasp_mgr.update_object_to_grasp()

        if self.cur_articulated_agent._base_type == "leg":
            # Fix the leg joints
            self.cur_articulated_agent.leg_joint_pos = (
                self.cur_articulated_agent.params.leg_init_params
            )

    def step(self, *args, **kwargs):
        """Scale the [-1, 1] policy action by the configured speeds and move
        the base if any velocity command is non-zero."""
        lateral_lin_vel = 0.0
        if self._enable_lateral_move:
            longitudinal_lin_vel, lateral_lin_vel, ang_vel = kwargs[
                self._action_arg_prefix + "base_vel"
            ]
        else:
            longitudinal_lin_vel, ang_vel = kwargs[
                self._action_arg_prefix + "base_vel"
            ]

        longitudinal_lin_vel = (
            np.clip(longitudinal_lin_vel, -1, 1) * self._longitudinal_lin_speed
        )
        lateral_lin_vel = (
            np.clip(lateral_lin_vel, -1, 1) * self._lateral_lin_speed
        )
        ang_vel = np.clip(ang_vel, -1, 1) * self._ang_speed
        if not self._allow_back:
            # Backward motion disabled: clamp negative longitudinal commands.
            longitudinal_lin_vel = np.maximum(longitudinal_lin_vel, 0)

        self.base_vel_ctrl.linear_velocity = mn.Vector3(
            longitudinal_lin_vel, 0, -lateral_lin_vel
        )
        self.base_vel_ctrl.angular_velocity = mn.Vector3(0, ang_vel, 0)

        if (
            longitudinal_lin_vel != 0.0
            or lateral_lin_vel != 0.0
            or ang_vel != 0.0
        ):
            self.update_base(ang_vel != 0.0)
@registry.register_task_action
class ArmEEAction(ArticulatedAgentAction):
    """End-effector position control for the articulated agent's arm, solved
    with inverse kinematics (requires pybullet)."""

    def __init__(self, *args, sim: RearrangeSim, **kwargs):
        self.ee_target: Optional[np.ndarray] = None
        self.ee_index: Optional[int] = 0
        super().__init__(*args, sim=sim, **kwargs)
        self._sim: RearrangeSim = sim
        self._render_ee_target = self._config.get("render_ee_target", False)
        self._ee_ctrl_lim = self._config.ee_ctrl_lim

    def reset(self, *args, **kwargs):
        super().reset()
        # Start the target at the arm's current end-effector position,
        # obtained via forward kinematics.
        arm_joints = np.array(self._sim.articulated_agent.arm_joint_pos)
        self.ee_target = self._ik_helper.calc_fk(arm_joints)

    @property
    def action_space(self):
        return spaces.Box(shape=(3,), low=-1, high=1, dtype=np.float32)

    def apply_ee_constraints(self):
        # Clamp the target into the per-end-effector workspace box.
        constraint = self._sim.articulated_agent.params.ee_constraint
        self.ee_target = np.clip(
            self.ee_target,
            constraint[self.ee_index, :, 0],
            constraint[self.ee_index, :, 1],
        )

    def set_desired_ee_pos(self, ee_pos: np.ndarray) -> None:
        # Accumulate the delta into the target, clamp it, then solve IK and
        # command the resulting joint positions.
        self.ee_target += np.array(ee_pos)
        self.apply_ee_constraints()

        cur_joints = np.array(self._sim.articulated_agent.arm_joint_pos)
        self._ik_helper.set_arm_state(cur_joints, np.zeros(cur_joints.shape))
        self._sim.articulated_agent.arm_motor_pos = list(
            self._ik_helper.calc_ik(self.ee_target)
        )

    def step(self, ee_pos, **kwargs):
        # Scale the [-1, 1] action into a bounded end-effector displacement.
        scaled_delta = np.clip(ee_pos, -1, 1) * self._ee_ctrl_lim
        self.set_desired_ee_pos(scaled_delta)

        if self._render_ee_target:
            global_pos = self._sim.articulated_agent.base_transformation.transform_point(
                self.ee_target
            )
            self._sim.viz_ids["ee_target"] = self._sim.visualize_position(
                global_pos, self._sim.viz_ids["ee_target"]
            )
@registry.register_task_action
class HumanoidJointAction(ArticulatedAgentAction):
    """Directly sets the humanoid's joint rotations and root transform from
    the action vector (e.g. to replay MOCAP poses)."""

    def __init__(self, *args, sim: RearrangeSim, **kwargs):
        super().__init__(*args, sim=sim, **kwargs)
        self._sim: RearrangeSim = sim
        # Number of controllable joints; each joint consumes 4 action
        # entries (a flattened quaternion).
        self.num_joints = self._config.num_joints

    def reset(self, *args, **kwargs):
        super().reset()

    @property
    def action_space(self):
        num_joints = self.num_joints
        num_dim_transform = 16
        # The action space is the number of joints plus 16 for a 4x4
        # transformation matrix for the base.
        # NOTE(review): step() slices *two* 4x4 matrices off the tail of the
        # vector ([-32:-16] and [-16:]) and its docstring says
        # (num_joints*4)+32 — confirm whether num_dim_transform should be 32.
        return spaces.Dict(
            {
                "human_joints_trans": spaces.Box(
                    shape=(4 * num_joints + num_dim_transform,),
                    low=-1,
                    high=1,
                    dtype=np.float32,
                )
            }
        )

    def step(self, *args, **kwargs):
        r"""
        Updates the joint rotations and root transformation of the humanoid.
        :param self._action_arg_prefix+human_joints_trans: Array of size
            (num_joints*4)+32. The last 32 dimensions define two 4x4 root
            transformation matrices, a base transform that controls the base
            of the character, and an offset transform, that controls
            a transformation offset that comes from the MOCAP pose.
            The first elements correspond to a flattened list of quaternions for each joint.
            When the array is all 0 it keeps the previous joint rotation and transform.
        """
        human_joints_trans = kwargs[
            self._action_arg_prefix + "human_joints_trans"
        ]
        # NOTE(review): if the vector really carries two matrices (+32 tail),
        # [:-16] leaves the offset matrix inside the joint slice — confirm
        # whether this should be [:-32].
        new_joints = human_joints_trans[:-16]
        new_pos_transform_base = human_joints_trans[-16:]
        new_pos_transform_offset = human_joints_trans[-32:-16]

        # When the array is all 0, this indicates we are not setting
        # the human joint
        if np.array(new_pos_transform_offset).sum() != 0:
            # Rebuild each 4x4 matrix from four consecutive 4-vectors.
            vecs_base = [
                mn.Vector4(new_pos_transform_base[i * 4 : (i + 1) * 4])
                for i in range(4)
            ]
            vecs_offset = [
                mn.Vector4(new_pos_transform_offset[i * 4 : (i + 1) * 4])
                for i in range(4)
            ]
            new_transform_offset = mn.Matrix4(*vecs_offset)
            new_transform_base = mn.Matrix4(*vecs_base)
            if (
                new_transform_offset.is_rigid_transformation()
                and new_transform_base.is_rigid_transformation()
            ):
                # TODO: this will cause many sampled actions to be invalid
                # Maybe we should update the sampling mechanism
                self.cur_articulated_agent.set_joint_transform(
                    new_joints, new_transform_offset, new_transform_base
                )
/collective.upgrade-1.7.tar.gz/collective.upgrade-1.7/bootstrap.py | import os
import shutil
import sys
import tempfile
from optparse import OptionParser
# Scratch directory that receives the freshly installed zc.buildout egg;
# removed again at the end of the script.
tmpeggs = tempfile.mkdtemp()

usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]

Bootstraps a buildout-based project.

Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.

Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''

parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")

parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))
parser.add_option("--setuptools-version",
                  help="use a specific setuptools version")

options, args = parser.parse_args()

######################################################################
# load/install setuptools

try:
    # On Python 3 urlopen lives in urllib.request; the ImportError fallback
    # below handles Python 2.
    if options.allow_site_packages:
        import setuptools
        import pkg_resources
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen

ez = {}
# Download ez_setup.py and execute it to install setuptools into tmpeggs.
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)

if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            sys.path[:] = [x for x in sys.path if sitepackage_path not in x]

setup_args = dict(to_dir=tmpeggs, download_delay=0)

if options.setuptools_version is not None:
    setup_args['version'] = options.setuptools_version

ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources

# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)

######################################################################
# Install buildout

ws = pkg_resources.working_set

# easy_install command line that will install zc.buildout into tmpeggs.
cmd = [sys.executable, '-c',
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]

find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )

if find_links:
    cmd.extend(['-f', find_links])

setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location

requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        """Return True when *parsed_version* denotes a final release."""
        try:
            return not parsed_version.is_prerelease
        except AttributeError:
            # Older setuptools: parsed_version is a tuple of string parts;
            # any '*'-part other than the final markers means a prerelease.
            for part in parsed_version:
                if (part[:1] == '*') and (part not in _final_parts):
                    return False
            return True

    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        # Pick the highest final version available on the index.
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version

if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)

import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])

######################################################################
# Import and run buildout

ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout

# If no command was given, default to running the 'bootstrap' command.
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')

# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]

zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
/spyder-terminal-1.2.2.tar.gz/spyder-terminal-1.2.2/spyder_terminal/server/static/components/caniuse-lite/data/regions/MZ.js | module.exports={C:{"52":0.022,"57":0.0176,"66":0.0044,"68":0.0088,"72":0.0044,"78":0.0176,"84":0.0264,"85":0.0044,"88":0.0132,"89":0.022,"90":0.0044,"91":0.0088,"92":0.0132,"93":0.3124,"94":1.518,"95":0.0132,_:"2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 53 54 55 56 58 59 60 61 62 63 64 65 67 69 70 71 73 74 75 76 77 79 80 81 82 83 86 87 96 3.5 3.6"},D:{"28":0.0044,"33":0.1452,"40":0.0352,"43":0.2332,"49":0.066,"55":0.0088,"56":0.0088,"57":0.0088,"60":0.0616,"62":0.0044,"63":0.0528,"65":0.0088,"69":0.0132,"70":0.0132,"73":0.0088,"74":0.0748,"79":0.0264,"80":0.0088,"81":0.1584,"83":0.0132,"84":0.0044,"85":0.0088,"86":0.0572,"87":0.3652,"88":0.022,"89":0.0176,"90":0.0308,"91":0.1056,"92":0.1276,"93":0.2816,"94":0.3784,"95":11.6468,"96":7.2644,"97":0.0088,_:"4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 29 30 31 32 34 35 36 37 38 39 41 42 44 45 46 47 48 50 51 52 53 54 58 59 61 64 66 67 68 71 72 75 76 77 78 98 99"},F:{"34":0.0044,"39":0.0044,"46":0.0088,"51":0.0044,"53":0.0044,"65":0.0044,"76":0.0088,"77":0.0396,"78":0.0044,"79":0.066,"80":1.98,"81":0.6952,_:"9 11 12 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 35 36 37 38 40 41 42 43 44 45 47 48 49 50 52 54 55 56 57 58 60 62 63 64 66 67 68 69 70 71 72 73 74 75 9.5-9.6 10.5 10.6 11.1 11.5 11.6 12.1","10.0-10.1":0},B:{"12":0.022,"13":0.0044,"14":0.0176,"15":0.0132,"16":0.0176,"17":0.044,"18":0.1188,"84":0.0132,"85":0.0088,"89":0.0836,"90":0.0088,"91":0.0484,"92":0.0352,"93":0.022,"94":0.0572,"95":1.6588,"96":0.682,_:"79 80 81 83 86 87 88"},E:{"4":0,"13":0.022,"14":0.0396,"15":0.0484,_:"0 5 6 7 8 9 10 11 12 3.1 3.2 6.1 7.1 9.1 
10.1","5.1":0.0044,"11.1":0.0132,"12.1":0.0088,"13.1":0.0748,"14.1":0.1188,"15.1":0.0924},G:{"8":0,"3.2":0,"4.0-4.1":0,"4.2-4.3":0,"5.0-5.1":0,"6.0-6.1":0.00055,"7.0-7.1":0.018,"8.1-8.4":0,"9.0-9.2":0.00218,"9.3":0.03764,"10.0-10.2":0.00545,"10.3":0.42712,"11.0-11.2":0.02073,"11.3-11.4":0.04909,"12.0-12.1":0.08619,"12.2-12.5":1.31737,"13.0-13.1":0.01255,"13.2":0.01309,"13.3":0.066,"13.4-13.7":0.2182,"14.0-14.4":0.7686,"14.5-14.8":1.46683,"15.0-15.1":0.9448},P:{"4":1.74669,"5.0-5.4":0.05116,"6.2-6.4":0.03082,"7.2-7.4":0.27742,"8.2":0.10275,"9.2":0.09247,"10.1":0.02055,"11.1-11.2":0.11302,"12.0":0.10275,"13.0":0.06165,"14.0":0.1233,"15.0":0.90417},I:{"0":0,"3":0,"4":0,"2.1":0,"2.2":0,"2.3":0,"4.1":0.00018,"4.2-4.3":0.00194,"4.4":0,"4.4.3-4.4.4":0.02028},K:{_:"0 10 11 12 11.1 11.5 12.1"},A:{"8":0.00599,"11":0.35921,_:"6 7 9 10 5.5"},J:{"7":0,"10":0.0504},N:{"10":0.02658,"11":0.22582},L:{"0":54.2696},S:{"2.5":0.028},R:{_:"0"},M:{"0":0.0672},Q:{"10.4":0.056},O:{"0":0.336},H:{"0":5.51909}}; | PypiClean |
/Fumagalli_Motta_Tarantino_2020-0.5.3.tar.gz/Fumagalli_Motta_Tarantino_2020-0.5.3/Fumagalli_Motta_Tarantino_2020/Models/Types.py | from dataclasses import dataclass
from enum import Enum
class MergerPolicies(Enum):
"""
Defines the available merger policies in the models.
"""
Strict = "Strict"
"""The AA authorises only takeovers that, at the moment in which they are reviewed, are expected to increase total welfare."""
Intermediate_late_takeover_prohibited = "Intermediate (late takeover prohibited)"
"""The AA blocks late takeovers, but is more lenient with early takeovers."""
Intermediate_late_takeover_allowed = "Intermediate (late takeover allowed)"
"""The AA authorises late takeovers, but is stricter with early takeovers."""
Laissez_faire = "Laissez-faire"
"""The intervention threshold of the AA is so high that any acquisition would be allowed."""
def abbreviation(self) -> str:
"""
Generates a string containing the abbreviation of the current merger policy.
Returns
-------
str
Abbreviation of the current merger policy.
"""
if self is MergerPolicies.Intermediate_late_takeover_prohibited:
return "$I^P$"
if self is MergerPolicies.Intermediate_late_takeover_allowed:
return "$I^A$"
return f"${self.value[0]}$"
def __str__(self) -> str:
"""
Returns the string representation of the current merger policy.
Returns
-------
str
String representation of the current merger policy.
"""
return self.value
@staticmethod
def legend() -> str:
"""
Generates a string containing the legend of the possible merger policies.
Returns
-------
str
Containing the legend for the merger policies.
"""
return (
f"{MergerPolicies.Strict.abbreviation()}: Strict\n"
f"{MergerPolicies.Intermediate_late_takeover_prohibited.abbreviation()}: Intermediate (late takeover prohibited)\n"
f"{MergerPolicies.Intermediate_late_takeover_allowed.abbreviation()}: Intermediate (late takeover allowed)\n"
f"{MergerPolicies.Laissez_faire.abbreviation()}: Laissez-faire"
)
class Takeover(Enum):
"""
Defines the available options for a takeover of the start-up by the incumbent.
"""
No = "No bid"
"""The incumbent does not bid for the start-up."""
Separating = "Separating bid"
"""The incumbent offers a low takeover price targeting only the credit-rationed start-ups."""
Pooling = "Pooling bid"
"""The incumbent offers a high takeover price such that a start-up would always accept, irrespective of the amount of own assets."""
def abbreviation(self) -> str:
"""
Generates a string containing the abbreviation of the current takeover option.
Returns
-------
str
Abbreviation of the current takeover option.
"""
return f"${self.value[0]}$"
def __str__(self) -> str:
"""
Returns the string representation of the current takeover option.
Returns
-------
str
String representation of the current takeover option.
"""
return self.value
@staticmethod
def legend() -> str:
"""
Generates a string containing the legend of the possible takeover options.
Returns
-------
str
Containing the legend for the takeover options.
"""
return (
f"{Takeover.No.abbreviation()}: No bid by the incumbent\n"
f"{Takeover.Separating.abbreviation()}: Separating bid by the incumbent\n"
f"{Takeover.Pooling.abbreviation()}: Pooling bid by the incumbent"
)
@dataclass(frozen=True)
class ThresholdItem:
"""
Threshold item containing the name (string representation) and the value (threshold express in float value).
"""
name: str
value: float
include: bool = False
"""Marks this ThresholdItem with high priority."""
def __eq__(self, other):
return self.value == other.value
def __lt__(self, other):
return self.value < other.value
@dataclass(frozen=True)
class Outcome:
"""
Contains the bare-bones information about the outcome of a Fumagalli_Motta_Tarantino_2020.Models.Base.MergerPolicy.
"""
early_bidding_type: Takeover
late_bidding_type: Takeover
development_attempt: bool
development_outcome: bool
early_takeover: bool
late_takeover: bool
@dataclass(frozen=True)
class Summary(Outcome):
"""
Summary of Fumagalli_Motta_Tarantino_2020.Models.Base.MergerPolicy.
"""
set_policy: MergerPolicies
credit_rationed: bool
@dataclass(frozen=True)
class OptimalMergerPolicySummary(Summary):
"""
Summary of Fumagalli_Motta_Tarantino_2020.Models.Base.OptimalMergerPolicy.
"""
optimal_policy: MergerPolicies
class PossibleOutcomes(Enum):
"""
Contains the outcomes in the models.
"""
def __init__(
self,
early_bidding_type: Takeover,
early_takeover: bool,
development_attempt: bool,
development_outcome: bool,
late_bidding_type: Takeover,
late_takeover: bool,
):
self.outcome = Outcome(
early_bidding_type=early_bidding_type,
early_takeover=early_takeover,
development_attempt=development_attempt,
development_outcome=development_outcome,
late_bidding_type=late_bidding_type,
late_takeover=late_takeover,
)
NoTakeoversSuccessfulDevelopment = (
Takeover.No,
False,
True,
True,
Takeover.No,
False,
)
"""Neither an early or late takeover occurs and the development is successful."""
NoTakeoversFailedDevelopment = (Takeover.No, False, True, False, Takeover.No, False)
"""Neither an early or late takeover occurs and the development is unsuccessful."""
NoTakeoversDevelopmentNotAttempted = (
Takeover.No,
False,
False,
False,
Takeover.No,
False,
)
"""Neither an early or late takeover occurs and the development is not attempted."""
RejectedEarlySeparatingSuccessfulDevelopment = (
Takeover.Separating,
False,
True,
True,
Takeover.No,
False,
)
"""An early separating bid is rejected by the start-up and the development is successful."""
RejectedEarlySeparatingUnsuccessfulDevelopment = (
Takeover.Separating,
False,
True,
False,
Takeover.No,
False,
)
"""An early separating bid is rejected by the start-up and the development is unsuccessful."""
EarlySeparatingSuccessfulDevelopment = (
Takeover.Separating,
True,
True,
True,
Takeover.No,
False,
)
"""An early separating bid is accepted by the start-up and the development is successful."""
EarlySeparatingUnsuccessfulDevelopment = (
Takeover.Separating,
True,
True,
False,
Takeover.No,
False,
)
"""An early separating bid is accepted by the start-up and the development is unsuccessful."""
EarlySeparatingDevelopmentNotAttempted = (
Takeover.Separating,
True,
False,
False,
Takeover.No,
False,
)
"""An early separating bid is accepted by the start-up and the development is not attempted."""
EarlyPoolingSuccessfulDevelopment = (
Takeover.Pooling,
True,
True,
True,
Takeover.No,
False,
)
"""An early pooling bid is accepted by the start-up and the development is successful."""
EarlyPoolingUnsuccessfulDevelopment = (
Takeover.Pooling,
True,
True,
False,
Takeover.No,
False,
)
"""An early pooling bid is accepted by the start-up and the development is unsuccessful."""
EarlyPoolingDevelopmentNotAttempted = (
Takeover.Pooling,
True,
False,
False,
Takeover.No,
False,
)
"""An early pooling bid is accepted by the start-up and the development is not attempted."""
LatePoolingSuccessfulDevelopment = (
Takeover.No,
False,
True,
True,
Takeover.Pooling,
True,
)
"""A late pooling bid is accepted by the start-up after a successful development.""" | PypiClean |
/cognite-air-sdk-4.0.0.tar.gz/cognite-air-sdk-4.0.0/cognite/air/_spaces_api.py | from typing import Optional
from cognite.air._admin_config import AdminAPI
class SpacesAPI(AdminAPI):
def create(self, id: str, name: str, description: str = ""):
"""Create a Space
Args:
id (str): An id given to the Space. Needs to be unique and will be part of the URL
name (str): The name for a Space
description (str): The description of the space (it will be stored
but has no functionality in the Front End yet)
Examples:
>>> from cognite.client import CogniteClient
>>> from cognite.air.admin import AIRAdmin
>>> c = CogniteClient()
>>> air_admin = AIRAdmin(c)
>>> air_admin.spaces.create(id="my_space", name="My Space")
"""
if not description:
description = name
payload = {"id": id, "name": name, "description": description, "groups": []}
self.air_client.post("/space", payload)
def delete(self, id):
"""Not implemented yet. Please ask the AIR team which space to delete.
Send an email to [email protected] with the following information: project, cluster, id of the space and
whether it is staging or not.
"""
print("Warning: not implemented yet.")
print(
"""Send an email to [email protected] with
the following information: project, cluster, id of the space and
whether it is staging or not."""
)
pass
def update(self, id, new_name: Optional[str], new_description: Optional[str]):
"""Not implemented yet. Please ask the AIR team which space to update.
Send an email to [email protected] with the following information: project, cluster, id of the space and
whether it is staging or not. Then also provide information what should be updated
"""
print("Warning: not implemented yet.")
print(
"""Send an email to [email protected] with
the following information: project, cluster, id of the space and
whether it is staging or not.
Then also provide information what should be updated."""
)
pass | PypiClean |
/sdformat-0.23.2.tar.gz/sdformat-0.23.2/SDF/sdf_object.py | from . import sdf_rc
from .sdf_gen_val import sdf_gen_val
from .sdf_name import sdf_name
from .sdf_date import sdf_date
from .sdf_owner import sdf_owner
from .sdf_comment import sdf_comment
from .sdf_sample import sdf_sample
from .sdf_instrument import sdf_instrument
from .sdf_par import sdf_par
from .sdf_data import sdf_data
from .sdf_data_sc import sdf_data_sc
from .sdf_data_img import sdf_data_img
from .sdf_data_mc import sdf_data_mc
class sdf_object(sdf_gen_val):
"""
This class contains either an SDF workspace or a single SDF
dataset.
This decision is made by the type of the sdf_object.value:
o if value is an instance of sdf_data then the sdf_object
is a dataset.
o if value is a Python list(*) then the sdf_object is a workspace
and the list can contain multiple sdf_datasets or a mixture
of datasets and workspaces (the latter as instances of sdf_objects).
(*) It is NOT allowed that the object contains an instance of an
sdf_object. Embedded workspaces MUST be enclosed in a Python list!
"""
def __init__(self, src='ws'):
"""
Create an empty sdf_object.
"""
if src == 'workspace' or src == 'ws':
val = []
elif src == 'dataset' or src == 'ds':
val = sdf_data()
else:
val = None
super(sdf_object, self).__init__(val)
self.ID = 'sdf-object'
self.name = sdf_name()
self.date = sdf_date()
self.owner = sdf_owner()
self.comment = sdf_comment()
self.par = [] # becomes a list of sdf_par
self.instrument = [] # becomes a list of sdf_instrument
self.sample = [] # becomes a list of sdf_sample
self.parent = None
self.debug = False
def __str__(self):
"""
Printable contents of the object. Used by: 'print ds'
(For debugging purpose mainly).
"""
res = '=' * 80 + '\n'
if self.IsWorkspace():
res = res + 'WORKSPACE '
res = res + str(self.name)
elif self.IsDataset():
res = res + 'DATASET '
res = res + str(self.name)
res = res + ' TYPE = ' + str(self.value.datatype) + '\n'
else:
raise RuntimeError("Object is neither DATASET nor WORKSPACE")
# now parse recursively into its contents:
if not self.owner.IsEmpty():
res = res + str(self.owner)
if not self.date.IsEmpty():
res = res + str(self.date)
if not self.comment.IsEmpty():
res = res + str(self.comment)
for sample in self.sample:
res = res + str(sample)
for instrument in self.instrument:
res = res + str(instrument)
for par in self.par:
res = res + 'PARAMETER ' + str(par) + '\n'
res = res + str(self.value)
res = res + '=' * 80
return res
def Set(self, val):
"""
Set values of an sdf_object.
"""
# pass
raise NotImplementedError("sdf_object.Set not implemented yet.")
def SetName(self, name, encoding=None):
if encoding:
self.name.Set(name, encoding)
else:
self.name.Set(name)
def SetDate(self, datestr, dateformat=None):
if dateformat:
self.date.Set(datestr, dateformat)
else:
self.date.Set(datestr)
def SetOwner(self, owner, encoding=None):
if encoding:
self.owner.Set(owner, encoding)
else:
self.owner.Set(owner)
def SetComment(self, comment, encoding=None):
if encoding:
self.comment.Set(comment, encoding)
else:
self.comment.Set(comment)
def SetData(self, data):
if not isinstance(data, sdf_data):
msg = "data must be an instance of the sdf_data class"
raise RuntimeError(msg)
self.value = data
    def AppendComment(self, comment):
        """
        Append text to a comment creating a new paragraph.

        @param comment  Text to append (delegated to sdf_comment.Append).
        """
        self.comment.Append(comment)
    def AppendSample(self, sname=None, scomment=None):
        """
        Append a sample to the sample-section of the dataset
        or workspace.

        @param sname     Optional sample name.
        @param scomment  Optional sample comment.
        """
        self.sample.append(sdf_sample(sname, scomment))
def AppendInstrument(self, instrument):
    """
    Append an instrument to the list of instruments.

    :param instrument: an sdf_instrument instance.
    :raises KeyError: if *instrument* is not an sdf_instrument.
    """
    # NOTE(review): KeyError is an unusual choice for a type check
    # (TypeError would be conventional), but callers may catch it --
    # confirm before changing the exception type.
    if not isinstance(instrument, sdf_instrument):
        msg = 'Error in sdf_object.AppendInstrument():'
        msg += ' Argument is not an sdf_instrument instance.'
        raise KeyError(msg)
    self.instrument.append(instrument)
def AppendPar(self, par):
    """
    Append a parameter to the list of the datasets or workspaces
    parameters.

    Accepts an sdf_par instance, a list of sdf_par instances, or a
    string name (for which a new, empty sdf_par is created).
    """
    # NOTE: If you change something here, it probably also has to
    # be changed in the sdf_instrument.AppendPar() function.
    if isinstance(par, sdf_par):
        self.par.append(par)
    elif isinstance(par, list):
        self.par = self.par + par  # this is Pythons list concatenation
    elif isinstance(par, str):
        # Create a new parameter with an empty subparameter list.
        self.par.append(sdf_par(name=par))
    # NOTE(review): any other argument type is silently ignored --
    # consider raising TypeError; confirm no caller relies on the no-op.
def AppendObject(self, obj):
    """
    Append another sdf_object to this workspace's self.value.
    This is a valid function only for workspaces. To set the value
    of a dataset, use the function sdf_object.SetData .
    @param obj Instance of sdf_object (workspace or dataset).
    @raises RuntimeError if called on a dataset instead of a workspace.
    """
    if not self.IsWorkspace():
        msg = "ERROR: can't call function sdf_object.AppendObject"
        msg += " for datasets, only for workspaces."
        raise RuntimeError(msg)
    # NOTE(review): arguments that are not sdf_object instances are
    # silently ignored -- confirm whether that is intentional.
    if isinstance(obj, sdf_object):
        self.value.append(obj)
        # Keep a back-reference so children can navigate to their parent.
        obj.parent = self
def AsXML(self, indent=0, lw=1000000):
    """
    Create the XML representation of an SDF object.

    :param indent: current indentation depth in spaces; 0 marks the top
        of a file and triggers the <?xml?> and <!DOCTYPE> headers.
    :param lw: line-width limit; unused in this implementation.
    :return: XML text for this object and, recursively, its children.
    :raises RuntimeError: if the object is neither dataset nor workspace.
    """
    if self.debug:
        if indent:
            print("=" * indent)
        print("Enter function sdf_object.AsXML")
    if indent == 0:  # if beginning of file, define document type xml
        res = ' ' * indent + '<?xml version="1.0"?>\n'
    else:  # else, create empty string res
        res = ''
    # Open the correct top-level tag for the object kind.
    if self.IsWorkspace():
        if self.debug:
            print("sdf_object identified as workspace")
        if indent == 0:
            res = res + ' ' * indent + '<!DOCTYPE workspace>\n'
        res = res + ' ' * indent + '<workspace>\n'
    elif self.IsDataset():
        if self.debug:
            print("sdf_object identified as dataset")
        if indent == 0:
            res = res + ' ' * indent + '<!DOCTYPE dataset>\n'
        res = res + ' ' * indent + "<dataset type='"
        res = res + self.value.datatype + "' >\n"
    else:
        raise RuntimeError("Object is neither DATASET nor WORKSPACE")
    # Emit the header sections that are present, one level deeper.
    if not self.name.IsEmpty():
        res = res + self.name.AsXML(indent + sdf_rc._tabsize) + '\n'
    if not self.owner.IsEmpty():
        res = res + self.owner.AsXML(indent + sdf_rc._tabsize) + '\n'
    if not self.date.IsEmpty():
        res = res + self.date.AsXML(indent + sdf_rc._tabsize) + '\n'
    if not self.comment.IsEmpty():
        res = res + self.comment.AsXML(indent + sdf_rc._tabsize) + '\n'
    for sample in self.sample:
        res = res + sample.AsXML(indent + sdf_rc._tabsize) + '\n'
    for par in self.par:
        res = res + (par.AsXML(indent=indent + sdf_rc._tabsize)) + '\n'
    for instrument in self.instrument:
        res = res + instrument.AsXML(indent + sdf_rc._tabsize) + '\n'
    # handle single datasets:
    if self.IsDataset():
        res = res + self.value.AsXML(indent + sdf_rc._tabsize) + '\n'
    else:
        if self.debug:
            print("will now go through objects in this workspace ...")
        for obj in self.value:
            if self.debug:
                print("... arrived at object " + obj.name.__str__())
            # NOTE(review): debug is switched on for every child here,
            # regardless of self.debug -- looks like leftover debugging;
            # confirm whether this should be `obj.debug = self.debug`.
            obj.debug = True
            res = res + obj.AsXML(indent + sdf_rc._tabsize) + '\n'
    # Close the tag that was opened above.
    if self.IsWorkspace():
        res = res + ' ' * indent + '</workspace>\n'
    elif self.IsDataset():
        res = res + ' ' * indent + '</dataset>\n'
    if self.debug:
        print("done converting to XML")
        if indent:
            print("=" * indent)
    return res
def Save(self, filename):
    """
    Save the sdf_object as an SDF-file.

    :param filename: path of the file to write (UTF-8 encoded text).
    """
    # Use a context manager so the file is closed even if AsXML()
    # or the write raises (the original leaked the handle on error).
    with open(filename, 'wt', encoding='utf-8') as ofp:
        ofp.write(self.AsXML())
def FromXML(self, etree_node):
    """
    Create an SDF_object from an XML ElementTree node.

    Fills this object (and, recursively, its children) from
    *etree_node*, which is expected to be a 'workspace' or 'dataset'
    element produced by AsXML.
    """
    if etree_node.tag == 'dataset':
        self.value.datatype = etree_node.attrib['type']
    for child in etree_node:
        if child.tag == 'name':
            self.name.FromXML(child)
        if child.tag == 'date':
            self.date.FromXML(child)
        if child.tag == 'owner':
            self.owner.FromXML(child)
        if child.tag == 'comment':
            self.comment.FromXML(child)
        if child.tag == 'par':
            self.par.append(sdf_par())
            self.par[-1].FromXML(child)
        if child.tag == 'instrument':
            self.instrument.append(sdf_instrument())
            self.instrument[-1].FromXML(child)
        if child.tag == 'sample':
            self.sample.append(sdf_sample())
            self.sample[-1].FromXML(child)
        if child.tag == 'workspace':
            # Nested workspace: recurse into a new child object.
            ws = sdf_object('ws')
            ws.FromXML(child)
            self.AppendObject(ws)
        if child.tag == 'dataset':
            if etree_node.tag != 'workspace':
                # NOTE(review): structural errors are only printed, not
                # raised, and parsing stops silently -- confirm intent.
                print('Error in SDF file structure: ', end='')
                print('only workspaces can contain datasets.')
                return
            ds = sdf_object('ds')
            self.AppendObject(ds)
            # Choose the concrete data container from the 'type' attribute.
            if child.attrib['type'] == 'img':
                ds.value = sdf_data_img()
            elif child.attrib['type'] == 'sc':
                ds.value = sdf_data_sc()
            elif child.attrib['type'] == 'mc':
                ds.value = sdf_data_mc()
            else:
                msg = "ERROR: unknown data type in attrib " + str(child.attrib) + '\n'
                raise RuntimeError(msg)
            ds.FromXML(child)
        if child.tag == 'data':
            if etree_node.tag != 'dataset':
                print('Error in SDF file structure: ', end='')
                print('only datasets can contain data blocks.')
                return
            self.value.FromXML(child)
def GetChild(self, i):
    """Return the i-th child of a workspace, or None for datasets.

    (Introduced for the QtTreeViewModel.)
    """
    # Guard clause: only workspaces have an indexable child list.
    if not self.IsWorkspace():
        return None
    return self.value[i]
def Get(self, key=None):
    """Return the whole value, or a child selected by index or name.

    :param key: None (return self.value), an int index, or a str name.
    :raises KeyError: no child with the given name exists.
    :raises TypeError: key is neither None, int nor str.
    """
    if key is None:
        return self.value
    if isinstance(key, int):
        return self.value[key]
    if isinstance(key, str):
        for child in self.value:
            if child.name.value == key:
                return child
        raise KeyError('No child with name "{}" found.'.format(key))
    raise TypeError("key needs to be of type int or str")
def __getitem__(self, item):
    """Index/name access (obj[i] or obj["name"]), delegating to Get."""
    return self.Get(item)
def __contains__(self, item: str):
if not isinstance(item, str):
raise TypeError('item needs to be of type str')
for v in self.value:
if v.name.value == item:
return True
return False
def GetPar(self, key):
    """
    More comfortable way to get a parameter in sdf_object.par either by
    index or name.

    :param key: an int index into self.par, or a str parameter name.
    :raises KeyError: no parameter with the given name exists.
    :raises TypeError: key is neither int nor str.
    """
    if isinstance(key, int):
        return self.par[key]
    if isinstance(key, str):
        for p in self.par:
            if p.name.value == key:
                return p
        # Fixed typo in the user-facing message ("parmeter" -> "parameter").
        msg = "No parameter by name \"{}\" found.".format(key)
        raise KeyError(msg)
    msg = "key needs to be of type int or str"
    raise TypeError(msg)
def GetInstrument(self, key):
    """
    More comfortable way to get an instrument in sdf_object.instrument
    either by index or name.

    :param key: an int index into self.instrument, or a str name.
    :raises KeyError: no instrument with the given name exists.
    :raises TypeError: key is neither int nor str.
    """
    if isinstance(key, int):
        return self.instrument[key]
    if isinstance(key, str):
        for inst in self.instrument:
            if inst.name.value == key:
                return inst
        msg = "No instrument by name \"{}\" found.".format(key)
        raise KeyError(msg)
    msg = "key needs to be of type int or str"
    raise TypeError(msg)
def IsValid(self):
    """
    Check validity of an sdf_object.

    Not implemented yet: currently a no-op that returns None.
    """
    pass
def IsWorkspace(self):
    """
    Check whether the object is a workspace.

    A workspace stores its children in a plain list; a dataset stores
    an sdf_data instance instead (see IsDataset).
    """
    return isinstance(self.value, list)
def IsDataset(self):
    """
    Check whether the object is a dataset (value is an sdf_data instance).
    """
    return isinstance(self.value, sdf_data)
/sheepdog-tables-1.2.0.tar.gz/sheepdog-tables-1.2.0/sheepdog_tables/table.py | from inspect import getmembers
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from django.forms.models import ModelForm, BaseModelFormSet
from django.forms.formsets import formset_factory
from django.utils.translation import ugettext_lazy as _
from .column import Column, ASC, DESC
class Table(object):
    """
    Generic table base class
    This is for adding model based tables to your page. It doesn't
    need to know what model it is using, it's all based off whatever
    queryset the ListView class contains (see docs for ``TablesMixin``)
    Each column is set as a normal class attribute. For instance:
    class MyCrazyTable(Table):
        field1 = Column()
        second = Column(field="field2", header="Hello")
    :params
        table_page_limit - The number of items to show per page.
        table_attrs - HTML Attributes for <table></table> tags
        table_empty - String to print if no data is available
        table_sequence - The explicit sequence of columns to show.
    """
    # Page size, overridable via the DEFAULT_ITEMS_PER_PAGE setting.
    table_page_limit = getattr(settings, 'DEFAULT_ITEMS_PER_PAGE', 25)
    # Default HTML attributes rendered on the <table> element.
    table_attrs = {'class': 'table table-bordered table-striped'}
    # Message shown when the queryset yields no rows.
    table_empty = _("No data is available")
    # Must be overridden by subclasses with the ordered column names.
    table_sequence = []
    def __init__(self, is_paged=True):
        """Validate the subclass configuration and collect its columns.

        :param is_paged: whether the table should be paginated.
        :raises ImproperlyConfigured: if no table_sequence is defined.
        """
        if not self.table_sequence:
            raise ImproperlyConfigured('%s does not provide a table_sequence.'
                                       % self.__class__.__name__)
        self.table_columns = {}
        self.is_paged = is_paged
        self.gen_columns()
    def gen_columns(self):
        """Collect all Column attributes into self.table_columns.

        NOTE(review): this mutates the class-level Column instances
        (setting .field) -- Column objects are shared across all
        instances of the subclass; confirm that is intended.
        """
        # l(k) -> class.__dict__[k]
        members_dict = dict(getmembers(self))
        l = lambda k: members_dict[k]
        # Extract the columns into our own nifty dict for later
        for k in members_dict.keys():
            if isinstance(l(k), Column):
                # Field becomes the key value if it isn't passed
                # to the column explicitly
                if not l(k).field:
                    l(k).field = k
                self.table_columns[k] = l(k)
    def filter(self, queryset):
        """Hook for subclasses to filter the queryset; default is a no-op."""
        return queryset
    def annotate(self, queryset):
        """Apply each column's annotation callable to the queryset."""
        cols = self.table_columns
        annotated_columns = [col for col in cols
                             if cols[col].annotation is not None]
        for col in annotated_columns:
            queryset = cols[col].annotation(queryset)
        return queryset
    def columns(self):
        """Return the Column objects in table_sequence order."""
        return [self.table_columns[h] for h in self.table_sequence]
    def headers(self):
        """Return header labels, falling back to the title-cased name."""
        return [self.table_columns[h].header or h.title()
                for h in self.table_sequence]
    def parse_sort(self, sortstring):
        """Split a sort string like '-name' into (field, direction)."""
        direction = DESC if sortstring[0] == '-' else ASC
        field = sortstring if direction == ASC else sortstring[1:]
        return field, direction
    def sort(self, queryset, sort_string):
        """Order the queryset by the column matching *sort_string*.

        Unknown or non-sortable fields leave the queryset unchanged.
        """
        if not sort_string:
            return queryset
        sort_field, direction = self.parse_sort(sort_string)
        sorting_col = None
        for col in self.columns():
            if col.sortable and col.get_sort_field() == sort_field:
                sorting_col = col
                break
        if not sorting_col:
            return queryset
        else:
            return queryset.order_by(sorting_col.render_sort(direction))
class EditTable(Table):
    """
    The only enhancements required to the Table data structure is the
    addition of the `table_form` and `table_formset` which are used to bind
    the FormSet class consumed by the view mixin.
    """
    # ModelForm class used for each editable row.
    table_form = ModelForm
    # Base formset class the row forms are bound into.
    table_formset = BaseModelFormSet
    def __init__(self, *args, **kwargs):
        super(EditTable, self).__init__(*args, **kwargs)
        # build our own formset class with some strict requirements around no
        # deletion, ordering and maxes.
        self.FormSet = formset_factory(self.table_form, self.table_formset,
                                       extra=0, max_num=0, can_order=False,
                                       can_delete=False)
        # formset_factory does not set `model`; NOTE(review): presumably
        # the consuming view mixin expects it on the class -- confirm.
        self.FormSet.model = self.table_form.Meta.model
/msgraph-sdk-1.0.0a3.tar.gz/msgraph-sdk-1.0.0a3/msgraph/generated/groups/item/calendar/events/item/single_value_extended_properties/item/single_value_legacy_extended_property_item_request_builder.py | from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, Union
from ........models import single_value_legacy_extended_property
from ........models.o_data_errors import o_data_error
class SingleValueLegacyExtendedPropertyItemRequestBuilder():
    """
    Provides operations to manage the singleValueExtendedProperties property of the microsoft.graph.event entity.

    NOTE(review): this follows the Kiota-generated client pattern
    (kiota_abstractions request builders); if it is generated, changes
    should be made in the generator, not by hand -- confirm.
    """
    def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
        """
        Instantiates a new SingleValueLegacyExtendedPropertyItemRequestBuilder and sets the default values.
        Args:
            pathParameters: The raw url or the Url template parameters for the request.
            requestAdapter: The request adapter to use to execute the requests.
        """
        if path_parameters is None:
            raise Exception("path_parameters cannot be undefined")
        if request_adapter is None:
            raise Exception("request_adapter cannot be undefined")
        # Url template to use to build the URL for the current request builder
        self.url_template: str = "{+baseurl}/groups/{group%2Did}/calendar/events/{event%2Did}/singleValueExtendedProperties/{singleValueLegacyExtendedProperty%2Did}{?%24select,%24expand}"
        url_tpl_params = get_path_parameters(path_parameters)
        self.path_parameters = url_tpl_params
        self.request_adapter = request_adapter
    def create_delete_request_information(self,request_configuration: Optional[SingleValueLegacyExtendedPropertyItemRequestBuilderDeleteRequestConfiguration] = None) -> RequestInformation:
        """
        Delete navigation property singleValueExtendedProperties for groups
        Args:
            requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
        Returns: RequestInformation
        """
        request_info = RequestInformation()
        request_info.url_template = self.url_template
        request_info.path_parameters = self.path_parameters
        request_info.http_method = Method.DELETE
        if request_configuration:
            request_info.add_request_headers(request_configuration.headers)
            request_info.add_request_options(request_configuration.options)
        return request_info
    def create_get_request_information(self,request_configuration: Optional[SingleValueLegacyExtendedPropertyItemRequestBuilderGetRequestConfiguration] = None) -> RequestInformation:
        """
        The collection of single-value extended properties defined for the event. Read-only. Nullable.
        Args:
            requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
        Returns: RequestInformation
        """
        request_info = RequestInformation()
        request_info.url_template = self.url_template
        request_info.path_parameters = self.path_parameters
        request_info.http_method = Method.GET
        # GET responses are deserialized, so request JSON explicitly.
        request_info.headers["Accept"] = "application/json"
        if request_configuration:
            request_info.add_request_headers(request_configuration.headers)
            request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters)
            request_info.add_request_options(request_configuration.options)
        return request_info
    def create_patch_request_information(self,body: Optional[single_value_legacy_extended_property.SingleValueLegacyExtendedProperty] = None, request_configuration: Optional[SingleValueLegacyExtendedPropertyItemRequestBuilderPatchRequestConfiguration] = None) -> RequestInformation:
        """
        Update the navigation property singleValueExtendedProperties in groups
        Args:
            body: The updated property values to send.
            requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
        Returns: RequestInformation
        """
        if body is None:
            raise Exception("body cannot be undefined")
        request_info = RequestInformation()
        request_info.url_template = self.url_template
        request_info.path_parameters = self.path_parameters
        request_info.http_method = Method.PATCH
        request_info.headers["Accept"] = "application/json"
        if request_configuration:
            request_info.add_request_headers(request_configuration.headers)
            request_info.add_request_options(request_configuration.options)
        request_info.set_content_from_parsable(self.request_adapter, "application/json", body)
        return request_info
    async def delete(self,request_configuration: Optional[SingleValueLegacyExtendedPropertyItemRequestBuilderDeleteRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> None:
        """
        Delete navigation property singleValueExtendedProperties for groups
        Args:
            requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
            responseHandler: Response handler to use in place of the default response handling provided by the core service
        """
        request_info = self.create_delete_request_information(
            request_configuration
        )
        # Map 4xx/5xx responses onto ODataError exceptions.
        error_mapping: Dict[str, ParsableFactory] = {
            "4XX": o_data_error.ODataError,
            "5XX": o_data_error.ODataError,
        }
        if not self.request_adapter:
            raise Exception("Http core is null")
        return await self.request_adapter.send_no_response_content_async(request_info, response_handler, error_mapping)
    async def get(self,request_configuration: Optional[SingleValueLegacyExtendedPropertyItemRequestBuilderGetRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> Optional[single_value_legacy_extended_property.SingleValueLegacyExtendedProperty]:
        """
        The collection of single-value extended properties defined for the event. Read-only. Nullable.
        Args:
            requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
            responseHandler: Response handler to use in place of the default response handling provided by the core service
        Returns: Optional[single_value_legacy_extended_property.SingleValueLegacyExtendedProperty]
        """
        request_info = self.create_get_request_information(
            request_configuration
        )
        error_mapping: Dict[str, ParsableFactory] = {
            "4XX": o_data_error.ODataError,
            "5XX": o_data_error.ODataError,
        }
        if not self.request_adapter:
            raise Exception("Http core is null")
        return await self.request_adapter.send_async(request_info, single_value_legacy_extended_property.SingleValueLegacyExtendedProperty, response_handler, error_mapping)
    async def patch(self,body: Optional[single_value_legacy_extended_property.SingleValueLegacyExtendedProperty] = None, request_configuration: Optional[SingleValueLegacyExtendedPropertyItemRequestBuilderPatchRequestConfiguration] = None, response_handler: Optional[ResponseHandler] = None) -> Optional[single_value_legacy_extended_property.SingleValueLegacyExtendedProperty]:
        """
        Update the navigation property singleValueExtendedProperties in groups
        Args:
            body: The updated property values to send.
            requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
            responseHandler: Response handler to use in place of the default response handling provided by the core service
        Returns: Optional[single_value_legacy_extended_property.SingleValueLegacyExtendedProperty]
        """
        if body is None:
            raise Exception("body cannot be undefined")
        request_info = self.create_patch_request_information(
            body, request_configuration
        )
        error_mapping: Dict[str, ParsableFactory] = {
            "4XX": o_data_error.ODataError,
            "5XX": o_data_error.ODataError,
        }
        if not self.request_adapter:
            raise Exception("Http core is null")
        return await self.request_adapter.send_async(request_info, single_value_legacy_extended_property.SingleValueLegacyExtendedProperty, response_handler, error_mapping)
    @dataclass
    class SingleValueLegacyExtendedPropertyItemRequestBuilderDeleteRequestConfiguration():
        """
        Configuration for the request such as headers, query parameters, and middleware options.
        """
        # Request headers
        headers: Optional[Dict[str, str]] = None
        # Request options
        options: Optional[List[RequestOption]] = None
    @dataclass
    class SingleValueLegacyExtendedPropertyItemRequestBuilderGetQueryParameters():
        """
        The collection of single-value extended properties defined for the event. Read-only. Nullable.
        """
        # Expand related entities
        expand: Optional[List[str]] = None
        # Select properties to be returned
        select: Optional[List[str]] = None
        def get_query_parameter(self,original_name: Optional[str] = None) -> str:
            """
            Maps the query parameters names to their encoded names for the URI template parsing.
            Args:
                originalName: The original query parameter name in the class.
            Returns: str
            """
            if original_name is None:
                raise Exception("original_name cannot be undefined")
            if original_name == "expand":
                return "%24expand"
            if original_name == "select":
                return "%24select"
            return original_name
    @dataclass
    class SingleValueLegacyExtendedPropertyItemRequestBuilderGetRequestConfiguration():
        """
        Configuration for the request such as headers, query parameters, and middleware options.
        """
        # Request headers
        headers: Optional[Dict[str, str]] = None
        # Request options
        options: Optional[List[RequestOption]] = None
        # Request query parameters
        query_parameters: Optional[SingleValueLegacyExtendedPropertyItemRequestBuilder.SingleValueLegacyExtendedPropertyItemRequestBuilderGetQueryParameters] = None
    @dataclass
    class SingleValueLegacyExtendedPropertyItemRequestBuilderPatchRequestConfiguration():
        """
        Configuration for the request such as headers, query parameters, and middleware options.
        """
        # Request headers
        headers: Optional[Dict[str, str]] = None
        # Request options
        options: Optional[List[RequestOption]] = None
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/request/AlipayCommerceMedicalInstcardCreateandpayRequest.py | import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceMedicalInstcardCreateandpayModel import AlipayCommerceMedicalInstcardCreateandpayModel
class AlipayCommerceMedicalInstcardCreateandpayRequest(object):
    """Request wrapper for the 'alipay.commerce.medical.instcard.createandpay'
    OpenAPI method.

    Holds the business payload plus the common gateway parameters and
    serializes them via get_params() / get_multipart_params().
    """
    def __init__(self, biz_model=None):
        # Typed business model (preferred over raw biz_content).
        self._biz_model = biz_model
        # Raw or model-typed business content payload.
        self._biz_content = None
        # API protocol version.
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        # Async notification callback URL.
        self._notify_url = None
        # Browser redirect URL after payment.
        self._return_url = None
        # Extra user-defined request parameters (dict).
        self._udf_params = None
        # Whether the request content should be encrypted.
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict, which is
        # converted into the model type.
        if isinstance(value, AlipayCommerceMedicalInstcardCreateandpayModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayCommerceMedicalInstcardCreateandpayModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # NOTE(review): non-dict values are silently ignored -- confirm.
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Add a single user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Build the flat parameter dict sent to the gateway.

        biz_model takes precedence; biz_content may overwrite it since
        it is assigned later into the same key.
        """
        params = dict()
        params[P_METHOD] = 'alipay.commerce.medical.instcard.createandpay'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """Return file-upload parameters; this API has none."""
        multipart_params = dict()
        return multipart_params
/pymouser-0.8.tar.gz/pymouser-0.8/README.md | # PyMouser
## Installation
Install the package with pip.
```pip install --user pymouser```
## Usage:
```python
import pymouser
# Initialize the package with your API key
mouser = pymouser.MouserAPI('your-api-key')
# Search by Part-Number
err, res = mouser.search_by_PN('your-part-number')
# Check for errors or print the returned results
if err:
print("Error during request:")
print(err)
else:
if res['NumberOfResult'] == 0:
print("No results matched the part number")
else:
for match in res['Parts']:
print("Match for PartNumber .... %s" % match['MouserPartNumber'])
print("Description ............. %s" % match['Description'])
print("Link to datasheet ....... %s" % match['DataSheetUrl'])
print("Link to product page .... %s" % match['ProductDetailUrl'])
print("")
``` | PypiClean |
/steelscript.appfwk-1.8.tar.gz/steelscript.appfwk-1.8/steelscript/appfwk/apps/plugins/builtin/whois/reports/whois.py |
from steelscript.appfwk.apps.plugins.builtin.whois.datasource.whois import \
WhoisTable, whois_function
from steelscript.appfwk.apps.datasource.modules.analysis import AnalysisTable
from steelscript.netprofiler.appfwk.datasources.netprofiler import \
NetProfilerGroupbyTable
from steelscript.appfwk.apps.report.models import Report
import steelscript.appfwk.apps.report.modules.tables as tables
#
# Description
#
description = """
<div style="width:500px">
<p>This example report demonstrates two different ways to utilize the
AnalysisTable features of App framework.
<p>The first table uses the extensible <strong>custom table definition</strong>
approach where two new classes are defined to perform the initial table
definition and data processing.
<p>The second table looks much like the first, but uses a <strong>single
function</strong> to perform the post-processing.
<p>Both approaches have benefits, the custom definitions allow far more
flexibility in how things get defined, while the function approach can
be simpler for a quick report. See the <a href="edit/">report definition</a>
for details on how this was written.
</div>
"""
report = Report.create("Whois Example Report",
description=description, position=11)
report.add_section()
# Define a Table that gets external hosts by avg bytes
# This will be used as the base table for both analysis approaches
table = NetProfilerGroupbyTable.create(
'5-hosts', groupby='host', duration='1 hour',
filterexpr='not srv host 10/8 and not srv host 192.168/16'
)
table.add_column('host_ip', 'IP Addr', iskey=True, datatype='string')
table.add_column('avg_bytes', 'Avg Bytes', units='B/s', sortdesc=True)
# Using the custom analysis classes, this will create a new analysis table
# and also add the extra column of interest.
whoistable = WhoisTable.create('5-whois-hosts', tables={'t': table})
report.add_widget(tables.TableWidget, whoistable,
"Custom Analysis Link table", width=12)
# Create an Analysis table that calls the 'whois' function to create the link
# Note that we need to manually add the extra column here, since our
# function won't do that for us
function_table = AnalysisTable.create('whois-function-table',
tables={'t': table},
function=whois_function)
function_table.copy_columns(table)
function_table.add_column('whois', label='Whois link', datatype='html')
report.add_widget(tables.TableWidget, function_table,
"Analysis Function Link table", width=12) | PypiClean |
/gdbfrontend-0.6.2.tar.gz/gdbfrontend-0.6.2/frontend/thirdparty/ace/mode-cobol.js | ace.define("ace/mode/cobol_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text_highlight_rules").TextHighlightRules,s=function(){var e="ACCEPT|MERGE|SUM|ADD||MESSAGE|TABLE|ADVANCING|MODE|TAPE|AFTER|MULTIPLY|TEST|ALL|NEGATIVE|TEXT|ALPHABET|NEXT|THAN|ALSO|NO|THEN|ALTERNATE|NOT|THROUGH|AND|NUMBER|THRU|ANY|OCCURS|TIME|ARE|OF|TO|AREA|OFF|TOP||ASCENDING|OMITTED|TRUE|ASSIGN|ON|TYPE|AT|OPEN|UNIT|AUTHOR|OR|UNTIL|BEFORE|OTHER|UP|BLANK|OUTPUT|USE|BLOCK|PAGE|USING|BOTTOM|PERFORM|VALUE|BY|PIC|VALUES|CALL|PICTURE|WHEN|CANCEL|PLUS|WITH|CD|POINTER|WRITE|CHARACTER|POSITION||ZERO|CLOSE|POSITIVE|ZEROS|COLUMN|PROCEDURE|ZEROES|COMMA|PROGRAM|COMMON|PROGRAM-ID|COMMUNICATION|QUOTE|COMP|RANDOM|COMPUTE|READ|CONTAINS|RECEIVE|CONFIGURATION|RECORD|CONTINUE|REDEFINES|CONTROL|REFERENCE|COPY|REMAINDER|COUNT|REPLACE|DATA|REPORT|DATE|RESERVE|DAY|RESET|DELETE|RETURN|DESTINATION|REWIND|DISABLE|REWRITE|DISPLAY|RIGHT|DIVIDE|RUN|DOWN|SAME|ELSE|SEARCH|ENABLE|SECTION|END|SELECT|ENVIRONMENT|SENTENCE|EQUAL|SET|ERROR|SIGN|EXIT|SEQUENTIAL|EXTERNAL|SIZE|FLASE|SORT|FILE|SOURCE|LENGTH|SPACE|LESS|STANDARD|LIMIT|START|LINE|STOP|LOCK|STRING|LOW-VALUE|SUBTRACT",t="true|false|null",n="count|min|max|avg|sum|rank|now|coalesce|main",r=this.createKeywordMapper({"support.function":n,keyword:e,"constant.language":t},"identifier",!0);this.$rules={start:[{token:"comment",regex:"\\*.*$"},{token:"string",regex:'".*?"'},{token:"string",regex:"'.*?'"},{token:"constant.numeric",regex:"[+-]?\\d+(?:(?:\\.\\d*)?(?:[eE][+-]?\\d+)?)?\\b"},{token:r,regex:"[a-zA-Z_$][a-zA-Z0-9_$]*\\b"},{token:"keyword.operator",regex:"\\+|\\-|\\/|\\/\\/|%|<@>|@>|<@|&|\\^|~|<|>|<=|=>|==|!=|<>|="},{token:"paren.lparen",regex:"[\\(]"},{token:"paren.rparen",regex:"[\\)]"},{token:"text",regex:"\\s+"}]}};r.inherits(s,i),t.CobolHighlightRules=s}),ace.define("ace/mode/cobol",[
"require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/cobol_highlight_rules"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text").Mode,s=e("./cobol_highlight_rules").CobolHighlightRules,o=function(){this.HighlightRules=s,this.$behaviour=this.$defaultBehaviour};r.inherits(o,i),function(){this.lineCommentStart="*",this.$id="ace/mode/cobol"}.call(o.prototype),t.Mode=o}); (function() {
ace.require(["ace/mode/cobol"], function(m) {
if (typeof module == "object" && typeof exports == "object" && module) {
module.exports = m;
}
});
})(); | PypiClean |
/lintrunner-0.11.0.tar.gz/lintrunner-0.11.0/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
## [0.11.0] - 2023-08-23
### Features
- Add command line argument `--only-lint-under-config-dir` ([a604812](https://github.com/suo/lintrunner/commit/a604812e11c5c5bf3c1160f9ee7ccd9a9680f43a))
- Allow multiple toml files in config ([49265e5](https://github.com/suo/lintrunner/commit/49265e5f544e8d7edf62bb6cf52af8a88573847b))
- add sapling support to ci ([6e5bc4e](https://github.com/suo/lintrunner/commit/6e5bc4ee93920cae11a02bb35f7900cd06f64818))
- implement sapling support in lintrunner ([1442962](https://github.com/suo/lintrunner/commit/1442962475b7065151656ae9629de23ef4f08ca3))
### Bug Fixes
- Skip linting nonexitent files ([5c65871](https://github.com/suo/lintrunner/commit/5c65871f90493fb2f92a1d05534e76260dcd7c23))
### Documentation
- Update README to include options and link to github action ([#55](https://github.com/suo/lintrunner/issues/55)) ([9efb969](https://github.com/suo/lintrunner/commit/9efb969e27f5775e619a3bbf51576a175557e3f8))
## [0.10.7] - 2023-03-02
### Bug Fixes
- Run clippy and rustfmt; fix issues ([#34](https://github.com/suo/lintrunner/issues/34)) ([b0e8be2](https://github.com/suo/lintrunner/commit/b0e8be295e5a0e959f36ea740b95780a9abe7400))
- Fix and enable rustfmt linter ([#35](https://github.com/suo/lintrunner/issues/35)) ([507d273](https://github.com/suo/lintrunner/commit/507d27314283fd5c6acede4e75800766921e358d))
### Features
- Enable setting default --merge-base-with values ([75ea9c0](https://github.com/suo/lintrunner/commit/75ea9c09cd6904e6e53170af0661fd3dcb39c9e9))
## [0.10.5] - 2023-01-19
### Bug Fixes
- Add a space to the severity on oneline format ([#30](https://github.com/suo/lintrunner/issues/30)) ([5120786](https://github.com/suo/lintrunner/commit/5120786d3a61bf9013563a126f61f9cb5727be1a))
## [0.10.2] - 2023-01-13
### Features
- Update the message format produced by `convert_to_sarif.py` ([#28](https://github.com/suo/lintrunner/issues/28)) ([b3370bf](https://github.com/suo/lintrunner/commit/b3370bff64ee5bdaad7faef89b4127c2d3b4f357))
## [0.10.1] - 2023-01-13
### Bug Fixes
- Allow --paths-cmd to run on Windows ([#23](https://github.com/suo/lintrunner/issues/23)) ([a1c4191](https://github.com/suo/lintrunner/commit/a1c4191575959974ce5b17269f624b17e93951a0))
## [0.10.0] - 2022-11-28
### Bug Fixes
- Typo in init_command doc ([#17](https://github.com/suo/lintrunner/issues/17)) ([fa8d7b3](https://github.com/suo/lintrunner/commit/fa8d7b32641e58c041e9f3bf15a4b26e1afff915))
- Path construction errors on Windows ([#19](https://github.com/suo/lintrunner/issues/19)) ([032bea6](https://github.com/suo/lintrunner/commit/032bea69f31f6ccfab5cb6670edfb5adb22f1840))
### Features
- A tool to convert json output to SARIF format ([#16](https://github.com/suo/lintrunner/issues/16)) ([1c991af](https://github.com/suo/lintrunner/commit/1c991affb15edac2bb67080e49bf0e5037b47e92))
- Add lint_message.name to oneline output ([#21](https://github.com/suo/lintrunner/issues/21)) ([84f3d34](https://github.com/suo/lintrunner/commit/84f3d34c6db340bdbbe63a4d192004f17769758b))
### Testing
- Fix linux ci ([c443387](https://github.com/suo/lintrunner/commit/c443387ff9a42a6f8c9b0e8add04220d2fea46a1))
## [0.9.3] - 2022-09-23
### Bug Fixes
- Don't check files that were deleted/moved in working tree ([0fbb2f3](https://github.com/suo/lintrunner/commit/0fbb2f3d01a08088606ee6650e98d9db9b0b7b3a))
### Testing
- Add unit test for trailing whitespace ([bbbcffd](https://github.com/suo/lintrunner/commit/bbbcffd7d095b16fc831fe48c163b4805e6a9aa0))
- Add missing snapshot ([9fda576](https://github.com/suo/lintrunner/commit/9fda576f330392c244527defb6e80250663744c6))
## [0.9.2] - 2022-05-11
### Bug Fixes
- Add more runtime info to logs ([80e78de](https://github.com/suo/lintrunner/commit/80e78dee128f834f4f696c652bcec32a4f0e0d1c))
### Features
- Add --all-files command ([3d64ad3](https://github.com/suo/lintrunner/commit/3d64ad33ca94172ee27830fb772c35d469b41028))
## [0.9.1] - 2022-05-11
### Features
- Add --tee-json option ([5978ec0](https://github.com/suo/lintrunner/commit/5978ec0e47f38bd0252c3f5afa02d27314edd875))
## [0.9.0] - 2022-05-10
### Bug Fixes
- Add --version command-line arg ([7932c44](https://github.com/suo/lintrunner/commit/7932c44d80279e54b67e02d256b356104ba4bcc2))
- Escape command-line args in log ([1018103](https://github.com/suo/lintrunner/commit/10181032e2093bcf0cb233300b982da459a71975))
- Error if duplicate linters found ([89064c1](https://github.com/suo/lintrunner/commit/89064c1f808d7e76ecc183c182b9c1ac4d765704))
- Escape linter initializer in logs ([0a0f0ec](https://github.com/suo/lintrunner/commit/0a0f0ec1d86b02f77a680ad8e4560ed80219b849))
- Properly ignore current run on `rage -i` ([#6](https://github.com/suo/lintrunner/issues/6)) ([e4989eb](https://github.com/suo/lintrunner/commit/e4989ebe598d7268d4ae715484ec21a57aadd426))
- Show milliseconds in rage run timestamp ([9780a2b](https://github.com/suo/lintrunner/commit/9780a2b8774b3c6e52b29414435a038840a3aabf))
### Documentation
- Update changelog ([82c3335](https://github.com/suo/lintrunner/commit/82c33359f0cde758e7153d4ba450751afbc6c6c8))
### Features
- Add rage command for bug reporting ([bb80fef](https://github.com/suo/lintrunner/commit/bb80fef49fabad5558e77786e157b4ea822d0f23))
## [0.8.0] - 2022-05-02
### Bug Fixes
- Add severity to oneline message ([14495be](https://github.com/suo/lintrunner/commit/14495be590d1b8c223a07f59ccdb6600d22e92c4))
- Unify output controlling commands into --output ([8b95e7b](https://github.com/suo/lintrunner/commit/8b95e7b76c65dc4187b17b9851ce902aebc58944))
### Documentation
- Improve help message ([0630560](https://github.com/suo/lintrunner/commit/06305606f9d840610487a9b7dff9a159a05fb8d1))
### Features
- Warn if init seems out of date ([4050dd7](https://github.com/suo/lintrunner/commit/4050dd7fe883c419e0af110a7d2c6887b6ba08f0))
- Format command ([bf7925d](https://github.com/suo/lintrunner/commit/bf7925df7b1aac0265e3bf88ef8ca05d720e0560))
### Testing
- Add integration test for init warnings ([9c75f29](https://github.com/suo/lintrunner/commit/9c75f293cdccbd662f922548861b277c70f9d14d))
- Add integration test for dryrun error on init config ([88738ca](https://github.com/suo/lintrunner/commit/88738ca299179588e9abae6b8265c8287270edb6))
### Build
- Run cargo upgrade ([0241c01](https://github.com/suo/lintrunner/commit/0241c01630187ce3817ee1964f858ebc7b85d10a))
## [0.7.0] - 2022-04-15
### Features
- Add --oneline arg for compact lint rendering ([a0a9e87](https://github.com/suo/lintrunner/commit/a0a9e878781a2ead70ff7bfc94064275eeb79020))
## [0.6.2] - 2022-04-15
### Bug Fixes
- Do not allow * to match across path segments ([382413a](https://github.com/suo/lintrunner/commit/382413aa40edf2dead74fd9f25fdd01bac00bd80))
### Testing
- Add test for deleted files with --revision specified ([19c6fee](https://github.com/suo/lintrunner/commit/19c6fee0d11096c4ba7e7182fd3178b170cddb10))
## [0.6.1] - 2022-04-15
### Bug Fixes
- Correct order of arguments while gathering files to lint ([9c2093d](https://github.com/suo/lintrunner/commit/9c2093d4dace6e3570cad9bc5b363e0b3fc50b3c))
### Documentation
- Update install instructions ([a3095fd](https://github.com/suo/lintrunner/commit/a3095fde2edacb0dba93250cfca35f2000c4c009))
- Add --merge-base-with to readme ([8d51a11](https://github.com/suo/lintrunner/commit/8d51a117e833211ef275355d27c64eacab40cbce))
<!-- generated by git-cliff -->
| PypiClean |
/fake_bpy_module_2.82-20230117-py3-none-any.whl/bl_ui/properties_physics_softbody.py | import sys
import typing
import bpy_types
GenericType = typing.TypeVar("GenericType")
class PhysicButtonsPanel:
bl_context = None
''' '''
bl_region_type = None
''' '''
bl_space_type = None
''' '''
def poll(self, context):
'''
'''
pass
class PHYSICS_PT_softbody(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_cache(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_collision(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_edge(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_edge_aerodynamics(
PhysicButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_edge_stiffness(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_field_weights(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_goal(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_header(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_goal_settings(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_goal_strengths(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_object(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_simulation(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_solver(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_solver_diagnostics(
PhysicButtonsPanel, bpy_types.Panel, bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PHYSICS_PT_softbody_solver_helpers(PhysicButtonsPanel, bpy_types.Panel,
bpy_types._GenericUI):
COMPAT_ENGINES = None
''' '''
bl_context = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_parent_id = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
def softbody_panel_enabled(md):
'''
'''
pass | PypiClean |
/ansible-kkvesper-2.3.2.0.tar.gz/ansible-kkvesper-2.3.2.0/lib/ansible/modules/web_infrastructure/jboss.py |
# (c) 2013, Jeroen Hoekx <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: jboss
version_added: "1.4"
short_description: deploy applications to JBoss
description:
- Deploy applications to JBoss standalone using the filesystem
options:
deployment:
required: true
description:
- The name of the deployment
src:
required: false
description:
- The remote path of the application ear or war to deploy
deploy_path:
required: false
default: /var/lib/jbossas/standalone/deployments
description:
- The location in the filesystem where the deployment scanner listens
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the application should be deployed or undeployed
notes:
- "The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
- "Ensure no identically named application is deployed through the JBoss CLI"
author: "Jeroen Hoekx (@jhoekx)"
"""
EXAMPLES = """
# Deploy a hello world application
- jboss:
src: /tmp/hello-1.0-SNAPSHOT.war
deployment: hello.war
state: present
# Update the hello world application
- jboss:
src: /tmp/hello-1.1-SNAPSHOT.war
deployment: hello.war
state: present
# Undeploy the hello world application
- jboss:
deployment: hello.war
state: absent
"""
import os
import shutil
import time
def is_deployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.deployed"%(deployment)))
def is_undeployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.undeployed"%(deployment)))
def is_failed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.failed"%(deployment)))
def main():
module = AnsibleModule(
argument_spec = dict(
src=dict(),
deployment=dict(required=True),
deploy_path=dict(default='/var/lib/jbossas/standalone/deployments'),
state=dict(choices=['absent', 'present'], default='present'),
),
)
changed = False
src = module.params['src']
deployment = module.params['deployment']
deploy_path = module.params['deploy_path']
state = module.params['state']
if state == 'present' and not src:
module.fail_json(msg="Argument 'src' required.")
if not os.path.exists(deploy_path):
module.fail_json(msg="deploy_path does not exist.")
deployed = is_deployed(deploy_path, deployment)
if state == 'present' and not deployed:
if not os.path.exists(src):
module.fail_json(msg='Source file %s does not exist.'%(src))
if is_failed(deploy_path, deployment):
### Clean up old failed deployment
os.remove(os.path.join(deploy_path, "%s.failed"%(deployment)))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.'%(deployment))
time.sleep(1)
changed = True
if state == 'present' and deployed:
if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment)))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
deployed = False
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.'%(deployment))
time.sleep(1)
changed = True
if state == 'absent' and deployed:
os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment)))
while deployed:
deployed = not is_undeployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Undeploying %s failed.'%(deployment))
time.sleep(1)
changed = True
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main() | PypiClean |
/rinohtype-0.5.4.tar.gz/rinohtype-0.5.4/src/rinoh/attribute.py |
import re
from collections import OrderedDict
from configparser import ConfigParser
from io import StringIO
from itertools import chain
from pathlib import Path
from token import NUMBER, ENDMARKER, MINUS, PLUS, NAME, NEWLINE
from tokenize import generate_tokens
from warnings import warn
from .util import (NamedDescriptor, WithNamedDescriptors,
NotImplementedAttribute, class_property, PeekIterator,
cached)
__all__ = ['AttributeType', 'AcceptNoneAttributeType', 'OptionSet',
'OptionSetMeta', 'Attribute', 'OverrideDefault',
'AttributesDictionary', 'Configurable', 'RuleSet', 'RuleSetFile',
'Bool', 'Integer', 'ParseError', 'Var']
class AttributeType(object):
    """Base class for the types attribute values can take.

    Subclasses implement type checking and parsing from strings/tokens."""
    def __eq__(self, other):
        # value equality: same concrete type and identical instance state
        return type(self) == type(other) and self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
    @classmethod
    def check_type(cls, value):
        """Return whether `value` is a valid value of this type."""
        return isinstance(value, cls)
    @classmethod
    def from_string(cls, string, source=None):
        """Parse `string` into a value of this type."""
        return cls.parse_string(string, source)
    @classmethod
    def parse_string(cls, string, source):
        """Tokenize `string` and parse a single value from it.

        Raises ParseError when tokens remain after the parsed value."""
        tokens = TokenIterator(string)
        value = cls.from_tokens(tokens, source)
        if next(tokens).type != ENDMARKER:
            raise ParseError('Syntax error')
        return value
    @classmethod
    def from_tokens(cls, tokens, source):
        """Parse a value from a :class:`TokenIterator` (subclass hook)."""
        raise NotImplementedError(cls)
    @classmethod
    def validate(cls, value):
        """Coerce a string via :meth:`from_string` and check the result."""
        if isinstance(value, str):
            value = cls.from_string(value)
        if not cls.check_type(value):
            raise TypeError("{} is not of type {}".format(value, cls.__name__))
        return value
    @classmethod
    def doc_repr(cls, value):
        """Representation of `value` used in the generated documentation."""
        return '``{}``'.format(value) if value else '(no value)'
    @classmethod
    def doc_format(cls):
        """Describe the accepted input format (subclasses should override)."""
        warn('Missing implementation for {}.doc_format'.format(cls.__name__))
        return ''
class AcceptNoneAttributeType(AttributeType):
    """Accepts 'none' (besides other values)"""
    @classmethod
    def check_type(cls, value):
        # None is valid in addition to whatever the superclass accepts
        return (isinstance(value, type(None))
                or super(__class__, cls).check_type(value))
    @classmethod
    def from_string(cls, string, source=None):
        # the case-insensitive string 'none' maps to the value None
        if string.strip().lower() == 'none':
            return None
        return super(__class__, cls).from_string(string, source)
    @classmethod
    def doc_repr(cls, value):
        """Render None as ``none`` in the generated documentation."""
        return '``{}``'.format('none' if value is None else value)
class OptionSetMeta(type):
    """Metaclass for :class:`OptionSet`.

    Appends the accepted values to the class docstring and exposes each
    value as an UPPER_CASE class attribute (e.g. ``MyOptions.SOME_VALUE``)."""
    def __new__(metacls, classname, bases, cls_dict):
        cls = super().__new__(metacls, classname, bases, cls_dict)
        # build the docstring: the class's own doc plus the accepted values
        cls.__doc__ = (cls_dict['__doc__'] + '\n\n'
                       if '__doc__' in cls_dict else '')
        cls.__doc__ += 'Accepts: {}'.format(cls.doc_format())
        return cls
    def __getattr__(cls, item):
        # map UPPER_CASE attribute access to the matching value string
        # ('SOME_VALUE' -> 'some value'); 'NONE' maps to None when accepted
        if item == 'NONE' and None in cls.values:
            return None
        string = item.lower().replace('_', ' ')
        if item.isupper() and string in cls.values:
            return string
        raise AttributeError(item)
    def __iter__(cls):
        return iter(cls.values)
class OptionSet(AttributeType, metaclass=OptionSetMeta):
    """Accepts the values listed in :attr:`values`"""
    values = ()
    @classmethod
    def check_type(cls, value):
        return value in cls.values
    @class_property
    def value_strings(cls):
        """Lower-case string forms of the values ('none' for None)."""
        return ['none' if value is None else value.lower()
                for value in cls.values]
    @classmethod
    def _value_from_tokens(cls, tokens):
        """Consume a run of NAME tokens (possibly hyphen-separated) and
        return the raw source substring they cover."""
        if tokens.next.type != NAME:
            raise ParseError('Expecting a name')
        token = next(tokens)
        _, start_col = token.start
        while tokens.next and tokens.next.exact_type in (NAME, MINUS):
            token = next(tokens)
        _, end_col = token.end
        return token.line[start_col:end_col].strip()
    @classmethod
    def from_tokens(cls, tokens, source):
        """Parse one of the accepted option strings (case-insensitive)."""
        option_string = cls._value_from_tokens(tokens)
        try:
            index = cls.value_strings.index(option_string.lower())
        except ValueError:
            raise ValueError("'{}' is not a valid {}. Must be one of: '{}'"
                             .format(option_string, cls.__name__,
                                     "', '".join(cls.value_strings)))
        return cls.values[index]
    @classmethod
    def doc_repr(cls, value):
        return '``{}``'.format(value)
    @classmethod
    def doc_format(cls):
        return ', '.join('``{}``'.format(s) for s in cls.value_strings)
class Attribute(NamedDescriptor):
    """Descriptor used to describe a style attribute"""
    def __init__(self, accepted_type, default_value, description):
        self.name = None            # set by the NamedDescriptor machinery
        self.accepted_type = accepted_type
        # validate eagerly so a bad default fails at class-definition time
        self.default_value = accepted_type.validate(default_value)
        self.description = description
        self.source = None
    def __get__(self, style, type=None):
        # fall back to the default when the style doesn't set this attribute;
        # on class-level access (style is None) return the descriptor itself
        try:
            return style.get(self.name, self.default_value)
        except AttributeError:
            return self
    def __set__(self, style, value):
        if not self.accepted_type.check_type(value):
            raise TypeError('The {} attribute only accepts {} instances'
                            .format(self.name, self.accepted_type.__name__))
        style[self.name] = value
class OverrideDefault(Attribute):
    """Overrides the default value of an attribute defined in a superclass"""
    def __init__(self, default_value):
        # validation is deferred until `overrides` is assigned (by the
        # WithAttributes metaclass), since the accepted type is not known yet
        self._default_value = default_value
    @property
    def overrides(self):
        """The superclass :class:`Attribute` this override applies to."""
        return self._overrides
    @overrides.setter
    def overrides(self, attribute):
        self._overrides = attribute
        self.default_value = self.accepted_type.validate(self._default_value)
    @property
    def accepted_type(self):
        # delegate to the overridden attribute
        return self.overrides.accepted_type
    @property
    def description(self):
        # delegate to the overridden attribute
        return self.overrides.description
class WithAttributes(WithNamedDescriptors):
    """Metaclass that collects :class:`Attribute` descriptors into
    ``_attributes``, resolves :class:`OverrideDefault` links to the
    overridden superclass attribute, and generates class documentation."""
    def __new__(mcls, classname, bases, cls_dict):
        attributes = cls_dict['_attributes'] = OrderedDict()
        doc = []
        for name, attr in cls_dict.items():
            if not isinstance(attr, Attribute):
                continue
            attributes[name] = attr
            if isinstance(attr, OverrideDefault):
                # find the overridden attribute in the bases' MROs
                for mro_cls in (cls for base_cls in bases
                                for cls in base_cls.__mro__):
                    try:
                        attr.overrides = mro_cls._attributes[name]
                        break
                    except (AttributeError, KeyError):
                        pass
                else:
                    raise NotImplementedError
                battr = ':attr:`{0} <.{0}.{1}>`'.format(mro_cls.__name__, name)
                inherits = f' (inherited from {battr})'
                overrides = f' (overrides {battr} default)'
            else:
                inherits = overrides = ''
            doc.append('{}: {}{}'.format(name, attr.description, inherits))
            format = attr.accepted_type.doc_format()
            default = attr.accepted_type.doc_repr(attr.default_value)
            doc.append('\n          *Accepts* :class:`.{}`: {}\n'
                       .format(attr.accepted_type.__name__, format))
            doc.append('\n          *Default*: {}{}\n'
                       .format(default, overrides))
        supported_attributes = list(name for name in attributes)
        documented = set(supported_attributes)
        # also document attributes inherited from the base classes
        for base_class in bases:
            try:
                supported_attributes.extend(base_class._supported_attributes)
            except AttributeError:
                continue
            for mro_cls in base_class.__mro__:
                for name, attr in getattr(mro_cls, '_attributes', {}).items():
                    if name in documented:
                        continue
                    doc.append('{0}: {1} (inherited from :attr:`{2} <.{2}.{0}>`)'
                               .format(name, attr.description,
                                       mro_cls.__name__))
                    format = attr.accepted_type.doc_format()
                    default = attr.accepted_type.doc_repr(attr.default_value)
                    doc.append('\n          *Accepts* :class:`.{}`: {}\n'
                               .format(attr.accepted_type.__name__, format))
                    doc.append('\n          *Default*: {}\n'.format(default))
                    documented.add(name)
        if doc:
            attr_doc = '\n        '.join(chain(['        Attributes:'], doc))
            cls_dict['__doc__'] = (cls_dict.get('__doc__', '') + '\n\n'
                                   + attr_doc)
        cls_dict['_supported_attributes'] = supported_attributes
        return super().__new__(mcls, classname, bases, cls_dict)
    @property
    def _all_attributes(cls):
        """Yield attribute names of this class and its bases (bases first)."""
        for mro_class in reversed(cls.__mro__):
            for name in getattr(mro_class, '_attributes', ()):
                yield name
    @property
    def supported_attributes(cls):
        """Yield attribute names supported by this class (own first)."""
        for mro_class in cls.__mro__:
            for name in getattr(mro_class, '_supported_attributes', ()):
                yield name
class AttributesDictionary(OrderedDict, metaclass=WithAttributes):
    """An ordered mapping of attribute names to values, optionally chained
    to a `base` dictionary for fallback lookups."""
    def __init__(self, base=None, **attributes):
        self.name = None        # set when registered in a RuleSet
        self.source = None      # where this dictionary was defined
        self.base = base
        super().__init__(attributes)
    @classmethod
    def _get_default(cls, attribute):
        """Return the default value for `attribute`.
        If no default is specified in this style, get the default from the
        nearest superclass.
        If `attribute` is not supported, raise a :class:`KeyError`."""
        try:
            # object (end of the MRO) has no _attributes -> AttributeError
            for klass in cls.__mro__:
                if attribute in klass._attributes:
                    return klass._attributes[attribute].default_value
        except AttributeError:
            raise KeyError("No attribute '{}' in {}".format(attribute, cls))
    @classmethod
    def attribute_definition(cls, name):
        """Return the :class:`Attribute` descriptor for `name`, searching
        the MRO; raise :class:`KeyError` when it is not defined."""
        try:
            for klass in cls.__mro__:
                if name in klass._attributes:
                    return klass._attributes[name]
        except AttributeError:
            pass
        raise KeyError(name)
    @classmethod
    def attribute_type(cls, name):
        """Return the accepted type for attribute `name`; raise TypeError
        for unsupported attributes."""
        try:
            return cls.attribute_definition(name).accepted_type
        except KeyError:
            raise TypeError('{} is not a supported attribute for {}'
                            .format(name, cls.__name__))
    @classmethod
    def get_ruleset(self):
        # NOTE(review): decorated @classmethod but the parameter is named
        # 'self'; subclasses appear to accept a document argument (see
        # Configurable.get_config_value) — confirm intended signature
        raise NotImplementedError
class DefaultValueException(Exception):
    """Raised when an attribute is not set anywhere in a rule set chain,
    signalling that the attribute's default value should be used."""
    pass
class Configurable(object):
    """Mixin for objects whose attributes are looked up in a rule set."""
    configuration_class = NotImplementedAttribute()
    def configuration_name(self, document):
        """Return the name under which this object's configuration is
        stored in the rule set (subclass hook)."""
        raise NotImplementedError
    def get_config_value(self, attribute, document):
        """Look up `attribute` for this object in the document's rule set."""
        ruleset = self.configuration_class.get_ruleset(document)
        return ruleset.get_value_for(self, attribute, document)
class BaseConfigurationException(Exception):
    """Raised during value lookup when a configuration entry's base is a
    (not yet resolved) name; carries that name so the lookup can restart
    from the named configuration (see RuleSet.get_value)."""
    def __init__(self, base_name):
        self.name = base_name
class Source(object):
    """Describes where a :class:`DocumentElement` was defined"""
    @property
    def location(self):
        """Textual representation of this source"""
        return repr(self)
    @property
    def root(self):
        """Directory path for resolving paths relative to this source"""
        # None means relative paths cannot be resolved against this source
        return None
class RuleSet(OrderedDict, Source):
    """An ordered mapping of configuration names to attribute dictionaries,
    optionally chained to a `base` rule set, with support for variables."""
    main_section = NotImplementedAttribute()
    def __init__(self, name, base=None, source=None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.base = base
        self.source = source
        self.variables = OrderedDict()
    def contains(self, name):
        """Return whether `name` is defined here or in a base rule set."""
        return name in self or (self.base and self.base.contains(name))
    def find_source(self, name):
        """Find top-most ruleset where configuration `name` is defined"""
        if name in self:
            return self.name
        if self.base:
            return self.base.find_source(name)
    def get_configuration(self, name):
        """Return the configuration `name`, falling back to the base chain;
        re-raises KeyError when not found anywhere."""
        try:
            return self[name]
        except KeyError:
            if self.base:
                return self.base.get_configuration(name)
            raise
    def __setitem__(self, name, item):
        assert name not in self
        if isinstance(item, AttributesDictionary):    # FIXME
            self._validate_attributes(name, item)
        super().__setitem__(name, item)
    def __call__(self, name, **kwargs):
        # shorthand: create and register an entry of the appropriate class
        self[name] = self.get_entry_class(name)(**kwargs)
    def __repr__(self):
        return '{}({})'.format(type(self).__name__, self.name)
    def __str__(self):
        return repr(self)
    def __bool__(self):
        # an empty rule set is still truthy (OrderedDict would be falsy)
        return True
    RE_VARIABLE = re.compile(r'^\$\(([a-z_ -]+)\)$', re.IGNORECASE)
    def _validate_attributes(self, name, attr_dict):
        """Validate (and normalize) all values in `attr_dict` in place."""
        attr_dict.name = name
        attr_dict.source = self
        for key, val in attr_dict.items():
            attr_dict[key] = self._validate_attribute(attr_dict, key, val)
    def _validate_attribute(self, attr_dict, name, value):
        """Parse string values; '$(var)' strings become Var placeholders;
        raise TypeError when the final value has the wrong type."""
        attribute_type = attr_dict.attribute_type(name)
        if isinstance(value, str):
            stripped = value.replace('\n', ' ').strip()
            m = self.RE_VARIABLE.match(stripped)
            if m:
                return Var(m.group(1))
            value = self._attribute_from_string(attribute_type, stripped)
        elif hasattr(value, 'source'):
            value.source = self
        if not isinstance(value, Var) and not attribute_type.check_type(value):
            raise TypeError("{} ({}) is not of the correct type for the '{}' "
                            "attribute".format(value, type(value).__name__,
                                               name))
        return value
    @cached
    def _attribute_from_string(self, attribute_type, string):
        return attribute_type.from_string(string, self)
    def get_variable(self, configuration_class, attribute, variable):
        """Resolve `variable` (a Var) to a validated attribute value,
        searching this rule set and then the base chain."""
        try:
            value = self.variables[variable.name]
        except KeyError:
            if not self.base:
                raise VariableNotDefined("Variable '{}' is not defined"
                                         .format(variable.name))
            return self.base.get_variable(configuration_class, attribute,
                                          variable)
        return self._validate_attribute(configuration_class, attribute, value)
    def get_entry_class(self, name):
        """Return the AttributesDictionary subclass for entry `name`
        (subclass hook)."""
        raise NotImplementedError
    def _get_value_recursive(self, name, attribute):
        # lookup order: this rule set's entry, the entry's base (restart via
        # BaseConfigurationException when it is a name), then the base chain
        if name in self:
            entry = self[name]
            if attribute in entry:
                return entry[attribute]
            elif isinstance(entry.base, str):
                raise BaseConfigurationException(entry.base)
            elif entry.base is not None:
                return entry.base[attribute]
        if self.base:
            return self.base._get_value_recursive(name, attribute)
        raise DefaultValueException
    @cached
    def get_value(self, name, attribute):
        """Return the value of `attribute` for configuration `name`."""
        try:
            return self._get_value_recursive(name, attribute)
        except BaseConfigurationException as exc:
            # the entry's base is a name: restart lookup from that name
            return self.get_value(exc.name, attribute)
    def _get_value_lookup(self, configurable, attribute, document):
        name = configurable.configuration_name(document)
        return self.get_value(name, attribute)
    def get_value_for(self, configurable, attribute, document):
        """Look up `attribute` for `configurable`, falling back to the
        attribute's default value and resolving Var placeholders."""
        try:
            value = self._get_value_lookup(configurable, attribute, document)
        except DefaultValueException:
            value = configurable.configuration_class._get_default(attribute)
        if isinstance(value, Var):
            configuration_class = configurable.configuration_class
            value = self.get_variable(configuration_class, attribute, value)
        return value
class RuleSetFile(RuleSet):
    """A :class:`RuleSet` loaded from an INI-style configuration file."""
    def __init__(self, filename, base=None, source=None, **kwargs):
        self.filename = self._absolute_path(filename, source)
        config = ConfigParser(default_section=None, delimiters=('=',),
                              interpolation=None)
        with self.filename.open() as file:
            config.read_file(file)
        # the main section may set the rule set's name and base
        options = dict(config[self.main_section]
                       if config.has_section(self.main_section) else {})
        name = options.pop('name', filename)
        base = options.pop('base', base)
        options.update(kwargs)    # optionally override options
        super().__init__(name, base=base, source=source, **options)
        if config.has_section('VARIABLES'):
            for name, value in config.items('VARIABLES'):
                self.variables[name] = value
        # every remaining section defines a configuration entry; a section
        # name may carry a classifier after a colon ('name : classifier')
        for section_name, section_body in config.items():
            if section_name in (None, self.main_section, 'VARIABLES'):
                continue
            if ':' in section_name:
                name, classifier = (s.strip() for s in section_name.split(':'))
            else:
                name, classifier = section_name.strip(), None
            self.process_section(name, classifier, section_body.items())
    @classmethod
    def _absolute_path(cls, filename, source):
        """Resolve `filename` against the source's root when relative."""
        file_path = Path(filename)
        if not file_path.is_absolute():
            if source is None or source.root is None:
                raise ValueError('{} path should be absolute: {}'
                                 .format(cls.__name__, file_path))
            file_path = source.root / file_path
        return file_path
    @property
    def location(self):
        return str(self.filename.resolve()), None, None
    @property
    def root(self):
        return self.filename.parent.resolve()
    def process_section(self, section_name, classifier, items):
        """Handle one configuration section (subclass hook)."""
        raise NotImplementedError
class Bool(AttributeType):
    """Expresses a binary choice"""
    @classmethod
    def check_type(cls, value):
        """A value is valid if it is a :class:`bool`."""
        return isinstance(value, bool)
    @classmethod
    def from_tokens(cls, tokens, source):
        """Parse the next token as ``true`` or ``false`` (case-insensitive)."""
        word = next(tokens).string
        result = dict(true=True, false=False).get(word.lower())
        if result is None:
            raise ValueError("'{}' is not a valid {}. Must be one of 'true' "
                             "or 'false'".format(word, cls.__name__))
        return result
    @classmethod
    def doc_repr(cls, value):
        """Render the value as a lower-case inline literal."""
        return '``{}``'.format(str(value).lower())
    @classmethod
    def doc_format(cls):
        return '``true`` or ``false``'
class Integer(AttributeType):
    """Accepts natural numbers"""
    @classmethod
    def check_type(cls, value):
        """A value is valid if it is an :class:`int`."""
        return isinstance(value, int)
    @classmethod
    def from_tokens(cls, tokens, source):
        """Parse an optionally signed integer from the token stream."""
        token = next(tokens)
        negate = False
        if token.exact_type in (MINUS, PLUS):
            # consume the optional sign token
            negate = token.exact_type == MINUS
            token = next(tokens)
        if token.type != NUMBER:
            raise ParseError('Expecting a number')
        try:
            magnitude = int(token.string)
        except ValueError:
            raise ParseError('Expecting an integer')
        return -magnitude if negate else magnitude
    @classmethod
    def doc_format(cls):
        return 'a natural number (positive integer)'
class TokenIterator(PeekIterator):
    """Tokenizes `string` and iterates over the tokens"""
    def __init__(self, string):
        self.string = string
        tokens = generate_tokens(StringIO(string).readline)
        super().__init__(tokens)
    def _advance(self):
        result = super()._advance()
        # skip the empty NEWLINE token that tokenize emits for source that
        # does not end in a newline character
        if self.next and self.next.type == NEWLINE and self.next.string == '':
            super()._advance()
        return result
class ParseError(Exception):
    """Raised when an attribute value string cannot be parsed."""
    pass
# variables
class Var(object):
    """Placeholder for a variable reference in a rule set.

    Instances are stored in place of attribute values (strings matching
    ``$(name)``) and resolved later by ``RuleSet.get_variable``.
    """
    def __init__(self, name):
        super().__init__()
        self.name = name    # the variable's name, without the $( ) wrapper
    def __repr__(self):
        return "{}('{}')".format(type(self).__name__, self.name)
    def __str__(self):
        return '$({})'.format(self.name)
    def __eq__(self, other):
        # only compare to other Var instances; comparing to arbitrary
        # objects previously raised AttributeError (or matched any object
        # with an equal 'name' attribute)
        if not isinstance(other, Var):
            return NotImplemented
        return self.name == other.name
    def __hash__(self):
        # defining __eq__ alone sets __hash__ to None, making Var
        # unhashable; restore hashability consistent with equality
        return hash(self.name)
class VariableNotDefined(Exception):
    """Raised when a rule set references a variable that is defined neither
    locally nor in any base rule set (see RuleSet.get_variable)."""
    pass | PypiClean
/plone.app.debugtoolbar-1.3.0.tar.gz/plone.app.debugtoolbar-1.3.0/src/plone/app/debugtoolbar/browser/resources/debugtoolbar.js | _read_debug_cookie = function() {
key = "plone.app.debugtoolbar";
var result, decode = decodeURIComponent;
var cookie = (result = new RegExp('(?:^|; )' + encodeURIComponent(key) + '=([^;]*)').exec(document.cookie)) ? decode(result[1]) : null;
if(cookie == null) {
return {};
}
return jQuery.parseJSON(cookie);
};
// REPL-style prompt: posts input lines to a server-side view and appends
// the HTML response to the `target` element. Keeps a submit history for
// up/down-arrow navigation (see the keyup handler below).
function InteractivePrompt(target, path) {
    this.target = target;
    this.submitHistory = [];
    this.historyPosition = -1;   // index into submitHistory while browsing
    if (path == undefined) {
        // path = "./@@plone.app.debugtoolbar.interactive.response";
        path = location.href + "/@@plone.app.debugtoolbar.interactive.response";
    }
    this.path = path;
};
// Submit one input line: record it in the history and POST it to the
// server; non-empty responses are appended to the output element, which
// is then scrolled to the bottom.
InteractivePrompt.prototype.submit = function(line) {
    var out = this.target;
    this.submitHistory.push(line);
    this.historyPosition = this.submitHistory.length;
    jQuery.post(
        this.path,
        {'line': line},
        function(data) {
            if(data != '') {
                jQuery(out).append(data);
                jQuery(out).animate({ scrollTop: jQuery(out).prop('scrollHeight') }, "fast");
            }
        }
    );
};
// TALES-expression tester. NOTE(review): near-duplicate of
// InteractivePrompt — it differs only in the default endpoint and in
// replacing (rather than appending to) the output element.
function TalesTester(target, path) {
    this.target = target;
    this.submitHistory = [];
    this.historyPosition = -1;   // index into submitHistory while browsing
    if (path == undefined) {
        // path = "./@@plone.app.debugtoolbar.interactive.tales";
        path = location.href + "/@@plone.app.debugtoolbar.interactive.tales";
    }
    this.path = path;
};
// Submit one TALES expression: record it in the history and POST it to the
// server; non-empty responses replace the output element's content.
TalesTester.prototype.submit = function(line) {
    var out = this.target;
    this.submitHistory.push(line);
    this.historyPosition = this.submitHistory.length;
    jQuery.post(
        this.path,
        {'line': line},
        function(data) {
            if(data != '') {
                jQuery(out).html(data);
            }
        }
    );
};
// Wire up the debug toolbar once the DOM is ready.
// NOTE(review): the outer jQuery(fn) already runs on DOM-ready, so the
// inner $(fn) wrapper is redundant (but harmless).
jQuery(function($) {
    $(function() {
        // Move debug toolbar to the top
        $("#debug-toolbar").prependTo("html body");
        $("#debug-toolbar-trigger").prependTo("html body");
        // Action for trigger
        $("#debug-toolbar-trigger").click(function() {
            $('#debug-toolbar').slideDown();
            return false;
        });
        $("#debug-toolbar-close").click(function() {
            $('#debug-toolbar').slideUp();
            return false;
        });
        // Panel open/close
        $(".debug-toolbar-header").click(function() {
            $('#' + $(this).attr('id') + '-body').toggle('fade');
        });
        // Section open/close
        $(".debug-toolbar-section-header").click(function() {
            $('#' + $(this).attr('id') + '-body').toggle('fade');
        });
        // Interactive debug panel: submit on button click or enter key;
        // up/down arrows browse the submit history.
        var prompt = new InteractivePrompt("#debug-toolbar-interactive-out");
        $("#debug-toolbar-interactive-input-submit").click(function () {
            var line = $("#debug-toolbar-interactive-input").val();
            prompt.submit(line);
            $("#debug-toolbar-interactive-input").val("");
            return false;
        });
        $("#debug-toolbar-interactive-input").keyup(function (e) {
            if(e.keyCode == 13) { // enter
                var line = $(this).val();
                prompt.submit(line);
                $(this).val("");
                return false;
            } else if(e.keyCode == 38) { // up
                if(prompt.historyPosition > 0 && prompt.submitHistory.length > 0) {
                    --prompt.historyPosition;
                    $(this).val(prompt.submitHistory[prompt.historyPosition]);
                    return false;
                }
            } else if(e.keyCode == 40) { // down
                if(prompt.historyPosition < prompt.submitHistory.length - 1 && prompt.submitHistory.length > 0) {
                    ++prompt.historyPosition;
                    $(this).val(prompt.submitHistory[prompt.historyPosition]);
                    return false;
                } else if(prompt.historyPosition >= prompt.submitHistory.length - 1) {
                    // past the newest entry: clear the input
                    prompt.historyPosition = prompt.submitHistory.length;
                    $(this).val("");
                    return false;
                }
            }
        });
        // TALES tester: same interaction pattern as the interactive panel
        var talesTester = new TalesTester("#debug-toolbar-tales-out");
        $("#debug-toolbar-tales-input-submit").click(function () {
            var line = $("#debug-toolbar-tales-input").val();
            talesTester.submit(line);
            return false;
        });
        $("#debug-toolbar-tales-input").keyup(function (e) {
            if(e.keyCode == 13) { // enter
                var line = $(this).val();
                talesTester.submit(line);
                return false;
            } else if(e.keyCode == 38) { // up
                if(talesTester.historyPosition > 0 && talesTester.submitHistory.length > 0) {
                    --talesTester.historyPosition;
                    $(this).val(talesTester.submitHistory[talesTester.historyPosition]);
                    return false;
                }
            } else if(e.keyCode == 40) { // down
                if(talesTester.historyPosition < talesTester.submitHistory.length - 1 && talesTester.submitHistory.length > 0) {
                    ++talesTester.historyPosition;
                    $(this).val(talesTester.submitHistory[talesTester.historyPosition]);
                    return false;
                } else if(talesTester.historyPosition >= talesTester.submitHistory.length - 1) {
                    talesTester.historyPosition = talesTester.submitHistory.length;
                    $(this).val("");
                    return false;
                }
            }
        });
        // Reload panel: POST the form via AJAX, then reload the page
        $('#debug-toolbar-reload-body form').submit(function(e){
            e.preventDefault();
            $.ajax({
                url: $(this).attr('action'),
                method: 'POST',
                success: function(){
                    window.location.reload();
                }
            });
        });
    });
}); | PypiClean
/yt-dlp-cp-2.9.9.tar.gz/yt-dlp-cp-2.9.9/yt_dlp/extractor/breakcom.py | from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
int_or_none,
url_or_none,
)
# Extractor for Break.com videos.
# NOTE: no class docstring on purpose — InfoExtractor subclasses use
# __doc__ as the extractor description (IE_DESC).
class BreakIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?break\.com/video/(?P<display_id>[^/]+?)(?:-(?P<id>\d+))?(?:[/?#&]|$)'
    _TESTS = [{
        'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
        'info_dict': {
            'id': '2468056',
            'ext': 'mp4',
            'title': 'When Girls Act Like D-Bags',
            'age_limit': 13,
        },
    }, {
        # youtube embed
        'url': 'http://www.break.com/video/someone-forgot-boat-brakes-work',
        'info_dict': {
            'id': 'RrrDLdeL2HQ',
            'ext': 'mp4',
            'title': 'Whale Watching Boat Crashing Into San Diego Dock',
            'description': 'md5:afc1b2772f0a8468be51dd80eb021069',
            'upload_date': '20160331',
            'uploader': 'Steve Holden',
            'uploader_id': 'sdholden07',
        },
        'params': {
            'skip_download': True,
        }
    }, {
        'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        display_id, video_id = self._match_valid_url(url).groups()
        webpage = self._download_webpage(url, display_id)
        # pages that merely embed a YouTube video are delegated to YoutubeIE
        youtube_url = YoutubeIE._extract_url(webpage)
        if youtube_url:
            return self.url_result(youtube_url, ie=YoutubeIE.ie_key())
        # the page embeds a JSON 'content' array listing the video sources
        content = self._parse_json(
            self._search_regex(
                r'(?s)content["\']\s*:\s*(\[.+?\])\s*[,\n]', webpage,
                'content'),
            display_id)
        formats = []
        for video in content:
            video_url = url_or_none(video.get('url'))
            if not video_url:
                continue
            # the bitrate is encoded in the URL as e.g. '..._1200_kbps...'
            bitrate = int_or_none(self._search_regex(
                r'(\d+)_kbps', video_url, 'tbr', default=None))
            formats.append({
                'url': video_url,
                'format_id': 'http-%d' % bitrate if bitrate else 'http',
                'tbr': bitrate,
            })
        title = self._search_regex(
            (r'title["\']\s*:\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
             r'<h1[^>]*>(?P<value>[^<]+)'), webpage, 'title', group='value')
        def get(key, name):
            # read an integer page property such as ratings or pid
            return int_or_none(self._search_regex(
                r'%s["\']\s*:\s*["\'](\d+)' % key, webpage, name,
                default=None))
        age_limit = get('ratings', 'age limit')
        video_id = video_id or get('pid', 'video id') or display_id
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'age_limit': age_limit,
            'formats': formats,
        } | PypiClean
/bitmovin_api_sdk-1.171.0-py3-none-any.whl/bitmovin_api_sdk/encoding/filters/unsharp/unsharp_api.py |
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.models.unsharp_filter import UnsharpFilter
from bitmovin_api_sdk.encoding.filters.unsharp.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.filters.unsharp.unsharp_filter_list_query_params import UnsharpFilterListQueryParams
class UnsharpApi(BaseApi):
    """REST client for the Bitmovin ``/encoding/filters/unsharp`` endpoints
    (create, delete, retrieve and list Unsharp filters)."""
    @poscheck_except(2)
    def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
        # type: (str, str, str, BitmovinApiLoggerBase) -> None
        super(UnsharpApi, self).__init__(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )
        # sub-resource API for custom data attached to a filter
        self.customdata = CustomdataApi(
            api_key=api_key,
            tenant_org_id=tenant_org_id,
            base_url=base_url,
            logger=logger
        )
    def create(self, unsharp_filter, **kwargs):
        # type: (UnsharpFilter, dict) -> UnsharpFilter
        """Create Unsharp Filter
        :param unsharp_filter: The Unsharp Filter to be created
        :type unsharp_filter: UnsharpFilter, required
        :return: Unsharp Filter details
        :rtype: UnsharpFilter
        """
        return self.api_client.post(
            '/encoding/filters/unsharp',
            unsharp_filter,
            type=UnsharpFilter,
            **kwargs
        )
    def delete(self, filter_id, **kwargs):
        # type: (string_types, dict) -> BitmovinResponse
        """Delete Unsharp Filter
        :param filter_id: Id of the unsharp filter
        :type filter_id: string_types, required
        :return: Id of the unsharp filter.
        :rtype: BitmovinResponse
        """
        return self.api_client.delete(
            '/encoding/filters/unsharp/{filter_id}',
            path_params={'filter_id': filter_id},
            type=BitmovinResponse,
            **kwargs
        )
    def get(self, filter_id, **kwargs):
        # type: (string_types, dict) -> UnsharpFilter
        """Unsharp Filter Details
        :param filter_id: Id of the unsharp filter
        :type filter_id: string_types, required
        :return: Unsharp Filter details
        :rtype: UnsharpFilter
        """
        return self.api_client.get(
            '/encoding/filters/unsharp/{filter_id}',
            path_params={'filter_id': filter_id},
            type=UnsharpFilter,
            **kwargs
        )
    def list(self, query_params=None, **kwargs):
        # type: (UnsharpFilterListQueryParams, dict) -> UnsharpFilter
        """List Unsharp Filters
        :param query_params: Query parameters
        :type query_params: UnsharpFilterListQueryParams
        :return: List of Unsharp Filters
        :rtype: UnsharpFilter
        """
        return self.api_client.get(
            '/encoding/filters/unsharp',
            query_params=query_params,
            pagination_response=True,
            type=UnsharpFilter,
            **kwargs
        ) | PypiClean
/GRFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/utils/CategoryEncoder.py | import category_encoders as ce
import pandas as pd
import joblib
import os
import json
class LooEncoder():
    """Encode categorical features with the LeaveOneOut method so they can
    be used for machine learning.

    LeaveOneOut is a supervised method, so the label column of the data set
    must be non-categorical (numeric).  The encoding can adapt to categories
    added over time.  It is not a one-to-one encoding and cannot be
    inverted, so it is unsuitable for encoding labels.
    """
    def __init__(self, ID: str, features: list = None):
        """Initialize the encoder's basic configuration.

        Args
        ----
        + ID(str): encoder ID;
        + features(list of str): IDs of the categorical features to encode
          (by default, all string-typed features are encoded);
        """
        self._encoder = ce.LeaveOneOutEncoder(cols=features, return_df=True)
        self._id = ID
        self._fitted = False
    def fit_transform(self, features_df: pd.DataFrame,
                      label_df: pd.DataFrame) -> pd.DataFrame:
        """Fit the encoder (supervised) and encode the given features.

        The column layout of `features_df` used here is fixed in the
        encoder; later calls to `transform()` must pass data with the same
        columns.

        Args
        ----
        + features_df(pd.DataFrame): categorical features to encode
          (n_samples x n_features);
        + label_df(pd.DataFrame): label data (must be numeric);

        Returns
        ----
        The data set with categories encoded.
        """
        features_df_encoded = self._encoder.fit_transform(features_df, label_df)
        self._fitted = True
        return features_df_encoded
    def fit(self, features_df: pd.DataFrame, label_df: pd.DataFrame):
        """Fit the encoder (supervised).

        The column layout of `features_df` used here is fixed in the
        encoder; later calls to `transform()` must pass data with the same
        columns.

        Args
        ----
        + features_df(pd.DataFrame): categorical features to encode
          (n_samples x n_features);
        + label_df(pd.DataFrame): one-dimensional label data (numeric);
        """
        self._encoder.fit(features_df, label_df)
        self._fitted = True
    def transform(self,
                  features_df: pd.DataFrame,
                  label_df: pd.DataFrame = None) -> pd.DataFrame:
        """Encode categorical features with the fitted encoder.

        For training data the labels should be passed along; for test data
        they are not needed.  `features_df` must have the same columns as
        the data used for fitting.

        Args
        ----
        + features_df(pd.DataFrame): categorical features to encode
          (n_samples x n_features);
        + label_df(pd.DataFrame): one-dimensional label data (numeric);

        Returns
        ----
        The encoded data set, or `None` when the encoder is not fitted.
        """
        if not self._fitted:
            return None
        df_encoded = self._encoder.transform(features_df, label_df)
        return df_encoded
    @property
    def features(self) -> list:
        """IDs of the features handled by this encoder."""
        return self._encoder.get_feature_names()
    def save(self, encoder_file: str):
        """Persist the encoder to disk.

        Args
        ----
        + encoder_file(str): target file name (full path to a .pkl file);
        """
        encoder_path = os.path.dirname(encoder_file)
        if not os.path.exists(encoder_path):
            os.makedirs(encoder_path)
        joblib.dump(self._encoder, encoder_file)
    def set_encoder(self, encoder: ce.LeaveOneOutEncoder):
        """Directly install a (fitted) category encoder.

        Args
        ----
        + encoder(LeaveOneOutEncoder): a fitted encoder;
        """
        self._encoder = encoder
        self._fitted = True
    @staticmethod
    def load(encoder_file, ID: str):
        """Load an encoder from disk.

        Args
        ----
        + encoder_file(str): encoder file name (full path to a .pkl file);
        + ID(str): encoder ID;
        """
        encoder = LooEncoder(ID)
        encoder.set_encoder(joblib.load(encoder_file))
        return encoder
class OrdinalEncoder():
    """Encode categorical features with the OrdinalEncoder method.

    This is an unsupervised method, so label data is not required (but is
    accepted during fitting).  A user-defined mapping can be supplied,
    which makes it possible to inject prior knowledge into the encoding.
    The encoding is invertible and can adapt to categories added over time.
    """
    def __init__(self, ID: str, features: list = None, mapping: list = None):
        """Initialize the encoder's basic configuration.

        When a custom mapping is given, it must cover every feature listed
        in `features`, and for each feature it must list the codes of all
        categories present in the data set (unlisted categories are
        encoded as -1).

        Args
        ----
        + ID(str): encoder ID;
        + features(list of str): IDs of the categorical features to encode
          (by default, all string-typed features are encoded);
        + mapping(list of dict): custom mapping
          ([{'col':'col_id', 'mapping':{'v1':1, 'v2:2}}]);
        """
        self._encoder = ce.OrdinalEncoder(cols=features,
                                          mapping=mapping,
                                          return_df=True)
        self._id = ID
        self._mapping = mapping
        self._features = features
        self._fitted = False
    def fit_transform(self,
                      features_df: pd.DataFrame,
                      label_df: pd.DataFrame = None) -> pd.DataFrame:
        """Fit the encoder and encode the given categorical features.

        The column layout of `features_df` used here is fixed in the
        encoder; later calls to `transform()` must pass data with the same
        columns.  `label_df` is optional.  This call resets the encoder.

        Args
        ----
        + features_df(pd.DataFrame): categorical features to encode
          (n_samples x n_features);
        + label_df(pd.DataFrame): one-dimensional label data (numeric);

        Returns
        ----
        The data set with categories encoded.
        """
        # fit the encoder
        features_df_encoded = self._encoder.fit_transform(features_df, label_df)
        # record which features were encoded
        self._record_encoded_features(features_df)
        # record the category -> code mapping
        self._record_mapping(features_df)
        self._fitted = True
        return features_df_encoded
    def _record_encoded_features(self, features_df):
        """Record the features that were actually encoded."""
        self._features = []
        features_all = self._encoder.get_feature_names()
        for f in features_all:
            if features_df[f].dtype == object:  # object dtype, not str
                self._features.append(f)
    def _record_mapping(self, features_df):
        """Record the category -> code mapping of the fitted encoder."""
        # collect the categories observed for each encoded feature
        tmp_feat_dic = {}
        for f in self._features:
            categories_f = list(set(features_df[f]))
            tmp_feat_dic[f] = categories_f
        # determine the longest category list
        size = 0
        for Id, val in tmp_feat_dic.items():
            size = max(size, len(val))
        # pad all columns to equal length so they form a valid DataFrame
        for Id, val in tmp_feat_dic.items():
            if len(val) < size:
                dn = size - len(val)
                val += [val[-1] for i in range(dn)]
                tmp_feat_dic[Id] = val
        for Id in self._encoder.get_feature_names():
            if Id not in self._features:
                dic = [0 for i in range(size)]
                tmp_feat_dic[Id] = dic
        # encode the padded categories and record the resulting mapping
        self._mapping = []
        res = self._encoder.transform(pd.DataFrame(tmp_feat_dic))
        for Id, categories in tmp_feat_dic.items():
            res_id = res[Id]
            map_id = {None: 0}
            for i in range(len(categories)):
                map_id[categories[i]] = int(res_id[i])  # int: JSON-serializable
            dic_id = {}
            dic_id['col'] = Id
            dic_id['mapping'] = map_id
            self._mapping.append(dic_id)
    def fit(self, features_df: pd.DataFrame, label_df: pd.DataFrame = None):
        """Fit the encoder.

        The column layout of `features_df` used here is fixed in the
        encoder; later calls to `transform()` must pass data with the same
        columns.  `label_df` is optional.  This call resets the encoder.

        Args
        ----
        + features_df(pd.DataFrame): categorical features to encode
          (n_samples x n_features);
        + label_df(pd.DataFrame): one-dimensional label data (numeric);
        """
        # fit the encoder
        self._encoder.fit(features_df, label_df)
        # record which features were encoded
        self._record_encoded_features(features_df)
        # record the category -> code mapping
        self._record_mapping(features_df)
        self._fitted = True
    def transform(self, features_df: pd.DataFrame) -> pd.DataFrame:
        """Encode categorical features with the fitted encoder.

        `features_df` must have the same columns as the data used for
        fitting.

        Args
        ----
        + features_df(pd.DataFrame): categorical features to encode
          (n_samples x n_features);

        Returns
        ----
        The encoded data set, or `None` when the encoder is not fitted.
        """
        if not self._fitted:
            print(f"ERROR: CategoryEncoder({self._id}) is not fitted yet.")
            return None
        if not self._features:
            return features_df
        df_encoded = self._encoder.transform(features_df)
        return df_encoded
    def partial_fit(self, features_df: pd.DataFrame, label_df: pd.DataFrame = None):
        """Incrementally fit the encoder.

        Categories in `features_df` that were not seen before are appended
        to the existing mapping with fresh codes.  `label_df` is optional.

        Args
        ----
        + features_df(pd.DataFrame): categorical features to encode
          (n_samples x n_features);
        + label_df(pd.DataFrame): one-dimensional label data (numeric);
        """
        # extend the existing mapping with newly observed categories
        if self._fitted and self._features:
            for feat in self._features:
                idx = self._search_categories(feat)
                mapping = self._mapping[idx]['mapping']
                categories = mapping.keys()
                max_codes = max(mapping.values())
                for Id in list(set(features_df[feat])):  # register new categories
                    if Id not in categories:
                        max_codes += 1
                        mapping[Id] = max_codes
            self._encoder = ce.OrdinalEncoder(cols=self._features,
                                              mapping=self._mapping,
                                              return_df=True)
        # fit the encoder and record the mapping
        self._encoder.fit(features_df, label_df)
        if not self._fitted:  # record only once; keep earlier fit results
            self._record_encoded_features(features_df)
            self._record_mapping(features_df)
            self._fitted = True
    def _search_categories(self, feature):
        """Return the index of `feature` in the recorded mapping
        (None when the feature is not encoded)."""
        if feature not in self._features:
            return None
        for i in range(len(self._mapping)):
            if self._mapping[i]['col'] == feature:
                return i
        return None
    def inverse_transform(self, encoded_features_df: pd.DataFrame) -> pd.DataFrame:
        """Decode an encoded data set back to its original categories.

        Args
        ----
        + encoded_features_df(pd.DataFrame): the encoded feature data set;

        Returns
        ----
        The decoded data set, or `None` when the encoder is not fitted.
        """
        if not self._fitted:
            print(f"ERROR: CategoryEncoder({self._id}) is not fitted yet.")
        if not self._features:
            return encoded_features_df
        # hand-rolled inverse mapping: ce.OrdinalEncoder cannot invert
        # when a custom mapping is supplied
        # inversed_features = self._encoder.inverse_transform(encoded_features_df)
        inversed_features = {}
        for feat in encoded_features_df.columns:
            if feat not in self._features:
                inversed_features[feat] = encoded_features_df[feat]
                continue
            feat_map = self._mapping[self._search_categories(feat)]['mapping']
            inversed_map = dict(zip(feat_map.values(), feat_map.keys()))
            inversed_feat = list(
                map(lambda x: inversed_map[x], encoded_features_df[feat]))
            inversed_features[feat] = inversed_feat
        return pd.DataFrame(inversed_features)
    @property
    def features(self) -> list:
        """IDs of the features encoded by this encoder."""
        return self._features
    def save(self, encoder_file: str, property_file: str):
        """Persist the encoder to disk.

        Args
        ----
        + encoder_file(str): encoder file name (full path to a .pkl file);
        + property_file(str): encoder property file name (full path to a
          .json file);
        """
        # save the encoder itself
        encoder_path = os.path.dirname(encoder_file)
        if not os.path.exists(encoder_path):
            os.makedirs(encoder_path)
        joblib.dump(self._encoder, encoder_file)
        # save the encoder's properties
        property_path = os.path.dirname(property_file)
        if not os.path.exists(property_path):
            os.makedirs(property_path)
        with open(property_file, 'w', encoding='utf8') as fo:
            json.dump(
                {
                    "mapping": self._mapping,
                    "features": self._features,
                    "fitted": self._fitted
                }, fo)
    def set_encoder(self, encoder: ce.OrdinalEncoder, features: list, mapping: list,
                    fitted: bool):
        """Directly install a (fitted) category encoder.

        Args
        ----
        + encoder(LeaveOneOutEncoder): a fitted encoder;
        + features(list of str): IDs of the categorical features to encode
          (by default, all string-typed features are encoded);
        + mapping(list of dict): custom mapping;
        + fitted(bool): whether the encoder has been fitted;
        """
        self._encoder = encoder
        self._features = features
        self._mapping = mapping
        self._fitted = fitted
    @staticmethod
    def load(ID: str, encoder_file: str, property_file: str):
        """Load an encoder from disk.

        Args
        ----
        + ID(str): encoder ID;
        + encoder_file(str): encoder file name (full path to a .pkl file);
        + property_file(str): encoder property file name (full path to a
          .json file);
        """
        with open(property_file, 'r', encoding='utf8') as fi:
            encoder_properties = json.load(fi)
        features = encoder_properties['features']
        mapping = encoder_properties['mapping']
        fitted = encoder_properties['fitted']
        encoder = OrdinalEncoder(ID)
        encoder.set_encoder(joblib.load(encoder_file), features, mapping, fitted)
        return encoder
if __name__ == "__main__":
    # Build a small demo data set.
    data = pd.DataFrame({
        'ID': [1, 2, 3, 4, 5, 6, 7, 8],
        'Sex': ['F', 'M', 'M', 'F', 'M', None, 'F', 'M'],
        'BloodType': ['A', 'AB', 'O', 'B', None, 'O', 'AB', 'B'],
        'Grade': ['High', 'High', 'Medium', 'Low', 'Low', 'Medium', 'Low', 'High'],
        'Education': [
            'PhD', 'HighSchool', 'Bachelor', 'Master', 'HighSchool', 'Master', 'PhD',
            'Bachelor'
        ],
        'Income': [28300, 4500, 7500, 12500, 4200, 15000, 25000, 7200]
    })
    Income_grand_mean = data['Income'].mean()
    data['Income_grand_mean'] = [Income_grand_mean] * len(data)
    Income_group = data.groupby('Education')['Income'].mean().rename(
        'Income_level_mean').reset_index()
    data_new = pd.merge(data, Income_group)
    features = list(data_new.columns)
    features.remove('Income')
    print(data_new)

    # Encoder round-trip demo: fit, transform, inverse-transform, then
    # incrementally fit on data containing unseen categories.
    features_train = data_new[['Grade']]
    features_test = pd.DataFrame({'Grade': ['High', 'High', 'Medium2', 'Low2']})
    mapping = [{'col': 'Grade', 'mapping': {'High': 1, 'Low': 2, 'Medium': 3}}]
    features = ['Grade']
    encoder = OrdinalEncoder('id1')
    encoder.partial_fit(features_train)
    res = encoder.transform(features_train)
    print(res)
    res = encoder.inverse_transform(res)
    print(res)
    encoder.partial_fit(features_test)
    res = encoder.transform(features_train)
    print(res)
    res = encoder.inverse_transform(res)
    print(res)
    res = encoder.transform(features_test)
    print(res)
    res = encoder.inverse_transform(res)
    print(res)

    # Save / load round trip.
    encoder_f = "./encoder.pkl"
    property_f = "./encoder.json"
    encoder.save(encoder_f, property_f)
    # Bug fix: arguments were previously passed as (encoder_f, property_f, 'id2'),
    # but the signature is load(ID, encoder_file, property_file).
    encoder2 = OrdinalEncoder.load('id2', encoder_f, property_f)
    res = encoder2.transform(features_test)
    print(res)
    res = encoder2.inverse_transform(res)
    print(res)