/open_darts-1.0.2-cp310-cp310-win_amd64.whl/darts/mesh/geometry/shapes.py
import numpy as np
import math
from dataclasses import dataclass, is_dataclass, field


# region geometry dataclasses
@dataclass
class Point:
    idx: int
    xyz: list
    lc: int = 0
    active: bool = True
    embed: list = field(default_factory=list)


@dataclass
class Curve:
    idx: int
    curve_type: str
    points: list
    active: bool = True
    embed: list = field(default_factory=list)


@dataclass
class Surface:
    idx: int
    points: list
    curves: list = field(default_factory=list)
    in_surfaces: list = field(default_factory=list)
    holes: list = field(default_factory=list)
    plane: bool = True
    active: bool = True
    embed: list = field(default_factory=list)


@dataclass
class Volume:
    idx: int
    surfaces: list
    active: bool = True


@dataclass
class Physical:
    tag: str
    idxs: list
# endregion


class Shape:
    def __init__(self):
        self.points = []
        self.curves = []
        self.surfaces = []
        self.volumes = []

        self.physical_points = []
        self.physical_curves = []
        self.physical_surfaces = []
        self.physical_volumes = []
        # self.physical_points = {}
        # self.physical_curves = {}
        # self.physical_surfaces = {}
        # self.physical_volumes = {}

        self.boundary_tag = 90000

        self.define_shapes()

    def define_shapes(self):
        pass

    def connect_points(self):
        """Connect the points of each surface with line curves.

        Points in each surface must be defined in order.
        """
        curves = []
        for curve in self.curves:  # if self.curves is not empty
            curves.append(curve.points)

        for j, surface in enumerate(self.surfaces):
            points = surface.points
            surface_curves = []
            for i in range(len(points) - 1):
                curve_points = [points[i], points[i + 1]]
                reverse = [points[i + 1], points[i]]
                if curve_points not in curves and reverse not in curves:
                    # New curve: register it and refer to it directly
                    curve_idx = len(curves) + 1
                    curves.append(curve_points)
                    curve_type = 'line'
                    self.curves.append(Curve(curve_idx, curve_type, curve_points))
                    surface_curves.append(curve_idx)
                else:
                    # Existing curve: refer to it, negated if traversed in reverse
                    if curve_points in curves:
                        curve_idx = curves.index(curve_points) + 1
                        surface_curves.append(curve_idx)
                    else:
                        curve_idx = curves.index(reverse) + 1
                        surface_curves.append(-curve_idx)
            self.surfaces[j].curves = surface_curves
        return

    def add_boundary(self, boundary):
        pass

    def plot_shape_2D(self):
        """Plot points and curves in the x-z plane."""
        import matplotlib.pyplot as plt

        plt.figure(dpi=400, figsize=(10, 5))

        for point in self.points:
            plt.scatter(point.xyz[0], point.xyz[2], c='k', s=0.5)

        for curve in self.curves:
            idx1 = curve.points[0] - 1
            idx2 = curve.points[1] - 1
            p1 = self.points[idx1].xyz
            p2 = self.points[idx2].xyz
            x = [p1[0], p2[0]]
            z = [p1[2], p2[2]]
            plt.plot(x, z, c='k', linewidth=1)

        # Find limits
        xmin, xmax = self.points[0].xyz[0], self.points[0].xyz[0]
        zmin, zmax = self.points[0].xyz[2], self.points[0].xyz[2]
        for point in self.points:
            xi = point.xyz[0]
            if xi < xmin:
                xmin = xi
            elif xi > xmax:
                xmax = xi
            zi = point.xyz[2]
            if zi < zmin:
                zmin = zi
            elif zi > zmax:
                zmax = zi

        plt.axis('scaled')
        plt.xlim([xmin, xmax])
        plt.ylim([zmin, zmax])
        plt.xlabel("x [m]")
        plt.ylabel("z [m]")
        return


class Box(Shape):
    def __init__(self, xdim: list, ydim: list, zdim: list):
        self.xdim, self.ydim, self.zdim = xdim, ydim, zdim
        super().__init__()

        self.connect_points()

        surface_idxs = [surface.idx for surface in self.surfaces]
        self.volumes = [Volume(1, surface_idxs)]

    def define_shapes(self):
        self.points = [Point(1, [self.xdim[0], self.ydim[0], self.zdim[0]]),
                       Point(2, [self.xdim[1], self.ydim[0], self.zdim[0]]),
                       Point(3, [self.xdim[1], self.ydim[1], self.zdim[0]]),
                       Point(4, [self.xdim[0], self.ydim[1], self.zdim[0]]),
                       Point(5, [self.xdim[0], self.ydim[0], self.zdim[1]]),
                       Point(6, [self.xdim[1], self.ydim[0], self.zdim[1]]),
                       Point(7, [self.xdim[1], self.ydim[1], self.zdim[1]]),
                       Point(8, [self.xdim[0], self.ydim[1], self.zdim[1]])]

        self.surfaces = [Surface(1, points=[1, 4, 8, 5, 1]),  # yz_min
                         Surface(2, points=[2, 3, 7, 6, 2]),  # yz_plus
                         Surface(3, points=[1, 2, 6, 5, 1]),  # xz_min
                         Surface(4, points=[4, 3, 7, 8, 4]),  # xz_plus
                         Surface(5, points=[1, 2, 3, 4, 1]),  # xy_min
                         Surface(6, points=[5, 6, 7, 8, 5])]  # xy_plus
        return

    def add_boundary(self, boundary):
        # xy_min, xy_plus, xz_min, xz_plus, yz_min, yz_plus
        if boundary == "xy_min":
            idxs = [5]
        elif boundary == "xy_plus":
            idxs = [6]
        elif boundary == "xz_min":
            idxs = [3]
        elif boundary == "xz_plus":
            idxs = [4]
        elif boundary == "yz_min":
            idxs = [1]
        elif boundary == "yz_plus":
            idxs = [2]
        else:
            raise Exception("Not a valid boundary")

        surface = Physical(boundary, idxs)
        self.physical_surfaces.append(surface)

        self.boundary_tag += 1
        return self.boundary_tag


class Cylinder(Shape):
    def __init__(self, c0: list, radius: float, length: float, orientation: int, angle=360.):
        self.c0, self.radius, self.length, self.orientation, self.angle = c0, radius, length, orientation, angle
        super().__init__()

    def define_shapes(self):
        if self.angle == 360.:  # need 3 curved surfaces
            self.points, self.curves, self.surfaces, self.volumes = \
                self.calc_faces_360(self.c0, self.radius, self.length, self.orientation)
        elif self.angle >= 180.:  # need 2 curved surfaces
            self.points, self.curves, self.surfaces, self.volumes = \
                self.calc_faces_180(self.c0, self.radius, self.length, self.orientation)
        else:  # need only 1 curved surface
            self.points, self.curves, self.surfaces, self.volumes = \
                self.calc_faces_180_(self.c0, self.radius, self.length, self.orientation)
        return

    def calc_faces_360(self, c0, r, l, orientation):
        """Calculate points, curves and surfaces for a full cylinder."""
        # Calculate points
        segments = 3
        curve_angle = 120

        points = [Point(1, c0)]
        for i in range(segments):
            radians = math.radians(i * curve_angle)
            point0 = self.calc_radial_points(c0, r, orientation, radians)
            points.append(Point(i + 2, point0))
        for i, point in enumerate(points[:]):
            point1 = point.xyz[:]
            point1[orientation] += l
            points.append(Point(i + segments + 2, point1))

        # Define curves and surfaces
        curves = [Curve(1, 'circle', [2, 1, 3]),
                  Curve(2, 'circle', [3, 1, 4]),
                  Curve(3, 'circle', [4, 1, 2]),
                  Curve(4, 'circle', [6, 5, 7]),
                  Curve(5, 'circle', [7, 5, 8]),
                  Curve(6, 'circle', [8, 5, 6]),
                  Curve(7, 'line', [1, 5]),
                  Curve(8, 'line', [2, 6]),
                  Curve(9, 'line', [3, 7]),
                  Curve(10, 'line', [4, 8])]

        surfaces = [Surface(1, points=[2, 3, 4, 2], curves=[1, 2, 3]),  # face0
                    Surface(2, [6, 7, 8, 6], [4, 5, 6]),  # face1
                    Surface(3, [2, 3, 7, 6, 2], [1, 9, -4, -8], plane=False),  # side12
                    Surface(4, [3, 4, 8, 7, 3], [2, 10, -5, -9], plane=False),  # side23
                    Surface(5, [4, 2, 6, 8, 4], [3, 8, -6, -10], plane=False)]  # side31

        surface_idxs = [surface.idx for surface in surfaces]
        volumes = [Volume(1, surface_idxs)]

        return points, curves, surfaces, volumes

    def calc_faces_180(self, c0, r, l, orientation):
        """Calculate points, curves and surfaces for a half cylinder."""
        # Calculate points
        segments = 2
        curve_angle = self.angle / 2

        points = [Point(1, c0)]
        for i in range(segments + 1):
            radians = math.radians(i * curve_angle)
            point0 = self.calc_radial_points(c0, r, orientation, radians)
            points.append(Point(i + 2, point0))
        for i, point in enumerate(points[:]):
            point1 = point.xyz[:]
            point1[orientation] += l
            points.append(Point(i + segments + 3, point1))

        # Define curves and surfaces
        curves = [Curve(1, 'line', [1, 2]),
                  Curve(2, 'circle', [2, 1, 3]),
                  Curve(3, 'circle', [3, 1, 4]),
                  Curve(4, 'line', [4, 1]),
                  Curve(5, 'line', [5, 6]),
                  Curve(6, 'circle', [6, 5, 7]),
                  Curve(7, 'circle', [7, 5, 8]),
                  Curve(8, 'line', [8, 5]),
                  Curve(9, 'line', [1, 5]),
                  Curve(10, 'line', [2, 6]),
                  Curve(11, 'line', [3, 7]),
                  Curve(12, 'line', [4, 8])]

        surfaces = [Surface(1, [1, 2, 3, 4, 1], [1, 2, 3, 4]),  # face0
                    Surface(2, [5, 6, 7, 8, 5], [5, 6, 7, 8]),  # face1
                    Surface(3, [1, 2, 6, 5, 1], [1, 10, -5, -9]),  # side centre1
                    Surface(4, [2, 3, 7, 6, 2], [2, 11, -6, -10], plane=False),  # side12
                    Surface(5, [3, 4, 8, 7, 3], [3, 12, -7, -11], plane=False),  # side23
                    Surface(6, [1, 5, 8, 4, 1], [9, -8, -12, 4])]  # side centre2

        surface_idxs = [surface.idx for surface in surfaces]
        volumes = [Volume(1, surface_idxs)]

        return points, curves, surfaces, volumes

    def calc_faces_180_(self, c0, r, l, orientation):
        """Calculate points, curves and surfaces for a less-than-half cylinder."""
        # Calculate points
        segments = 1
        curve_angle = self.angle

        points = [Point(1, c0)]
        for i in range(segments + 1):
            radians = math.radians(i * curve_angle)
            point0 = self.calc_radial_points(c0, r, orientation, radians)
            points.append(Point(i + 2, point0))
        for i, point in enumerate(points[:]):
            point1 = point.xyz[:]
            point1[orientation] += l
            points.append(Point(i + segments + 3, point1))

        # Define curves and surfaces
        curves = [Curve(1, 'line', [1, 2]),
                  Curve(2, 'circle', [2, 1, 3]),
                  Curve(3, 'line', [3, 1]),
                  Curve(4, 'line', [4, 5]),
                  Curve(5, 'circle', [5, 4, 6]),
                  Curve(6, 'line', [6, 4]),
                  Curve(7, 'line', [1, 4]),
                  Curve(8, 'line', [2, 5]),
                  Curve(9, 'line', [3, 6])]

        surfaces = [Surface(1, [1, 2, 3, 1], [1, 2, 3]),  # face0
                    Surface(2, [4, 5, 6, 4], [4, 5, 6]),  # face1
                    Surface(3, [1, 2, 5, 4, 1], [1, 8, -4, -7]),  # side centre1
                    Surface(4, [2, 3, 6, 5, 2], [2, 9, -5, -8], plane=False),  # side12
                    Surface(5, [1, 4, 6, 3, 1], [7, -6, -9, 3])]  # side centre2

        surface_idxs = [surface.idx for surface in surfaces]
        volumes = [Volume(1, surface_idxs)]

        return points, curves, surfaces, volumes

    def calc_radial_points(self, c, r, orientation, angle):
        point = c[:]
        if orientation == 0:  # x-dir
            point[1] += np.round(r * np.sin(angle), 5)
            point[2] += np.round(r * np.cos(angle), 5)
        elif orientation == 1:  # y-dir
            point[0] += np.round(r * np.sin(angle), 5)
            point[2] += np.round(r * np.cos(angle), 5)
        else:  # z-dir
            point[0] += np.round(r * np.sin(angle), 5)
            point[1] += np.round(r * np.cos(angle), 5)
        return point

    def add_boundary(self, boundary):
        # bottom, top, outer
        if boundary == "top":
            idxs = [1]
        elif boundary == "bottom":
            idxs = [2]
        elif boundary == "outer":
            if self.angle < 180.:
                idxs = [4]
            elif self.angle < 360.:
                idxs = [4, 5]
            else:
                idxs = [3, 4, 5]
        else:
            raise Exception("Not a valid boundary")

        surface = Physical(boundary, idxs)
        self.physical_surfaces.append(surface)

        self.boundary_tag += 1
        return self.boundary_tag


class CylinderWithHole(Cylinder):
    def __init__(self, c0: list, c1: list, radius: float, rw: float, length: float, orientation: int, angle=360.):
        self.rw = rw
        super().__init__(c0, radius, length, orientation, angle)

    def calc_faces_360(self, c0, r, l, orientation):
        """Calculate points, curves and surfaces for a full cylinder."""
        # Calculate points
        segments = 3
        curve_angle = 120

        points = [Point(1, c0)]
        for i in range(segments):
            radians = math.radians(i * curve_angle)
            point0 = self.calc_radial_points(c0, r, orientation, radians)
            points.append(Point(i + 2, point0))
        for i, point in enumerate(points[:]):
            point1 = point.xyz[:]
            point1[orientation] += l
            points.append(Point(i + segments + 2, point1))

        # Define curves and surfaces
        curves = [Curve(1, 'circle', [2, 1, 3]),
                  Curve(2, 'circle', [3, 1, 4]),
                  Curve(3, 'circle', [4, 1, 2]),
                  Curve(4, 'circle', [5, 1, 6]),
                  Curve(5, 'circle', [6, 1, 7]),
                  Curve(6, 'circle', [7, 1, 8]),
                  Curve(7, 'line', [1, 5]),
                  Curve(8, 'line', [2, 6]),
                  Curve(9, 'line', [3, 7]),
                  Curve(10, 'line', [4, 8])]

        surfaces = [Surface(1, points=[2, 3, 4, 2], curves=[1, 2, 3]),  # face0
                    Surface(2, [6, 7, 8, 6], [4, 5, 6]),  # face1
                    Surface(3, [2, 3, 7, 6, 2], [1, 9, -4, -8], plane=False),  # side12
                    Surface(4, [3, 4, 8, 7, 3], [2, 10, -5, -9], plane=False),  # side23
                    Surface(5, [4, 2, 6, 8, 4], [3, 8, -6, -10], plane=False)]  # side31

        surface_idxs = [surface.idx for surface in surfaces]
        volumes = [Volume(1, surface_idxs)]

        return points, curves, surfaces, volumes

    def calc_faces_180(self, c0, r, l, orientation):
        """Calculate points, curves and surfaces for a half cylinder."""
        # Calculate points
        segments = 2
        curve_angle = self.angle / 2

        points = [Point(1, c0)]
        for i in range(segments + 1):
            radians = math.radians(i * curve_angle)
            point0 = self.calc_radial_points(c0, r, orientation, radians)
            points.append(Point(i + 2, point0))
        for i, point in enumerate(points[:]):
            point1 = point.xyz[:]
            point1[orientation] += l
            points.append(Point(i + segments + 3, point1))

        # Define curves and surfaces
        curves = [Curve(1, 'line', [1, 2]),
                  Curve(2, 'circle', [2, 1, 3]),
                  Curve(3, 'circle', [3, 1, 4]),
                  Curve(4, 'line', [4, 1]),
                  Curve(5, 'line', [5, 6]),
                  Curve(6, 'circle', [6, 5, 7]),
                  Curve(7, 'circle', [7, 5, 8]),
                  Curve(8, 'line', [8, 5]),
                  Curve(9, 'line', [1, 5]),
                  Curve(10, 'line', [2, 6]),
                  Curve(11, 'line', [3, 7]),
                  Curve(12, 'line', [4, 8])]

        surfaces = [Surface(1, [1, 2, 3, 4, 1], [1, 2, 3, 4]),  # face0
                    Surface(2, [5, 6, 7, 8, 5], [5, 6, 7, 8]),  # face1
                    Surface(3, [1, 2, 6, 5, 1], [1, 10, -5, -9]),  # side centre1
                    Surface(4, [2, 3, 7, 6, 2], [2, 11, -6, -10], plane=False),  # side12
                    Surface(5, [3, 4, 8, 7, 3], [3, 12, -7, -11], plane=False),  # side23
                    Surface(6, [1, 5, 8, 4, 1], [9, -8, -12, 4])]  # side centre2

        surface_idxs = [surface.idx for surface in surfaces]
        volumes = [Volume(1, surface_idxs)]

        return points, curves, surfaces, volumes

    def calc_faces_180_(self, c0, r, l, orientation):
        """Calculate points, curves and surfaces for a less-than-half cylinder."""
        # Calculate points
        segments = 1
        curve_angle = self.angle

        points = [Point(1, c0)]
        for i in range(segments + 1):
            radians = math.radians(i * curve_angle)
            point0 = self.calc_radial_points(c0, r, orientation, radians)
            points.append(Point(i + 2, point0))
        for i, point in enumerate(points[:]):
            point1 = point.xyz[:]
            point1[orientation] += l
            points.append(Point(i + segments + 3, point1))

        # Define curves and surfaces
        curves = [Curve(1, 'line', [1, 2]),
                  Curve(2, 'circle', [2, 1, 3]),
                  Curve(3, 'line', [3, 1]),
                  Curve(4, 'line', [4, 5]),
                  Curve(5, 'circle', [5, 4, 6]),
                  Curve(6, 'line', [6, 4]),
                  Curve(7, 'line', [1, 4]),
                  Curve(8, 'line', [2, 5]),
                  Curve(9, 'line', [3, 6])]

        surfaces = [Surface(1, [1, 2, 3, 1], [1, 2, 3]),  # face0
                    Surface(2, [4, 5, 6, 4], [4, 5, 6]),  # face1
                    Surface(3, [1, 2, 5, 4, 1], [1, 8, -4, -7]),  # side centre1
                    Surface(4, [2, 3, 6, 5, 2], [2, 9, -5, -8], plane=False),  # side12
                    Surface(5, [1, 4, 6, 3, 1], [7, -6, -9, 3])]  # side centre2

        surface_idxs = [surface.idx for surface in surfaces]
        volumes = [Volume(1, surface_idxs)]

        return points, curves, surfaces, volumes

    def add_boundary(self, boundary):
        # bottom, top, outer
        if boundary == "top":
            idxs = [1]
        elif boundary == "bottom":
            idxs = [2]
        elif boundary == "outer":
            if self.angle < 180.:
                idxs = [4]
            elif self.angle < 360.:
                idxs = [4, 5]
            else:
                idxs = [3, 4, 5]
        else:
            raise Exception("Not a valid boundary")

        surface = Physical(boundary, idxs)
        self.physical_surfaces.append(surface)

        self.boundary_tag += 1
        return self.boundary_tag


class Circle(Shape):
    def __init__(self, c0: list, radius: float, orientation: int = 1, angle=360.):
        self.c0, self.radius, self.orientation, self.angle = c0, radius, orientation, angle
        super().__init__()

    def define_shapes(self):
        if self.angle == 360.:  # need 3 curved lines
            self.points = [Point(1, self.c0),
                           Point(2, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(0))),
                           Point(3, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(120))),
                           Point(4, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(240)))]

            self.curves = [Curve(1, curve_type='circle', points=[2, 1, 3]),
                           Curve(2, curve_type='circle', points=[3, 1, 4]),
                           Curve(3, curve_type='circle', points=[4, 1, 2])]

            self.surfaces = [Surface(1, points=[2, 3, 4, 2], curves=[1, 2, 3])]
        elif self.angle >= 180.:  # need 2 curved lines
            self.points = [Point(1, self.c0),
                           Point(2, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(0))),
                           Point(3, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(self.angle / 2))),
                           Point(4, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(self.angle)))]

            self.curves = [Curve(1, curve_type='line', points=[1, 2]),
                           Curve(2, curve_type='circle', points=[2, 1, 3]),
                           Curve(3, curve_type='circle', points=[3, 1, 4]),
                           Curve(4, curve_type='line', points=[4, 1])]

            self.surfaces = [Surface(1, points=[1, 2, 3, 4, 1], curves=[1, 2, 3, 4])]
        else:  # need only 1 curved line
            self.points = [Point(1, self.c0),
                           Point(2, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(0))),
                           Point(3, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(self.angle)))]

            self.curves = [Curve(1, curve_type='line', points=[1, 2]),
                           Curve(2, curve_type='circle', points=[2, 1, 3]),
                           Curve(3, curve_type='line', points=[3, 1])]

            self.surfaces = [Surface(1, points=[1, 2, 3, 1], curves=[1, 2, 3])]
        return

    def calc_radial_points(self, c, r, orientation, angle):
        point = c[:]
        if orientation == 0:  # x-dir
            point[1] += np.round(r * np.sin(angle), 5)
            point[2] += np.round(r * np.cos(angle), 5)
        elif orientation == 1:  # y-dir
            point[0] += np.round(r * np.sin(angle), 5)
            point[2] += np.round(r * np.cos(angle), 5)
        else:  # z-dir
            point[0] += np.round(r * np.sin(angle), 5)
            point[1] += np.round(r * np.cos(angle), 5)
        return point


class CircleWithHole(Circle):
    def __init__(self, c0: list, radius: float, rw: float, orientation: int = 1, angle=360.):
        self.rw = rw
        super().__init__(c0, radius, orientation, angle)

    def define_shapes(self):
        if self.angle == 360.:  # need 3 curved lines
            print("DOESN'T WORK YET")
            # self.points = [Point(1, self.c0),
            #                Point(2, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(0))),
            #                Point(3, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(120))),
            #                Point(4, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(240))),
            #                Point(5, self.calc_radial_points(self.c0, self.rw, self.orientation, math.radians(0)), lc=1),
            #                Point(6, self.calc_radial_points(self.c0, self.rw, self.orientation, math.radians(120)), lc=1),
            #                Point(7, self.calc_radial_points(self.c0, self.rw, self.orientation, math.radians(240)), lc=1)]
            #
            # self.curves = [Curve(1, curve_type='circle', points=[2, 1, 3]),
            #                Curve(2, curve_type='circle', points=[3, 1, 4]),
            #                Curve(3, curve_type='circle', points=[4, 1, 2]),
            #                Curve(4, curve_type='circle', points=[5, 1, 6]),
            #                Curve(5, curve_type='circle', points=[6, 1, 7]),
            #                Curve(6, curve_type='circle', points=[7, 1, 5])
            #                ]
            #
            # self.surfaces = [Surface(1, points=[2, 3, 4, 2, 5, 6, 7, 5], curves=[1, 2, 3, 4, 5, 6])]
            #
            # self.physical_curves = [Physical('inner', idxs=[4, 5, 6]),
            #                         Physical('outer', idxs=[1, 2, 3])
            #                         ]
        elif self.angle >= 180.:  # need 2 curved lines
            self.points = [Point(1, self.c0),
                           Point(2, self.calc_radial_points(self.c0, self.rw, self.orientation, math.radians(0)), lc=1),
                           Point(3, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(0))),
                           Point(4, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(self.angle / 2))),
                           Point(5, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(self.angle))),
                           Point(6, self.calc_radial_points(self.c0, self.rw, self.orientation, math.radians(self.angle)), lc=1),
                           Point(7, self.calc_radial_points(self.c0, self.rw, self.orientation, math.radians(self.angle / 2)), lc=1),
                           ]

            self.curves = [Curve(1, curve_type='line', points=[2, 3]),
                           Curve(2, curve_type='circle', points=[3, 1, 4]),
                           Curve(3, curve_type='circle', points=[4, 1, 5]),
                           Curve(4, curve_type='line', points=[5, 6]),
                           Curve(5, curve_type='circle', points=[6, 1, 7]),
                           Curve(6, curve_type='circle', points=[7, 1, 2]),]

            self.surfaces = [Surface(1, points=[2, 3, 4, 5, 6, 7, 2], curves=[1, 2, 3, 4, 5, 6])]

            self.physical_curves = [Physical('inner', idxs=[5, 6]),
                                    Physical('outer', idxs=[2, 3])
                                    ]
            # self.physical_curves['inner'] = Physical('inner', idxs=[5, 6])
            # self.physical_curves['outer'] = Physical('outer', idxs=[2, 3])
        else:  # need only 1 curved line
            self.points = [Point(1, self.c0),
                           Point(2, self.calc_radial_points(self.c0, self.rw, self.orientation, math.radians(0)), lc=1),
                           Point(3, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(0))),
                           Point(4, self.calc_radial_points(self.c0, self.radius, self.orientation, math.radians(self.angle))),
                           Point(5, self.calc_radial_points(self.c0, self.rw, self.orientation, math.radians(self.angle)), lc=1)]

            self.curves = [Curve(1, curve_type='line', points=[2, 3]),
                           Curve(2, curve_type='circle', points=[3, 1, 4]),
                           Curve(3, curve_type='line', points=[4, 5]),
                           Curve(4, curve_type='circle', points=[5, 1, 2])]

            self.surfaces = [Surface(1, points=[2, 3, 4, 5, 2], curves=[1, 2, 3, 4])]

            self.physical_curves = [Physical('inner', idxs=[4]),
                                    Physical('outer', idxs=[2])
                                    ]
        return
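A minimal usage sketch for the classes above (hypothetical driver code, not part of the package; the dimensions and the "xy_min" tag are made-up values). It builds a box, tags its bottom face as a physical surface, builds a quarter cylinder, and plots the box in the x-z plane, following the constructor signatures defined in this file.

# Hypothetical usage sketch for the geometry classes above.
box = Box(xdim=[0., 100.], ydim=[0., 50.], zdim=[0., 20.])  # Box connects its points itself
tag = box.add_boundary("xy_min")                            # register the bottom face, returns a boundary tag
cyl = Cylinder(c0=[0., 0., 0.], radius=10., length=30., orientation=2, angle=90.)  # quarter cylinder along z
box.plot_shape_2D()                                         # scatter/line plot in the x-z plane (needs matplotlib)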
/nwpoetry-1.0.0b100.tar.gz/nwpoetry-1.0.0b100/poetry/packages/constraints/multi_constraint.py
from .base_constraint import BaseConstraint
from .constraint import Constraint


class MultiConstraint(BaseConstraint):
    def __init__(self, *constraints):
        if any(c.operator == "==" for c in constraints):
            raise ValueError(
                "A multi-constraint can only be comprised of negative constraints"
            )

        self._constraints = constraints

    @property
    def constraints(self):
        return self._constraints

    def allows(self, other):
        for constraint in self._constraints:
            if not constraint.allows(other):
                return False

        return True

    def allows_all(self, other):
        if other.is_any():
            return False

        if other.is_empty():
            return True

        if isinstance(other, Constraint):
            return self.allows(other)

        our_constraints = iter(self._constraints)
        their_constraints = iter(other.constraints)
        our_constraint = next(our_constraints, None)
        their_constraint = next(their_constraints, None)

        while our_constraint and their_constraint:
            if our_constraint.allows_all(their_constraint):
                their_constraint = next(their_constraints, None)
            else:
                our_constraint = next(our_constraints, None)

        return their_constraint is None

    def allows_any(self, other):
        if other.is_any():
            return True

        if other.is_empty():
            return True

        if isinstance(other, Constraint):
            return self.allows(other)

        if isinstance(other, MultiConstraint):
            for c1 in self.constraints:
                for c2 in other.constraints:
                    if c1.allows(c2):
                        return True

        return False

    def intersect(self, other):
        if isinstance(other, Constraint):
            constraints = self._constraints
            if other not in constraints:
                constraints += (other,)
            else:
                constraints = (other,)

            if len(constraints) == 1:
                return constraints[0]

            return MultiConstraint(*constraints)

    def __eq__(self, other):
        if not isinstance(other, MultiConstraint):
            return False

        return sorted(
            self._constraints, key=lambda c: (c.operator, c.version)
        ) == sorted(other.constraints, key=lambda c: (c.operator, c.version))

    def __str__(self):
        constraints = []
        for constraint in self._constraints:
            constraints.append(str(constraint))

        return ", ".join(constraints)
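A small illustrative sketch of the semantics: a MultiConstraint built from exclusions allows a version only when every member allows it. This assumes the sibling Constraint class is constructed as Constraint(version, operator), which is how this package family exposes it; treat the exact signature and the printed form as assumptions.

# Illustrative sketch; Constraint(version, operator) signature is assumed.
from poetry.packages.constraints.constraint import Constraint
from poetry.packages.constraints.multi_constraint import MultiConstraint

m = MultiConstraint(Constraint("1.0", "!="), Constraint("1.1", "!="))
print(m.allows(Constraint("1.2")))  # True: neither exclusion rejects 1.2
print(m.allows(Constraint("1.0")))  # False: the first member rejects 1.0
print(str(m))                       # roughly "!=1.0, !=1.1"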
/followee_notifier-0.0.1a0.tar.gz/followee_notifier-0.0.1a0/followee_notifier/platforms/twitter.py
import json
import time
import requests
from typing import Any
from http.cookies import SimpleCookie
from urllib.parse import quote, unquote, parse_qs

from followee_notifier.types import Follower

NAME = 'twitter'

FEATURES = {
    'blue_business_profile_image_shape_enabled': True,
    'responsive_web_graphql_exclude_directive_enabled': True,
    'verified_phone_label_enabled': False,
    'responsive_web_graphql_timeline_navigation_enabled': True,
    'responsive_web_graphql_skip_user_profile_image_extensions_enabled': False,
    'tweetypie_unmention_optimization_enabled': True,
    'vibe_api_enabled': True,
    'responsive_web_edit_tweet_api_enabled': True,
    'graphql_is_translatable_rweb_tweet_is_translatable_enabled': True,
    'view_counts_everywhere_api_enabled': True,
    'longform_notetweets_consumption_enabled': True,
    'tweet_awards_web_tipping_enabled': False,
    'freedom_of_speech_not_reach_fetch_enabled': True,
    'standardized_nudges_misinfo': True,
    'tweet_with_visibility_results_prefer_gql_limited_actions_policy_enabled': False,
    'interactive_text_enabled': True,
    'responsive_web_text_conversations_enabled': False,
    'longform_notetweets_rich_text_read_enabled': True,
    'responsive_web_enhance_cards_enabled': False,
}

GRAPHQL_JSON_SEPARATOR = (',', ':')
REQUEST_INTERVAL = 3


def parse_cookies(cookies_str: str) -> dict[str, str]:
    cookies = SimpleCookie()
    cookies.load(cookies_str)
    cookies = {key: morsel.value for key, morsel in cookies.items()}
    # Drop session-bound cookies; they are re-issued by the first request
    for key in ['ct0', 'dnt', 'twid']:
        if key in cookies.keys():
            del cookies[key]
    return cookies


def graphql_url(url: str, params: dict[str, Any]) -> str:
    url += '?'
    for key, value in params.items():
        url += f'{key}={quote(json.dumps(value, separators=GRAPHQL_JSON_SEPARATOR))}&'
    if url[-1] == '&':
        url = url[:-1]
    return url


def fetch(config: dict[str, Any]) -> list[Follower]:
    result = []
    cookies = parse_cookies(config['cookies'])
    session = requests.Session()
    session.cookies.update(cookies)

    # Request #1 - Get CSRF token
    url = f'https://twitter.com/{config["username"]}/followers'
    print('[HTTP] >', 'GET', url)
    res = session.get(url)
    print('[HTTP] <', res.status_code)
    referer = res.url
    csrf_token = res.cookies['ct0']
    twitter_id = parse_qs(unquote(res.cookies['twid']))['u'][0]
    print(f'[DATA] Twitter ID: {twitter_id}, CSRF Token: {csrf_token}')

    # Request #2 - Get followers, paginated by cursor
    next_cursor = None
    while True:
        graphql_variables = {"userId": str(twitter_id), "count": 20, "includePromotedContent": False}
        if next_cursor:
            graphql_variables['cursor'] = next_cursor
        url = graphql_url('https://twitter.com/i/api/graphql/djdTXDIk2qhd4OStqlUFeQ/Followers', {
            'variables': graphql_variables,
            'features': FEATURES,
        })
        print('[HTTP] >', 'GET', url)
        res = session.get(url, headers={
            'referer': referer,
            'x-csrf-token': csrf_token,
            'authorization': f'Bearer {config["auth"]}',
        })
        print('[HTTP] <', res.status_code)
        if res.status_code == 429:
            # Rate-limited: sleep until the reset timestamp, then retry with the same cursor
            rate_limit_reset = res.headers['x-rate-limit-reset']
            sleep_time = int(rate_limit_reset) - int(time.time())
            print('[RATE LIMIT]', f'Wait for {sleep_time} seconds.')
            time.sleep(sleep_time)
            continue
        assert res.status_code == 200
        instructions = res.json()['data']['user']['result']['timeline']['timeline']['instructions']
        followers = list(filter(lambda x: x['type'] == 'TimelineAddEntries', instructions))[0]['entries']
        next_cursor = list(filter(lambda x: (x['content']['entryType'] == 'TimelineTimelineCursor') and (x['content'].get('cursorType') == 'Bottom'), followers))[0]
        followers = list(filter(lambda x: (x['content']['entryType'] == 'TimelineTimelineItem'), followers))
        followers = list(filter(lambda x: x['content'].get('itemContent', {}).get("itemType") == "TimelineUser", followers))
        followers = [i['content']['itemContent']['user_results']['result'] for i in followers]
        followers = [{
            'id': i['rest_id'],
            'url': f'https://twitter.com/i/user/{i["rest_id"]}',
            'name': i['legacy']['screen_name'],
            'display_name': i['legacy']['name'],
            'avatar': i['legacy'].get('profile_image_url_https', 'data:,'),
            'description': i['legacy'].get('description', ''),
        } for i in followers]
        result += followers
        print(f'[DATA] Fetched {len(followers)} followers, total fetched {len(result)} followers.')
        next_cursor = next_cursor['content']['value']
        print(f'[DATA] Next cursor: {next_cursor}')
        if next_cursor.startswith('0|'):
            print('[DATA] No more followers. Stop fetching.')
            break
        time.sleep(REQUEST_INTERVAL)
    return result
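A hypothetical invocation sketch: fetch() only needs a config mapping with the raw cookie string, the handle whose followers are listed, and the bearer token for the GraphQL endpoint. The key names below are the ones the code above actually reads; the values are placeholders.

# Hypothetical invocation; 'cookies', 'username' and 'auth' are the keys fetch() reads.
config = {
    'cookies': 'auth_token=...; guest_id=...;',  # raw Cookie header from a logged-in session
    'username': 'some_handle',                   # profile whose followers are listed
    'auth': 'AAAA...',                           # bearer token for the GraphQL API
}
followers = fetch(config)
print(len(followers), 'followers fetched')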
/websauna.newsletter-1.0a3.tar.gz/websauna.newsletter-1.0a3/websauna/newsletter/importer.py
# Standard Library
import logging
from typing import Optional

# Pyramid
from transaction import TransactionManager

# Websauna
from websauna.system.model.retry import ensure_transactionless
from websauna.system.model.retry import retryable
from websauna.system.user.models import User

from .mailgun import Mailgun

logger = logging.getLogger(__name__)


def import_subscriber(mailgun: Mailgun, address: str, user: User, upsert=True) -> bool:
    """Add one subscriber to the mailing list.

    :return: True if the user was fresh and imported
    """
    # Track import status in the user_data JSON, so we don't make duplicate
    # requests if the user has already been subscribed once
    mailing_list_subscribes = user.user_data.get("mailing_list_subscribes", [])

    if address not in mailing_list_subscribes:
        # Some sanity logic to filter out emails that are legitimate in some
        # services but rejected by Mailgun
        first_part, second_part = address.split("@")
        if first_part.startswith(".") or first_part.endswith("."):
            logger.info("Bad email address: %s", address)
            return False

        logger.info("Subscribing %s to %s", user.email, address)

        # Don't set the subscribed field, so that we don't accidentally
        # update unsubscribed users
        data = {
            "address": user.email,
            "name": user.friendly_name,
            "upsert": upsert and "yes" or "no",
        }

        try:
            mailgun.update_subscription(address, data)
        except Exception as e:
            logger.error("Failed to subscribe email %s: %s", user.email, e)
            return False

        mailing_list_subscribes.append(address)
        user.user_data["mailing_list_subscribes"] = mailing_list_subscribes
        return True

    return False


def import_all_users(mailgun: Mailgun, dbsession, address: str, tm: Optional[TransactionManager] = None) -> int:
    """Update the Mailgun mailing list from the Websauna internal user database.

    :return: Imported count
    """
    if tm is None:
        tm = dbsession.transaction_manager

    count = 0

    # Make sure we don't have a transaction in progress, as we do batching ourselves
    ensure_transactionless(transaction_manager=tm)

    @retryable(tm=tm)
    def tx1():
        """Get user ids on the first transaction."""
        return [u.id for u in dbsession.query(User.id).all()]

    @retryable(tm=tm)
    def tx_n(id):
        """For each user, import it in a subsequent transaction."""
        u = dbsession.query(User).get(id)
        if import_subscriber(mailgun, address, u):
            return 1
        else:
            return 0

    user_ids = tx1()
    for id in user_ids:
        count += tx_n(id)

    logger.info("Imported %d users", count)
    return count
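A minimal call sketch, assuming a Mailgun client and a Websauna dbsession are already available from the surrounding application; the Mailgun constructor arguments and the list address below are hypothetical placeholders.

# Minimal call sketch; `mailgun` and `dbsession` are assumed to come from the
# surrounding Websauna application (see websauna.newsletter.mailgun).
mailgun = Mailgun(api_key="key-...", domain="mg.example.com")  # hypothetical constructor arguments
count = import_all_users(mailgun, dbsession, "newsletter@mg.example.com")
print(f"Imported {count} users")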
/django-dojo-0.0.1.tar.gz/django-dojo-0.0.1/dojo/static/dojo/dojox/grid/enhanced/plugins/GridSource.js
define([ "dojo/_base/declare", "dojo/_base/array", "dojo/_base/lang", "dojo/dnd/Source", "./DnD" ], function(declare, array, lang, Source, DnD){ var _joinToArray = function(arrays){ var a = arrays[0]; for(var i = 1; i < arrays.length; ++i){ a = a.concat(arrays[i]); } return a; }; var GridDnDSource = lang.getObject("dojox.grid.enhanced.plugins.GridDnDSource"); return declare("dojox.grid.enhanced.plugins.GridSource", Source, { // summary: // A special source that can accept grid contents. // Only for non-grid widgets or domNodes. accept: ["grid/cells", "grid/rows", "grid/cols", "text"], // insertNodesForGrid: // If you'd like to insert some sort of nodes into your dnd source, turn this on, // and override getCellContent/getRowContent/getColumnContent // to populate the dnd data in your desired format. insertNodesForGrid: false, markupFactory: function(params, node){ cls = lang.getObject("dojox.grid.enhanced.plugins.GridSource"); return new cls(node, params); }, checkAcceptance: function(source, nodes){ if(source instanceof GridDnDSource){ if(nodes[0]){ var item = source.getItem(nodes[0].id); if(item && (array.indexOf(item.type, "grid/rows") >= 0 || array.indexOf(item.type, "grid/cells") >= 0) && !source.dndPlugin._allDnDItemsLoaded()){ return false; } } this.sourcePlugin = source.dndPlugin; } return this.inherited(arguments); }, onDraggingOver: function(){ if(this.sourcePlugin){ this.sourcePlugin._isSource = true; } }, onDraggingOut: function(){ if(this.sourcePlugin){ this.sourcePlugin._isSource = false; } }, onDropExternal: function(source, nodes, copy){ if(source instanceof GridDnDSource){ var ranges = array.map(nodes, function(node){ return source.getItem(node.id).data; }); var item = source.getItem(nodes[0].id); var grid = item.dndPlugin.grid; var type = item.type[0]; var range; try{ switch(type){ case "grid/cells": nodes[0].innerHTML = this.getCellContent(grid, ranges[0].min, ranges[0].max) || ""; this.onDropGridCells(grid, ranges[0].min, ranges[0].max); break; case "grid/rows": range = _joinToArray(ranges); nodes[0].innerHTML = this.getRowContent(grid, range) || ""; this.onDropGridRows(grid, range); break; case "grid/cols": range = _joinToArray(ranges); nodes[0].innerHTML = this.getColumnContent(grid, range) || ""; this.onDropGridColumns(grid, range); break; } if(this.insertNodesForGrid){ this.selectNone(); this.insertNodes(true, [nodes[0]], this.before, this.current); } item.dndPlugin.onDragOut(!copy); }catch(e){ console.warn("GridSource.onDropExternal() error:",e); } }else{ this.inherited(arguments); } }, getCellContent: function(grid, leftTopCell, rightBottomCell){ // summary: // Fill node innerHTML for dnd grid cells. // example: // | var cells = grid.layout.cells; // | var store = grid.store; // | var cache = grid._by_idx; // | var res = "Grid Cells from " + grid.id + ":<br/>"; // | for(var r = leftTopCell.row; r <= rightBottomCell.row; ++r){ // | for(var c = leftTopCell.col; c <= rightBottomCell.col; ++c){ // | res += store.getValue(cache[r].item, cells[c].field) + ", "; // | } // | res = res.substring(0, res.length - 2) + ";<br/>"; // | } // | return res; }, getRowContent: function(grid, rowIndexes){ // summary: // Fill node innerHTML for dnd grid rows. 
// example: // | var cells = grid.layout.cells; // | var store = grid.store; // | var cache = grid._by_idx; // | var res = "Grid Rows from " + grid.id + ":<br/>"; // | for(var i = 0; i < rowIndexes.length; ++i){ // | var r = rowIndexes[i]; // | res += "Row " + r + ": "; // | for(var j = 0; j < cells.length; ++j){ // | if(!cells[j].hidden){ // | res += store.getValue(cache[r].item, cells[j].field) + ", "; // | } // | } // | res = res.substring(0, res.length - 2) + ";<br/>"; // | } // | return res; }, getColumnContent: function(grid, colIndexes){ // summary: // Fill node innerHTML for dnd grid columns. // example: // | var cells = grid.layout.cells; // | var res = "Grid Columns from " + grid.id + ":"; // | for(var i = 0; i < colIndexes.length; ++i){ // | var c = colIndexes[i]; // | res += (cells[c].name || cells[c].field) + ", "; // | } // | return res.substring(0, res.length - 2); }, onDropGridCells: function(grid, leftTopCell, rightBottomCell){ }, onDropGridRows: function(grid, rowIndexes){ }, onDropGridColumns: function(grid, colIndexes){ } }); });
/cdktf_cdktf_provider_aws-17.0.2-py3-none-any.whl/cdktf_cdktf_provider_aws/servicecatalog_portfolio_share/__init__.py
import abc
import builtins
import datetime
import enum
import typing

import jsii
import publication
import typing_extensions

from typeguard import check_type

from .._jsii import *
import cdktf as _cdktf_9a9027ec
import constructs as _constructs_77d1e7e8


class ServicecatalogPortfolioShare(
    _cdktf_9a9027ec.TerraformResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@cdktf/provider-aws.servicecatalogPortfolioShare.ServicecatalogPortfolioShare",
):
    '''Represents a {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share aws_servicecatalog_portfolio_share}.'''

    def __init__(
        self,
        scope: _constructs_77d1e7e8.Construct,
        id_: builtins.str,
        *,
        portfolio_id: builtins.str,
        principal_id: builtins.str,
        type: builtins.str,
        accept_language: typing.Optional[builtins.str] = None,
        id: typing.Optional[builtins.str] = None,
        share_principals: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None,
        share_tag_options: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None,
        timeouts: typing.Optional[typing.Union["ServicecatalogPortfolioShareTimeouts", typing.Dict[builtins.str, typing.Any]]] = None,
        wait_for_acceptance: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None,
        connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
        count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
        depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
        for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
        lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
        provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
        provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
    ) -> None:
        '''Create a new {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share aws_servicecatalog_portfolio_share} Resource.

        :param scope: The scope in which to define this construct.
        :param id_: The scoped construct ID. Must be unique amongst siblings in the same scope.
        :param portfolio_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#portfolio_id ServicecatalogPortfolioShare#portfolio_id}.
        :param principal_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#principal_id ServicecatalogPortfolioShare#principal_id}.
        :param type: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#type ServicecatalogPortfolioShare#type}.
        :param accept_language: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#accept_language ServicecatalogPortfolioShare#accept_language}.
        :param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#id ServicecatalogPortfolioShare#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
        :param share_principals: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#share_principals ServicecatalogPortfolioShare#share_principals}.
        :param share_tag_options: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#share_tag_options ServicecatalogPortfolioShare#share_tag_options}.
        :param timeouts: timeouts block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#timeouts ServicecatalogPortfolioShare#timeouts}
        :param wait_for_acceptance: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#wait_for_acceptance ServicecatalogPortfolioShare#wait_for_acceptance}.
        :param connection:
        :param count:
        :param depends_on:
        :param for_each:
        :param lifecycle:
        :param provider:
        :param provisioners:
        '''
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__e2a0362f1c93184e4efea7c8d3b10f0d51151d397a8f6aa504a7119279f6d31a)
            check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
            check_type(argname="argument id_", value=id_, expected_type=type_hints["id_"])
        config = ServicecatalogPortfolioShareConfig(
            portfolio_id=portfolio_id,
            principal_id=principal_id,
            type=type,
            accept_language=accept_language,
            id=id,
            share_principals=share_principals,
            share_tag_options=share_tag_options,
            timeouts=timeouts,
            wait_for_acceptance=wait_for_acceptance,
            connection=connection,
            count=count,
            depends_on=depends_on,
            for_each=for_each,
            lifecycle=lifecycle,
            provider=provider,
            provisioners=provisioners,
        )

        jsii.create(self.__class__, self, [scope, id_, config])

    @jsii.member(jsii_name="putTimeouts")
    def put_timeouts(
        self,
        *,
        create: typing.Optional[builtins.str] = None,
        delete: typing.Optional[builtins.str] = None,
        read: typing.Optional[builtins.str] = None,
        update: typing.Optional[builtins.str] = None,
    ) -> None:
        '''
        :param create: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#create ServicecatalogPortfolioShare#create}.
        :param delete: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#delete ServicecatalogPortfolioShare#delete}.
        :param read: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#read ServicecatalogPortfolioShare#read}.
        :param update: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#update ServicecatalogPortfolioShare#update}.
        '''
        value = ServicecatalogPortfolioShareTimeouts(
            create=create, delete=delete, read=read, update=update
        )

        return typing.cast(None, jsii.invoke(self, "putTimeouts", [value]))

    @jsii.member(jsii_name="resetAcceptLanguage")
    def reset_accept_language(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetAcceptLanguage", []))

    @jsii.member(jsii_name="resetId")
    def reset_id(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetId", []))

    @jsii.member(jsii_name="resetSharePrincipals")
    def reset_share_principals(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetSharePrincipals", []))

    @jsii.member(jsii_name="resetShareTagOptions")
    def reset_share_tag_options(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetShareTagOptions", []))

    @jsii.member(jsii_name="resetTimeouts")
    def reset_timeouts(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetTimeouts", []))

    @jsii.member(jsii_name="resetWaitForAcceptance")
    def reset_wait_for_acceptance(self) -> None:
        return typing.cast(None, jsii.invoke(self, "resetWaitForAcceptance", []))

    @jsii.member(jsii_name="synthesizeAttributes")
    def _synthesize_attributes(self) -> typing.Mapping[builtins.str, typing.Any]:
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "synthesizeAttributes", []))

    @jsii.python.classproperty
    @jsii.member(jsii_name="tfResourceType")
    def TF_RESOURCE_TYPE(cls) -> builtins.str:
        return typing.cast(builtins.str, jsii.sget(cls, "tfResourceType"))

    @builtins.property
    @jsii.member(jsii_name="accepted")
    def accepted(self) -> _cdktf_9a9027ec.IResolvable:
        return typing.cast(_cdktf_9a9027ec.IResolvable, jsii.get(self, "accepted"))

    @builtins.property
    @jsii.member(jsii_name="timeouts")
    def timeouts(self) -> "ServicecatalogPortfolioShareTimeoutsOutputReference":
        return typing.cast("ServicecatalogPortfolioShareTimeoutsOutputReference", jsii.get(self, "timeouts"))

    @builtins.property
    @jsii.member(jsii_name="acceptLanguageInput")
    def accept_language_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "acceptLanguageInput"))

    @builtins.property
    @jsii.member(jsii_name="idInput")
    def id_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "idInput"))

    @builtins.property
    @jsii.member(jsii_name="portfolioIdInput")
    def portfolio_id_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "portfolioIdInput"))

    @builtins.property
    @jsii.member(jsii_name="principalIdInput")
    def principal_id_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "principalIdInput"))

    @builtins.property
    @jsii.member(jsii_name="sharePrincipalsInput")
    def share_principals_input(
        self,
    ) -> typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]]:
        return typing.cast(typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]], jsii.get(self, "sharePrincipalsInput"))

    @builtins.property
    @jsii.member(jsii_name="shareTagOptionsInput")
    def share_tag_options_input(
        self,
    ) -> typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]]:
        return typing.cast(typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]], jsii.get(self, "shareTagOptionsInput"))

    @builtins.property
    @jsii.member(jsii_name="timeoutsInput")
    def timeouts_input(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, "ServicecatalogPortfolioShareTimeouts"]]:
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, "ServicecatalogPortfolioShareTimeouts"]], jsii.get(self, "timeoutsInput"))

    @builtins.property
    @jsii.member(jsii_name="typeInput")
    def type_input(self) -> typing.Optional[builtins.str]:
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "typeInput"))

    @builtins.property
    @jsii.member(jsii_name="waitForAcceptanceInput")
    def wait_for_acceptance_input(
        self,
    ) -> typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]]:
        return typing.cast(typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]], jsii.get(self, "waitForAcceptanceInput"))

    @builtins.property
    @jsii.member(jsii_name="acceptLanguage")
    def accept_language(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "acceptLanguage"))

    @accept_language.setter
    def accept_language(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__4b570a3fed85a4cb18e8efa39595b5263d0172365a8712d03fc62b27986316b9)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "acceptLanguage", value)

    @builtins.property
    @jsii.member(jsii_name="id")
    def id(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "id"))

    @id.setter
    def id(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__98b1fd2b7f07281d51541d66c7f10281cf184092191df31cff95d7bcab2034c4)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "id", value)

    @builtins.property
    @jsii.member(jsii_name="portfolioId")
    def portfolio_id(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "portfolioId"))

    @portfolio_id.setter
    def portfolio_id(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__acc2cae5f1d65dff8ffa71551cd4d4033f37c922c9fe0ce2308c954eed2e9058)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "portfolioId", value)

    @builtins.property
    @jsii.member(jsii_name="principalId")
    def principal_id(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "principalId"))

    @principal_id.setter
    def principal_id(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__963007e5c07679a02e2f51426af01eb6a53c5763453e56c32c6cb715a50ff946)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "principalId", value)

    @builtins.property
    @jsii.member(jsii_name="sharePrincipals")
    def share_principals(
        self,
    ) -> typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]:
        return typing.cast(typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable], jsii.get(self, "sharePrincipals"))

    @share_principals.setter
    def share_principals(
        self,
        value: typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable],
    ) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__6bdc1e032fb1837768d0050f9f20ba20e35a5a39ad7aca6ae4de797d1666fb6b)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "sharePrincipals", value)

    @builtins.property
    @jsii.member(jsii_name="shareTagOptions")
    def share_tag_options(
        self,
    ) -> typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]:
        return typing.cast(typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable], jsii.get(self, "shareTagOptions"))

    @share_tag_options.setter
    def share_tag_options(
        self,
        value: typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable],
    ) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__dbd73049b29d4fa30e6fa9f509a1fb5799586cad9e360643ac209ab82a10f59f)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "shareTagOptions", value)

    @builtins.property
    @jsii.member(jsii_name="type")
    def type(self) -> builtins.str:
        return typing.cast(builtins.str, jsii.get(self, "type"))

    @type.setter
    def type(self, value: builtins.str) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__a5cc9949f13f969cae9dde441712da3a0a507df8ce55d19824dfaf0efafbf01f)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "type", value)

    @builtins.property
    @jsii.member(jsii_name="waitForAcceptance")
    def wait_for_acceptance(
        self,
    ) -> typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]:
        return typing.cast(typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable], jsii.get(self, "waitForAcceptance"))

    @wait_for_acceptance.setter
    def wait_for_acceptance(
        self,
        value: typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable],
    ) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__dfeb81cfbefb7b31819fc305681d7a25095565a1efedf2de5689475a0b8d19c2)
            check_type(argname="argument value", value=value, expected_type=type_hints["value"])
        jsii.set(self, "waitForAcceptance", value)


@jsii.data_type(
    jsii_type="@cdktf/provider-aws.servicecatalogPortfolioShare.ServicecatalogPortfolioShareConfig",
    jsii_struct_bases=[_cdktf_9a9027ec.TerraformMetaArguments],
    name_mapping={
        "connection": "connection",
        "count": "count",
        "depends_on": "dependsOn",
        "for_each": "forEach",
        "lifecycle": "lifecycle",
        "provider": "provider",
        "provisioners": "provisioners",
        "portfolio_id": "portfolioId",
        "principal_id": "principalId",
        "type": "type",
        "accept_language": "acceptLanguage",
        "id": "id",
        "share_principals": "sharePrincipals",
        "share_tag_options": "shareTagOptions",
        "timeouts": "timeouts",
        "wait_for_acceptance": "waitForAcceptance",
    },
)
class ServicecatalogPortfolioShareConfig(_cdktf_9a9027ec.TerraformMetaArguments):
    def __init__(
        self,
        *,
        connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None,
        count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None,
        depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None,
        for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None,
        lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None,
        provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None,
        provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None,
        portfolio_id: builtins.str,
        principal_id: builtins.str,
        type: builtins.str,
        accept_language: typing.Optional[builtins.str] = None,
        id: typing.Optional[builtins.str] = None,
        share_principals: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None,
        share_tag_options: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None,
        timeouts: typing.Optional[typing.Union["ServicecatalogPortfolioShareTimeouts", typing.Dict[builtins.str, typing.Any]]] = None,
        wait_for_acceptance: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None,
    ) -> None:
        '''
        :param connection:
        :param count:
        :param depends_on:
        :param for_each:
        :param lifecycle:
        :param provider:
        :param provisioners:
        :param portfolio_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#portfolio_id ServicecatalogPortfolioShare#portfolio_id}.
        :param principal_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#principal_id ServicecatalogPortfolioShare#principal_id}.
        :param type: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#type ServicecatalogPortfolioShare#type}.
        :param accept_language: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#accept_language ServicecatalogPortfolioShare#accept_language}.
        :param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#id ServicecatalogPortfolioShare#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable.
        :param share_principals: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#share_principals ServicecatalogPortfolioShare#share_principals}.
        :param share_tag_options: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#share_tag_options ServicecatalogPortfolioShare#share_tag_options}.
        :param timeouts: timeouts block. Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#timeouts ServicecatalogPortfolioShare#timeouts}
        :param wait_for_acceptance: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#wait_for_acceptance ServicecatalogPortfolioShare#wait_for_acceptance}.
        '''
        if isinstance(lifecycle, dict):
            lifecycle = _cdktf_9a9027ec.TerraformResourceLifecycle(**lifecycle)
        if isinstance(timeouts, dict):
            timeouts = ServicecatalogPortfolioShareTimeouts(**timeouts)
        if __debug__:
            type_hints = typing.get_type_hints(_typecheckingstub__e2c118b9f27a068600c34d0e2c4508f46c093ba883d65003c36c6f61750cf324)
            check_type(argname="argument connection", value=connection, expected_type=type_hints["connection"])
            check_type(argname="argument count", value=count, expected_type=type_hints["count"])
            check_type(argname="argument depends_on", value=depends_on, expected_type=type_hints["depends_on"])
            check_type(argname="argument for_each", value=for_each, expected_type=type_hints["for_each"])
            check_type(argname="argument lifecycle", value=lifecycle, expected_type=type_hints["lifecycle"])
            check_type(argname="argument provider", value=provider, expected_type=type_hints["provider"])
            check_type(argname="argument provisioners", value=provisioners, expected_type=type_hints["provisioners"])
            check_type(argname="argument portfolio_id", value=portfolio_id, expected_type=type_hints["portfolio_id"])
            check_type(argname="argument principal_id", value=principal_id, expected_type=type_hints["principal_id"])
            check_type(argname="argument type", value=type, expected_type=type_hints["type"])
            check_type(argname="argument accept_language", value=accept_language, expected_type=type_hints["accept_language"])
            check_type(argname="argument id", value=id, expected_type=type_hints["id"])
            check_type(argname="argument share_principals", value=share_principals, expected_type=type_hints["share_principals"])
            check_type(argname="argument share_tag_options", value=share_tag_options, expected_type=type_hints["share_tag_options"])
            check_type(argname="argument timeouts", value=timeouts, expected_type=type_hints["timeouts"])
            check_type(argname="argument wait_for_acceptance", value=wait_for_acceptance, expected_type=type_hints["wait_for_acceptance"])
        self._values: typing.Dict[builtins.str, typing.Any] = {
            "portfolio_id": portfolio_id,
            "principal_id": principal_id,
            "type": type,
        }
        if connection is not None:
            self._values["connection"] = connection
        if count is not None:
            self._values["count"] = count
        if depends_on is not None:
            self._values["depends_on"] = depends_on
        if for_each is not None:
            self._values["for_each"] = for_each
        if lifecycle is not None:
            self._values["lifecycle"] = lifecycle
        if provider is not None:
            self._values["provider"] = provider
        if provisioners is not None:
            self._values["provisioners"] = provisioners
        if accept_language is not None:
            self._values["accept_language"] = accept_language
        if id is not None:
            self._values["id"] = id
        if share_principals is not None:
            self._values["share_principals"] = share_principals
        if share_tag_options is not None:
            self._values["share_tag_options"] = share_tag_options
        if timeouts is not None:
            self._values["timeouts"] = timeouts
        if wait_for_acceptance is not None:
            self._values["wait_for_acceptance"] = wait_for_acceptance

    @builtins.property
    def connection(
        self,
    ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]]:
        '''
        :stability: experimental
        '''
        result = self._values.get("connection")
        return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]], result)

    @builtins.property
    def count(
        self,
    ) -> typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]]:
        '''
        :stability: experimental
        '''
        result = self._values.get("count")
        return typing.cast(typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]], result)

    @builtins.property
    def depends_on(
        self,
    ) -> typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]]:
        '''
        :stability: experimental
        '''
        result = self._values.get("depends_on")
        return typing.cast(typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]], result)

    @builtins.property
    def for_each(self) -> typing.Optional[_cdktf_9a9027ec.ITerraformIterator]:
        '''
        :stability: experimental
        '''
        result = self._values.get("for_each")
        return typing.cast(typing.Optional[_cdktf_9a9027ec.ITerraformIterator], result)

    @builtins.property
    def lifecycle(self) -> typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle]:
        '''
        :stability: experimental
        '''
        result = self._values.get("lifecycle")
        return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle], result)

    @builtins.property
    def provider(self) -> typing.Optional[_cdktf_9a9027ec.TerraformProvider]:
        '''
        :stability: experimental
        '''
        result = self._values.get("provider")
        return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformProvider], result)

    @builtins.property
    def provisioners(
        self,
    ) -> typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]]:
        '''
        :stability: experimental
        '''
        result = self._values.get("provisioners")
        return typing.cast(typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]], result)

    @builtins.property
    def portfolio_id(self) -> builtins.str:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#portfolio_id ServicecatalogPortfolioShare#portfolio_id}.'''
        result = self._values.get("portfolio_id")
        assert result is not None, "Required property 'portfolio_id' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def principal_id(self) -> builtins.str:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#principal_id ServicecatalogPortfolioShare#principal_id}.'''
        result = self._values.get("principal_id")
        assert result is not None, "Required property 'principal_id' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def type(self) -> builtins.str:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#type ServicecatalogPortfolioShare#type}.'''
        result = self._values.get("type")
        assert result is not None, "Required property 'type' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def accept_language(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#accept_language ServicecatalogPortfolioShare#accept_language}.'''
        result = self._values.get("accept_language")
        return typing.cast(typing.Optional[builtins.str], result)

    @builtins.property
    def id(self) -> typing.Optional[builtins.str]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#id ServicecatalogPortfolioShare#id}.

        Please be aware that the id field is automatically added to all resources in
        Terraform providers using a Terraform provider SDK version below 2.
        If you experience problems setting this value it might not be settable.
        Please take a look at the provider documentation to ensure it should be settable.
        '''
        result = self._values.get("id")
        return typing.cast(typing.Optional[builtins.str], result)

    @builtins.property
    def share_principals(
        self,
    ) -> typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#share_principals ServicecatalogPortfolioShare#share_principals}.'''
        result = self._values.get("share_principals")
        return typing.cast(typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]], result)

    @builtins.property
    def share_tag_options(
        self,
    ) -> typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#share_tag_options ServicecatalogPortfolioShare#share_tag_options}.'''
        result = self._values.get("share_tag_options")
        return typing.cast(typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]], result)

    @builtins.property
    def timeouts(self) -> typing.Optional["ServicecatalogPortfolioShareTimeouts"]:
        '''timeouts block.

        Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#timeouts ServicecatalogPortfolioShare#timeouts}
        '''
        result = self._values.get("timeouts")
        return typing.cast(typing.Optional["ServicecatalogPortfolioShareTimeouts"], result)

    @builtins.property
    def wait_for_acceptance(
        self,
    ) -> typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]]:
        '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#wait_for_acceptance ServicecatalogPortfolioShare#wait_for_acceptance}.'''
        result = self._values.get("wait_for_acceptance")
        return typing.cast(typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)

    def __repr__(self) -> str:
        return "ServicecatalogPortfolioShareConfig(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )


@jsii.data_type(
    jsii_type="@cdktf/provider-aws.servicecatalogPortfolioShare.ServicecatalogPortfolioShareTimeouts",
    jsii_struct_bases=[],
    name_mapping={
        "create": "create",
        "delete": "delete",
        "read": "read",
        "update": "update",
    },
)
class ServicecatalogPortfolioShareTimeouts:
    def __init__(
        self,
        *,
        create: typing.Optional[builtins.str] = None,
        delete: typing.Optional[builtins.str] = None,
        read: typing.Optional[builtins.str] = None,
        update: typing.Optional[builtins.str] = None,
    ) -> None:
        '''
        :param create: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#create ServicecatalogPortfolioShare#create}.
        :param delete: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#delete ServicecatalogPortfolioShare#delete}.
:param read: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#read ServicecatalogPortfolioShare#read}. :param update: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#update ServicecatalogPortfolioShare#update}. ''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__fbb419b22cd2801ebde2d069ab4cab35313fa7a2db9ca57b92be01477a2cfea8) check_type(argname="argument create", value=create, expected_type=type_hints["create"]) check_type(argname="argument delete", value=delete, expected_type=type_hints["delete"]) check_type(argname="argument read", value=read, expected_type=type_hints["read"]) check_type(argname="argument update", value=update, expected_type=type_hints["update"]) self._values: typing.Dict[builtins.str, typing.Any] = {} if create is not None: self._values["create"] = create if delete is not None: self._values["delete"] = delete if read is not None: self._values["read"] = read if update is not None: self._values["update"] = update @builtins.property def create(self) -> typing.Optional[builtins.str]: '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#create ServicecatalogPortfolioShare#create}.''' result = self._values.get("create") return typing.cast(typing.Optional[builtins.str], result) @builtins.property def delete(self) -> typing.Optional[builtins.str]: '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#delete ServicecatalogPortfolioShare#delete}.''' result = self._values.get("delete") return typing.cast(typing.Optional[builtins.str], result) @builtins.property def read(self) -> typing.Optional[builtins.str]: '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#read ServicecatalogPortfolioShare#read}.''' result = self._values.get("read") return typing.cast(typing.Optional[builtins.str], result) @builtins.property def update(self) -> typing.Optional[builtins.str]: '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/hashicorp/aws/5.15.0/docs/resources/servicecatalog_portfolio_share#update ServicecatalogPortfolioShare#update}.''' result = self._values.get("update") return typing.cast(typing.Optional[builtins.str], result) def __eq__(self, rhs: typing.Any) -> builtins.bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs: typing.Any) -> builtins.bool: return not (rhs == self) def __repr__(self) -> str: return "ServicecatalogPortfolioShareTimeouts(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) class ServicecatalogPortfolioShareTimeoutsOutputReference( _cdktf_9a9027ec.ComplexObject, metaclass=jsii.JSIIMeta, jsii_type="@cdktf/provider-aws.servicecatalogPortfolioShare.ServicecatalogPortfolioShareTimeoutsOutputReference", ): def __init__( self, terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, ) -> None: ''' :param terraform_resource: The parent resource. :param terraform_attribute: The attribute on the parent resource this class is referencing. 
''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__0a14096fa0dbe10bc9a507243e1dd4f9160ba825bcaa4dc54a98cd85f190874f) check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"]) check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"]) jsii.create(self.__class__, self, [terraform_resource, terraform_attribute]) @jsii.member(jsii_name="resetCreate") def reset_create(self) -> None: return typing.cast(None, jsii.invoke(self, "resetCreate", [])) @jsii.member(jsii_name="resetDelete") def reset_delete(self) -> None: return typing.cast(None, jsii.invoke(self, "resetDelete", [])) @jsii.member(jsii_name="resetRead") def reset_read(self) -> None: return typing.cast(None, jsii.invoke(self, "resetRead", [])) @jsii.member(jsii_name="resetUpdate") def reset_update(self) -> None: return typing.cast(None, jsii.invoke(self, "resetUpdate", [])) @builtins.property @jsii.member(jsii_name="createInput") def create_input(self) -> typing.Optional[builtins.str]: return typing.cast(typing.Optional[builtins.str], jsii.get(self, "createInput")) @builtins.property @jsii.member(jsii_name="deleteInput") def delete_input(self) -> typing.Optional[builtins.str]: return typing.cast(typing.Optional[builtins.str], jsii.get(self, "deleteInput")) @builtins.property @jsii.member(jsii_name="readInput") def read_input(self) -> typing.Optional[builtins.str]: return typing.cast(typing.Optional[builtins.str], jsii.get(self, "readInput")) @builtins.property @jsii.member(jsii_name="updateInput") def update_input(self) -> typing.Optional[builtins.str]: return typing.cast(typing.Optional[builtins.str], jsii.get(self, "updateInput")) @builtins.property @jsii.member(jsii_name="create") def create(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "create")) @create.setter def create(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__c7a1bd1ca2b536b69f80baa35871f702ec649ec74486d35627062e963de4eeda) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "create", value) @builtins.property @jsii.member(jsii_name="delete") def delete(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "delete")) @delete.setter def delete(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__decab16dd2ff3d9972f87a0ad1a088cafef1859fd83cd0a9ad7a023e6c60b428) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "delete", value) @builtins.property @jsii.member(jsii_name="read") def read(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "read")) @read.setter def read(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__641f4a59df6defc7be8ecc9a92a293fa2d1506ddc1a4ed5831e3e9710de413dd) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "read", value) @builtins.property @jsii.member(jsii_name="update") def update(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "update")) @update.setter def update(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__fc0dd06161f8effd09aef1eb13d332d1b485713ad2cb8c7d2d3a99884b976d69) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) 
jsii.set(self, "update", value) @builtins.property @jsii.member(jsii_name="internalValue") def internal_value( self, ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, ServicecatalogPortfolioShareTimeouts]]: return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, ServicecatalogPortfolioShareTimeouts]], jsii.get(self, "internalValue")) @internal_value.setter def internal_value( self, value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, ServicecatalogPortfolioShareTimeouts]], ) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__185f881c871d4f500c13daf506b92072ef866035514f49ca7c7145c3fb0018fe) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "internalValue", value) __all__ = [ "ServicecatalogPortfolioShare", "ServicecatalogPortfolioShareConfig", "ServicecatalogPortfolioShareTimeouts", "ServicecatalogPortfolioShareTimeoutsOutputReference", ] publication.publish() def _typecheckingstub__e2a0362f1c93184e4efea7c8d3b10f0d51151d397a8f6aa504a7119279f6d31a( scope: _constructs_77d1e7e8.Construct, id_: builtins.str, *, portfolio_id: builtins.str, principal_id: builtins.str, type: builtins.str, accept_language: typing.Optional[builtins.str] = None, id: typing.Optional[builtins.str] = None, share_principals: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None, share_tag_options: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None, timeouts: typing.Optional[typing.Union[ServicecatalogPortfolioShareTimeouts, typing.Dict[builtins.str, typing.Any]]] = None, wait_for_acceptance: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None, connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None, count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None, depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None, for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None, lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None, provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None, provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None, ) -> None: """Type checking stubs""" pass def _typecheckingstub__4b570a3fed85a4cb18e8efa39595b5263d0172365a8712d03fc62b27986316b9( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__98b1fd2b7f07281d51541d66c7f10281cf184092191df31cff95d7bcab2034c4( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__acc2cae5f1d65dff8ffa71551cd4d4033f37c922c9fe0ce2308c954eed2e9058( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__963007e5c07679a02e2f51426af01eb6a53c5763453e56c32c6cb715a50ff946( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__6bdc1e032fb1837768d0050f9f20ba20e35a5a39ad7aca6ae4de797d1666fb6b( value: typing.Union[builtins.bool, 
_cdktf_9a9027ec.IResolvable], ) -> None: """Type checking stubs""" pass def _typecheckingstub__dbd73049b29d4fa30e6fa9f509a1fb5799586cad9e360643ac209ab82a10f59f( value: typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable], ) -> None: """Type checking stubs""" pass def _typecheckingstub__a5cc9949f13f969cae9dde441712da3a0a507df8ce55d19824dfaf0efafbf01f( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__dfeb81cfbefb7b31819fc305681d7a25095565a1efedf2de5689475a0b8d19c2( value: typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable], ) -> None: """Type checking stubs""" pass def _typecheckingstub__e2c118b9f27a068600c34d0e2c4508f46c093ba883d65003c36c6f61750cf324( *, connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None, count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None, depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None, for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None, lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None, provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None, provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None, portfolio_id: builtins.str, principal_id: builtins.str, type: builtins.str, accept_language: typing.Optional[builtins.str] = None, id: typing.Optional[builtins.str] = None, share_principals: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None, share_tag_options: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None, timeouts: typing.Optional[typing.Union[ServicecatalogPortfolioShareTimeouts, typing.Dict[builtins.str, typing.Any]]] = None, wait_for_acceptance: typing.Optional[typing.Union[builtins.bool, _cdktf_9a9027ec.IResolvable]] = None, ) -> None: """Type checking stubs""" pass def _typecheckingstub__fbb419b22cd2801ebde2d069ab4cab35313fa7a2db9ca57b92be01477a2cfea8( *, create: typing.Optional[builtins.str] = None, delete: typing.Optional[builtins.str] = None, read: typing.Optional[builtins.str] = None, update: typing.Optional[builtins.str] = None, ) -> None: """Type checking stubs""" pass def _typecheckingstub__0a14096fa0dbe10bc9a507243e1dd4f9160ba825bcaa4dc54a98cd85f190874f( terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__c7a1bd1ca2b536b69f80baa35871f702ec649ec74486d35627062e963de4eeda( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__decab16dd2ff3d9972f87a0ad1a088cafef1859fd83cd0a9ad7a023e6c60b428( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__641f4a59df6defc7be8ecc9a92a293fa2d1506ddc1a4ed5831e3e9710de413dd( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__fc0dd06161f8effd09aef1eb13d332d1b485713ad2cb8c7d2d3a99884b976d69( value: builtins.str, ) -> None: """Type checking stubs""" pass def 
_typecheckingstub__185f881c871d4f500c13daf506b92072ef866035514f49ca7c7145c3fb0018fe( value: typing.Optional[typing.Union[_cdktf_9a9027ec.IResolvable, ServicecatalogPortfolioShareTimeouts]], ) -> None: """Type checking stubs""" pass
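For orientation, a minimal sketch of how this generated resource is typically instantiated from a CDKTF app. The import paths assume the published cdktf-cdktf-provider-aws Python package, and the region, portfolio ID, and principal account ID are placeholders, not values from this module:

from constructs import Construct
from cdktf import App, TerraformStack
from cdktf_cdktf_provider_aws.provider import AwsProvider
from cdktf_cdktf_provider_aws.servicecatalog_portfolio_share import (
    ServicecatalogPortfolioShare,
    ServicecatalogPortfolioShareTimeouts,
)


class ShareStack(TerraformStack):
    def __init__(self, scope: Construct, id_: str) -> None:
        super().__init__(scope, id_)
        AwsProvider(self, "aws", region="us-east-1")  # placeholder region
        # The three required properties map 1:1 to the Terraform resource
        # arguments documented in the docstrings above.
        ServicecatalogPortfolioShare(
            self, "share",
            portfolio_id="port-xxxxxxxxxxxx",   # placeholder portfolio ID
            principal_id="123456789012",        # placeholder AWS account ID
            type="ACCOUNT",
            wait_for_acceptance=False,
            timeouts=ServicecatalogPortfolioShareTimeouts(create="10m", delete="10m"),
        )


app = App()
ShareStack(app, "portfolio-share")
app.synth()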
PypiClean
/service_runner-1.0.0b3.tar.gz/service_runner-1.0.0b3/service_runner/service_runner/asset/consumers.py
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer, AsyncWebsocketConsumer
import json
import base64
from django.http.request import QueryDict
import paramiko
import socket
from threading import Thread
import time
import os
from django.utils.six import StringIO
from django.conf import settings
from .models import Host


def get_key_obj(pkeyobj, pkey_file=None, pkey_obj=None, password=None):
    if pkey_file:
        with open(pkey_file) as fo:
            try:
                pkey = pkeyobj.from_private_key(fo, password=password)
                return pkey
            except:
                pass
    else:
        try:
            pkey = pkeyobj.from_private_key(pkey_obj, password=password)
            return pkey
        except:
            pkey_obj.seek(0)


class SSHBridge(object):
    """
    Bridge between a WebSocket and an SSH connection.
    Reference: https://blog.51cto.com/hongchen99/2336087
    """

    def __init__(self, websocket, simpleuser):
        self.websocket = websocket
        self.simpleuser = simpleuser

    def connect(self, host, user, pwd=None, key=None, port=22, timeout=6,
                term='xterm', pty_width=80, pty_height=24):
        """
        Establish the SSH connection and keep it in the self.ssh_channel channel;
        afterwards all data is exchanged directly over that channel.
        :param host:
        :param user:
        :param pwd:
        :param key:
        :param port:
        :param timeout:
        :param term:
        :param pty_width:
        :param pty_height:
        :return:
        """
        ssh_client = paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        try:
            if key:
                # Key-based authentication
                pkey = get_key_obj(paramiko.RSAKey, pkey_obj=key, password=pwd) or \
                    get_key_obj(paramiko.DSSKey, pkey_obj=key, password=pwd) or \
                    get_key_obj(paramiko.ECDSAKey, pkey_obj=key, password=pwd) or \
                    get_key_obj(paramiko.Ed25519Key, pkey_obj=key, password=pwd)
                ssh_client.connect(username=user, hostname=host, port=port, pkey=pkey, timeout=timeout)
            else:
                ssh_client.connect(hostname=host, port=port, username=user, password=pwd, timeout=timeout)
        except Exception as e:
            # print(e)
            message = json.dumps({'flag': 'fail', 'message': str(e)})
            self.websocket.send_message_or_team(message)
            return
        transport = ssh_client.get_transport()
        """
        Alternative way to set up the channel:
        transport = paramiko.Transport((host, port,))
        transport.start_client()
        transport.auth_password(username=user, password=pwd)
        """
        # Open a session channel
        self.ssh_channel = transport.open_session()
        # Request a pseudo-terminal
        self.ssh_channel.get_pty(term=term, width=pty_width, height=pty_height)
        # Activate the terminal, so we are logged in just as if we had used a client such as Xshell
        self.ssh_channel.invoke_shell()
        # Read the banner returned after connecting (e.g. on Linux the last-login information)
        # and forward it over the WebSocket so the web terminal can display it.
        # The connection is established only once; subsequent traffic does not pass through this method again.
        for i in range(2):
            recv = self.ssh_channel.recv(1024).decode('utf-8')
            message = json.dumps({'flag': 'msg', 'message': recv})
            # print('[WS --websocket--> Web] welcome banner returned after the SSH channel was established:', message)
            self.websocket.send_message_or_team(message)

    def close(self):
        message = {'flag': 0, 'message': 'Closing the WebSocket and SSH connections'}
        # Send a close notification over the WebSocket
        self.websocket.send_message_or_team(json.dumps(message))
        try:
            # Close the SSH channel
            self.ssh_channel.close()
            # Close the WebSocket connection
            self.websocket.close()
        except BaseException as e:
            # print('Exception while closing the WebSocket and SSH connections:', e)
            pass

    def _ws_to_ssh(self, data):
        """
        Try to send data to the SSH channel; on any exception, close all connections.
        """
        try:
            # print('[Func --paramiko--> SSH] sending data from the WebSocket to the SSH channel:', data)
            self.ssh_channel.send(data)
        except OSError as e:
            # print(e)
            self.close()

    def _ssh_to_ws(self):
        try:
            # while True:
            while not self.ssh_channel.exit_status_ready():
                data = self.ssh_channel.recv(1024).decode('utf-8')
                # print('[SSH --paramiko--> Func] data returned by the SSH channel:', data)
                if len(data) != 0:
                    message = {'flag': 'msg', 'message': data}
                    # print('[WS --websocket--> Web] sending the data back to the frontend over the WebSocket:', message)
                    self.websocket.send_message_or_team(json.dumps(message))
                else:
                    break
        except:
            self.close()

    def shell(self, data):
        Thread(target=self._ws_to_ssh, args=(data,)).start()
        Thread(target=self._ssh_to_ws).start()
        """
        t1 = Thread(target=self._ws_to_ssh, args=(data,))
        t1.setDaemon(True)
        t1.start()
        t2 = Thread(target=self._ssh_to_ws)
        t2.setDaemon(True)
        t2.start()
        """

    def resize_pty(self, cols, rows):
        self.ssh_channel.resize_pty(width=cols, height=rows)


class WebsshConsumer(WebsocketConsumer):
    """
    1. xterm.js emulates a shell terminal in the browser, listens for user input
       and sends it to Django over the WebSocket.
    2. Django receives what the user typed on the frontend page and forwards it
       to the remote server through the SSH channel established by paramiko.
    3. paramiko returns the remote server's output to Django.
    4. Django sends the result returned by paramiko back to the user over the WebSocket.
    5. xterm.js receives the data returned by Django and writes it to the frontend page.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.host_id = ''
        self.simple_user = ''
        self.is_team = False
        self.team_name = ''

    def connect(self):
        """
        Accept the WebSocket connection and instantiate the SSHBridge class,
        which establishes the SSH connection and keeps it in the
        self.ssh_channel channel.
        :return:
        """
        self.host_id = self.scope['url_route']['kwargs'].get('host_id')
        # Get the value from the session
        self.simple_user = self.scope["user"].username
        # print('[Web --websocket--> WS] WebSocket channel established, currently connected user:', self.simple_user)
        host_obj = Host.objects.get(id=self.host_id)
        self.accept()
        # Once the WebSocket connection succeeds, connect over SSH
        query_string = self.scope.get('query_string')
        ws_args = QueryDict(query_string=query_string, encoding='utf-8')
        # # print(ws_args)
        # <QueryDict: {'user': ['admin'], 'host': ['192.168.96.20'], 'port': ['22'], 'auth': ['pwd'], 'pwd': ['ZGphbmdvYWRtaW4='], 'key': [''], 'width': ['113'], 'height': ['43']}>
        # Decide from the parameters whether this is a collaborative (shared) session
        team = ws_args.get('team')
        if team:
            self.is_team = True
            self.team_name = "team_{}".format(self.host_id)
            # Join this channel group
            async_to_sync(self.channel_layer.group_add)(
                self.team_name,
                self.channel_name
            )
            # When a user connects, notify everyone in the same group
            self.send_message_or_team(json.dumps(
                {'flag': 'user', 'message': 'User {} has connected to this terminal'.format(self.simple_user)}))
        width = ws_args.get('width')
        height = ws_args.get('height')
        width = int(width)
        height = int(height)  # the SSH connection requires ints: "required argument is an integer"
        ssh_connect_dict = {}
        user = self.simple_user
        host = host_obj.ip
        port = host_obj.ssh_port
        port = int(port)
        auth = host_obj.ssh_user
        pwd = host_obj.ssh_passwd
        # if pwd:
        #     pwd = base64.b64decode(pwd).decode('utf-8')
        sshkey_filename_path = host_obj.ssh_key.ssh_key.path if host_obj.ssh_key else None
        ssh_connect_dict = {
            'host': host,
            'user': auth,
            'port': port,
            'timeout': 30,
            'pty_width': width,
            'pty_height': height,
            'pwd': pwd
        }
        if sshkey_filename_path:
            if not os.path.exists(sshkey_filename_path):
                self.send(json.dumps(
                    {'flag': 'error', 'message': 'The key file does not exist'}))
            else:
                try:
                    f = open(sshkey_filename_path, 'r', encoding='utf-8')
                    key = f.read()
                    string_io = StringIO()
                    string_io.write(key)
                    string_io.flush()
                    string_io.seek(0)
                    ssh_connect_dict['key'] = string_io
                    # os.remove(sshkey_filename_path)  # delete the key file after use
                except BaseException as e:
                    # print('Error opening the key file', e)
                    pass
        # Establish the SSH connection
        self.ssh = SSHBridge(websocket=self, simpleuser=self.simple_user)
        # print('[WS --SSHBridge--> SSH] SSH connection parameters:', ssh_connect_dict)
        self.ssh.connect(**ssh_connect_dict)

    def disconnect(self, close_code):
        # Disconnect
        # print('User {} closed the WebSocket connection; closing the SSH connection'.format(self.simple_user))
        try:
            if self.is_team:
                # When a user disconnects, notify everyone in the same group
                self.send_message_or_team(json.dumps(
                    {'flag': 'user', 'message': 'User {} has disconnected from this terminal'.format(self.simple_user)}))
                # Leave the group
                async_to_sync(self.channel_layer.group_discard)(
                    self.team_name,
                    self.channel_name
                )
            self.ssh.close()
        except BaseException as e:
            pass

    def receive(self, text_data=None, bytes_data=None):
        # Receive a message from the WebSocket
        text_data = json.loads(text_data)  # JSON string to dict
        # print('\n\n[Web --websocket--> WS] terminal keystrokes sent to the backend over the WebSocket:', text_data)
        if isinstance(text_data, dict):
            if text_data.get('flag') == 'entered_key':
                # Take the key value typed on the frontend and pass it to the shell
                data = text_data.get('entered_key', '')
                # print('[WS --SSHBridge--> Func] WebSocket forwarding to SSHBridge:', text_data)
                self.ssh.shell(data=data)
            else:
                cols = text_data['cols']
                rows = text_data['rows']
                # Resize the terminal in the channel
                self.ssh.resize_pty(cols=cols, rows=rows)
        else:
            # print('[!!!] received data is not a dict')
            pass

    def send_message_or_team(self, message):
        if self.is_team:
            async_to_sync(self.channel_layer.group_send)(
                self.team_name,
                {
                    'type': 'team_message',
                    'message': message
                }
            )
        else:
            self.send(message)

    def team_message(self, event):
        message = event['message']
        # Send the message to the WebSocket
        self.send(message)
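For context, wiring this consumer up needs a Channels URL route along these lines. The path pattern and module layout are assumptions (only the host_id kwarg is actually read by the consumer), and as_asgi() assumes Channels 3; on Channels 2 the consumer class would be referenced directly:

# hypothetical routing.py next to this consumers.py
from django.urls import re_path

from . import consumers

websocket_urlpatterns = [
    re_path(r'^webssh/(?P<host_id>\d+)/$', consumers.WebsshConsumer.as_asgi()),
]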
PypiClean
/jupyterhub_url_sharing-0.1.0.tar.gz/jupyterhub_url_sharing-0.1.0/node_modules/jsonpointer/LICENSE.md
The MIT License (MIT) Copyright (c) 2011-2015 Jan Lehnardt <[email protected]> & Marc Bachmann <https://github.com/marcbachmann> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
PypiClean
/botogram-beta-0.7b0.tar.gz/botogram-beta-0.7b0/botogram/api.py
import os

import requests


# These API methods send something to a chat
# This list is used to filter which methods to check for unavailable chats
SEND_TO_CHAT_METHODS = (
    "sendMessage",
    "forwardMessage",
    "sendPhoto",
    "sendAudio",
    "sendDocument",
    "sendSticker",
    "sendVideo",
    "sendVoice",
    "sendLocation",
    "sendChatAction",
    "getChat",
)


class APIError(Exception):
    """Something went wrong with the API"""

    def __init__(self, response):
        self.error_code = response["error_code"]
        self.description = response["description"]
        msg = "Request failed with code %s. Response from Telegram: \"%s\"" % (
            self.error_code, self.description
        )
        super(APIError, self).__init__(msg)


class ChatUnavailableError(APIError):
    """A chat is unavailable, which means you can't send messages to it"""

    def __init__(self, reason, chat_id):
        self.reason = reason
        self.chat_id = chat_id

        if reason == "blocked":
            msg = "The user with ID %s blocked your bot" % chat_id
        elif reason == "account_deleted":
            msg = "The user with ID %s deleted his account" % chat_id
        elif reason == "not_contacted":
            msg = "The user with ID %s didn't contact you before" % chat_id
        elif reason == "not_found":
            msg = "The chat with ID %s doesn't exist" % chat_id
        elif reason == "kicked":
            msg = "The bot was kicked from the group with ID %s" % chat_id
        elif reason == "chat_moved":
            msg = "The chat with ID %s moved, and the old ID is no longer " \
                  "valid" % chat_id
        else:
            raise ValueError("Unknown reason: %s" % reason)

        Exception.__init__(self, msg)


class TelegramAPI:
    """Main interface to the Telegram API"""

    def __init__(self, api_key, endpoint=None):
        # Fill the default API endpoint
        if endpoint is None:
            endpoint = "https://api.telegram.org/"

        self._api_key = api_key
        self._endpoint = endpoint

        self._session_cache = None
        self._session_pid = -1

    def _session(self):
        """Get the current requests session"""
        # Ensure a new session is created if the PID changes. This is because
        # sessions behave badly if you use them after fork()
        if self._session_pid != os.getpid() or self._session_cache is None:
            self._session_cache = requests.Session()
            self._session_pid = os.getpid()

        return self._session_cache

    def call(self, method, params=None, files=None, expect=None):
        """Call a method of the API"""
        url = self._endpoint + "bot%s/%s" % (self._api_key, method)
        response = self._session().get(url, params=params, files=files,
                                       timeout=10)
        content = response.json()

        if not content["ok"]:
            status = content["error_code"]
            message = content["description"]

            # Special handling for unavailable chats
            if method in SEND_TO_CHAT_METHODS:
                reason = None

                # This happens when the bot tries to send messages to a user
                # who blocked the bot
                if status == 403 and "blocked" in message:
                    # Error code # 403
                    # Bot was blocked by the user
                    reason = "blocked"
                # This happens when the user deleted their account
                elif status == 403 and "deactivated" in message:
                    # Error code # 403
                    # Forbidden: user is deactivated
                    reason = "account_deleted"
                # This happens, as @BotSupport says, when the Telegram API
                # isn't able to determine why your bot can't contact a user
                elif status == 400 and "PEER_ID_INVALID" in message:
                    # Error code # 400
                    # Bad request: PEER_ID_INVALID
                    reason = "not_found"
                # This happens when the bot can't contact the user or the user
                # doesn't exist
                elif status == 400 and "not found" in message:
                    # Error code # 400
                    # Bad Request: chat not found
                    reason = "not_found"
                # This happens when the bot is kicked from the group chat it's
                # trying to send messages to
                elif status == 403 and "kicked" in message:
                    # Error code # 403
                    # Forbidden: bot was kicked from the group chat
                    # Forbidden: bot was kicked from the supergroup chat
                    reason = "kicked"
                # This happens when the ID points to a group chat, which was
                # migrated to a supergroup chat, thus changing its ID
                elif status == 400 and "migrated" in message:
                    # Error code # 400
                    # Bad Request: group chat is migrated to a supergroup chat
                    reason = "chat_moved"

                if reason is not None:
                    raise ChatUnavailableError(reason, params["chat_id"])

            raise APIError(content)

        # If no special object is expected, return the decoded json.
        # Else, return the "pythonized" result
        if expect is None:
            return content
        else:
            wrapped = expect(content["result"])
            if hasattr(wrapped, "set_api"):
                wrapped.set_api(self)
            return wrapped

    def file_content(self, path):
        """Get the content of a user-submitted file"""
        url = self._endpoint + "file/bot%s/%s" % (self._api_key, path)
        response = requests.get(url)
        return response.content

    @property
    def token(self):
        return self._api_key
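A brief usage sketch of this client; the bot token and chat ID below are placeholders:

api = TelegramAPI("123456:hypothetical-token")

try:
    api.call("sendMessage", {"chat_id": 12345678, "text": "hello"})
except ChatUnavailableError as e:
    # e.reason is one of: blocked, account_deleted, not_contacted,
    # not_found, kicked, chat_moved
    print("Can't reach chat %s: %s" % (e.chat_id, e.reason))
except APIError as e:
    print("Telegram returned error %s" % e.error_code)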
PypiClean
/ansible-kkvesper-2.3.2.0.tar.gz/ansible-kkvesper-2.3.2.0/lib/ansible/modules/storage/netapp/sf_volume_manager.py
# (c) 2017, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}


DOCUMENTATION = '''

module: sf_volume_manager

short_description: Manage SolidFire volumes
extends_documentation_fragment:
    - netapp.solidfire
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create, destroy, or update volumes on SolidFire

options:

    state:
        description:
        - Whether the specified volume should exist or not.
        required: true
        choices: ['present', 'absent']

    name:
        description:
        - The name of the volume to manage.
        required: true

    account_id:
        description:
        - Account ID for the owner of this volume.
        required: true

    512emulation:
        description:
        - Should the volume provide 512-byte sector emulation?
        - Required when C(state=present)
        required: false

    qos:
        description: Initial quality of service settings for this volume.
        required: false
        default: None

    attributes:
        description: A YAML dictionary of attributes that you would like to apply on this volume.
        required: false
        default: None

    volume_id:
        description:
        - The ID of the volume to manage or update.
        - In order to create multiple volumes with the same name, but different volume_ids, please declare the I(volume_id)
          parameter with an arbitrary value. However, the specified volume_id will not be assigned to the newly created
          volume (since it's an auto-generated property).
        required: false
        default: None

    size:
        description:
        - The size of the volume in (size_unit).
        - Required when C(state = present).
        required: false

    size_unit:
        description:
        - The unit used to interpret the size parameter.
        required: false
        choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
        default: 'gb'

    access:
        required: false
        choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
        description:
        - "Access allowed for the volume."
        - "readOnly: Only read operations are allowed."
        - "readWrite: Reads and writes are allowed."
        - "locked: No reads or writes are allowed."
        - "replicationTarget: Identify a volume as the target volume for a paired set of volumes. If the volume is not
          paired, the access status is locked."
        - "If unspecified, the access settings of the clone will be the same as the source."
        default: None

'''

EXAMPLES = """
- name: Create Volume
  sf_volume_manager:
    hostname: "{{ solidfire_hostname }}"
    username: "{{ solidfire_username }}"
    password: "{{ solidfire_password }}"
    state: present
    name: AnsibleVol
    account_id: 3
    enable512e: False
    size: 1
    size_unit: gb

- name: Update Volume
  sf_volume_manager:
    hostname: "{{ solidfire_hostname }}"
    username: "{{ solidfire_username }}"
    password: "{{ solidfire_password }}"
    state: present
    name: AnsibleVol
    account_id: 3
    access: readWrite

- name: Delete Volume
  sf_volume_manager:
    hostname: "{{ solidfire_hostname }}"
    username: "{{ solidfire_username }}"
    password: "{{ solidfire_password }}"
    state: absent
    name: AnsibleVol
    account_id: 2
"""

RETURN = """

msg:
    description: Success message
    returned: success
    type: string

"""

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.netapp as netapp_utils

HAS_SF_SDK = netapp_utils.has_sf_sdk()


class SolidFireVolume(object):

    def __init__(self):

        self._size_unit_map = netapp_utils.SF_BYTE_MAP

        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            account_id=dict(required=True, type='int'),
            enable512e=dict(type='bool', aliases=['512emulation']),
            qos=dict(required=False, type='str', default=None),
            attributes=dict(required=False, type='dict', default=None),
            volume_id=dict(type='int', default=None),
            size=dict(type='int'),
            size_unit=dict(default='gb',
                           choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
                                    'pb', 'eb', 'zb', 'yb'], type='str'),
            access=dict(required=False, type='str', default=None,
                        choices=['readOnly', 'readWrite', 'locked', 'replicationTarget']),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['size', 'enable512e'])
            ],
            supports_check_mode=True
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.account_id = p['account_id']
        self.enable512e = p['enable512e']
        self.qos = p['qos']
        self.attributes = p['attributes']
        self.volume_id = p['volume_id']
        self.size_unit = p['size_unit']
        if p['size'] is not None:
            self.size = p['size'] * self._size_unit_map[self.size_unit]
        else:
            self.size = None
        self.access = p['access']

        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        else:
            self.sfe = netapp_utils.create_sf_connection(module=self.module)

    def get_volume(self):
        """
            Return volume object if found

            :return: Details about the volume. None if not found.
            :rtype: dict
        """
        volume_list = self.sfe.list_volumes_for_account(account_id=self.account_id)
        for volume in volume_list.volumes:
            if volume.name == self.name:
                # Update self.volume_id
                if self.volume_id is not None:
                    if volume.volume_id == self.volume_id and str(volume.delete_time) == "":
                        return volume
                else:
                    if str(volume.delete_time) == "":
                        self.volume_id = volume.volume_id
                        return volume
        return None

    def create_volume(self):
        try:
            self.sfe.create_volume(name=self.name,
                                   account_id=self.account_id,
                                   total_size=self.size,
                                   enable512e=self.enable512e,
                                   qos=self.qos,
                                   attributes=self.attributes)
        except:
            err = get_exception()
            self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size),
                                  exception=str(err))

    def delete_volume(self):
        try:
            self.sfe.delete_volume(volume_id=self.volume_id)
        except:
            err = get_exception()
            self.module.fail_json(msg="Error deleting volume %s" % self.volume_id,
                                  exception=str(err))

    def update_volume(self):
        try:
            self.sfe.modify_volume(self.volume_id,
                                   account_id=self.account_id,
                                   access=self.access,
                                   qos=self.qos,
                                   total_size=self.size,
                                   attributes=self.attributes)
        except:
            err = get_exception()
            self.module.fail_json(msg="Error updating volume %s" % self.name,
                                  exception=str(err))

    def apply(self):
        changed = False
        volume_exists = False
        update_volume = False
        volume_detail = self.get_volume()

        if volume_detail:
            volume_exists = True

            if self.state == 'absent':
                # Checking for state change(s) here, and applying it later in the code allows us to support
                # check_mode
                changed = True

            elif self.state == 'present':
                if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access:
                    update_volume = True
                    changed = True

                elif volume_detail.account_id is not None and self.account_id is not None \
                        and volume_detail.account_id != self.account_id:
                    update_volume = True
                    changed = True

                elif volume_detail.qos is not None and self.qos is not None and volume_detail.qos != self.qos:
                    update_volume = True
                    changed = True

                elif volume_detail.total_size is not None and volume_detail.total_size != self.size:
                    size_difference = abs(float(volume_detail.total_size - self.size))
                    # Change size only if difference is bigger than 0.001
                    if size_difference / self.size > 0.001:
                        update_volume = True
                        changed = True

                elif volume_detail.attributes is not None and self.attributes is not None and \
                        volume_detail.attributes != self.attributes:
                    update_volume = True
                    changed = True
        else:
            if self.state == 'present':
                changed = True

        result_message = ""

        if changed:
            if self.module.check_mode:
                result_message = "Check mode, skipping changes"
                pass
            else:
                if self.state == 'present':
                    if not volume_exists:
                        self.create_volume()
                        result_message = "Volume created"
                    elif update_volume:
                        self.update_volume()
                        result_message = "Volume updated"

                elif self.state == 'absent':
                    self.delete_volume()
                    result_message = "Volume deleted"

        self.module.exit_json(changed=changed, msg=result_message)


def main():
    v = SolidFireVolume()
    v.apply()


if __name__ == '__main__':
    main()
PypiClean
/Pyql700-0.4.9.tar.gz/Pyql700-0.4.9/README.md
# Project: pyql700

## Module: stripes

Currently uses **imagemagick** for short texts:

`./stripes.py textline 62 "Toto je text"`

When the output should be printed directly:

`./stripes.py textline 62 "Toto je text" -p`

Prints an image file:

`./stripes.py print_it 62 imagefile`

Experiments with an image file - a prototype for dither/pattern handling:

`./stripes.py image 62 imagefile`

## Module: multextimg

Based on PIL; given a WIDTH and a maxchars_per_line, it creates a multiline text image with an exact HEIGHT dimension. Currently a prototype.

`./image_ut.py mk 640 "Mnohokrát děkuji." 60`
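A rough sketch of the multextimg idea with plain PIL; the function name, the fixed line height, and the default bitmap font are illustrative, not the module's actual API:

```python
from PIL import Image, ImageDraw, ImageFont
import textwrap

def make_text_image(width, text, maxchars_per_line, line_height=14):
    # Wrap the text and render it line by line; height follows the line count.
    lines = textwrap.wrap(text, maxchars_per_line)
    img = Image.new("1", (width, line_height * len(lines)), 1)  # 1-bit, white
    draw = ImageDraw.Draw(img)
    font = ImageFont.load_default()
    for i, line in enumerate(lines):
        draw.text((0, i * line_height), line, font=font, fill=0)
    return img

make_text_image(640, "Thank you very much.", 60).save("label.png")
```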
PypiClean
/tensorflow_cpu-2.14.0rc1-cp311-cp311-macosx_10_15_x86_64.whl/tensorflow/core/function/capture/restore_captures.py
import warnings

from tensorflow.core.function.polymorphism import function_type as function_type_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import handle_data_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.trackable import asset
from tensorflow.python.trackable import resource


def get_tensor_from_node(node):
  """Resolves a saved model graph node into a tensor to be captured.

  Args:
    node: a tensor, variable, or resource to be resolved into a capturable
      tensor

  Returns:
    The tensor (or directly capturable node) resolved from `node`.

  Raises:
    ValueError: if the node cannot be converted into a tensor.
  """
  with ops.init_scope():
    # TODO(b/210144904): Use __tf_tensor__ instead of `is_[...]` checks
    if getattr(node, "is_distributed_variable", False):
      return node
    elif getattr(node, "is_distributed_table", False):
      return node
    elif getattr(node, "is_sharded_variable", False):
      return node
    elif resource_variable_ops.is_resource_variable(node):
      return node.handle
    elif isinstance(node, asset.Asset):
      return node.asset_path
    elif tensor_util.is_tf_type(node):
      return node
    elif isinstance(node, resource.CapturableResource):
      # Note: this executes restored functions in the CapturableResource.
      return node.resource_handle
    raise ValueError(f"Cannot convert node {node} to tensor.")


def restore_captures(concrete_function, inputs):
  """Restore captures for the concrete function.

  Used at deserialization time. For functions that are being deserialized,
  saved model restores objects that tensors were captured from, but functions
  only know about their tensors -- object information is destroyed by tracing.
  This additional logic extracts the tensors which the function originally
  captured.

  Args:
    concrete_function: the concrete function for which to restore captures
    inputs: a list of tensors or other Python objects (such as variables)
      which contain tensors that were originally captured by the function
  """
  bound_inputs = [get_tensor_from_node(obj) for obj in inputs]
  # pylint: disable=g-complex-comprehension
  bound_variables = [
      obj
      for obj in inputs
      if isinstance(
          obj,
          (variables_lib.Variable, resource_variable_ops.BaseResourceVariable),
      )
  ]
  # TODO(b/205010575): This is only injecting the captured inputs into the
  # concrete function, note that we did not modify the FuncGraph
  # itself.
  captured_inputs_list = []
  concrete_function.set_variables(bound_variables)
  if bound_inputs:
    for bound_input, internal_capture in zip(
        bound_inputs, concrete_function.inputs[-len(bound_inputs):]
    ):
      # Distributed inputs have special logic for capturing, so we call their
      # custom restoration methods
      if hasattr(bound_input, "__tf_experimental_restore_capture__"):
        captured_inputs_list.append(
            bound_input.__tf_experimental_restore_capture__(
                concrete_function, internal_capture
            )
        )
      else:
        captured_inputs_list.append(bound_input)
        concrete_function.graph.replace_capture(bound_input, internal_capture)
        if internal_capture.dtype == dtypes.resource:
          if resource_variable_ops.is_resource_variable(bound_input):
            try:
              handle = bound_input.handle
            except ValueError:
              # For mirrored variables we'll copy handle data for components
              # as they get captured.
              pass
            else:
              handle_data_util.copy_handle_data(handle, internal_capture)
          else:
            # TODO(b/213451747): Remove need to call copy_handle_data
            handle_data_util.copy_handle_data(bound_input, internal_capture)
        # Setting "captures" first means "capture" won't create a new
        # placeholder for this input.
        concrete_function.graph.capture(bound_input)

  if any([inp is None for inp in captured_inputs_list]):
    warnings.warn(
        "Trying to load ShardedVariables using tf.saved_model.load. "
        "This won't work if using a tf.distribute.Strategy, and may "
        "use excess memory if not using a Strategy. Ignore this "
        "warning if using tf.keras.models.load_model."
    )
  concrete_function.set_external_captures(captured_inputs_list)

  # Update FunctionType with new captures.
  if concrete_function.function_type:
    concrete_function._function_type = function_type_lib.FunctionType(  # pylint: disable=protected-access
        concrete_function.function_type.parameters.values(),
        concrete_function.graph.function_captures.capture_types,
        return_annotation=concrete_function.function_type.output,
    )
PypiClean
/byterec-0.0.1.tar.gz/byterec-0.0.1/torch_ctr/models/din.py
import torch

from torch_ctr.layers import FeaturesEmbedding, SequenceFeaturesEmbedding, MultiLayerPerceptron
from torch.nn import ModuleList, Sequential, Linear, PReLU

from ..activation import Dice


class DIN(torch.nn.Module):

    def __init__(self, sequence_field_dims, sparse_field_dims, embed_dim, mlp_dims=None,
                 attention_mlp_dims=[36], activation='dice', dropout=None):
        super(DIN, self).__init__()
        self.num_seq_fields = len(sequence_field_dims)
        self.embedding_sequence = SequenceFeaturesEmbedding(sequence_field_dims, embed_dim, mode="concat")
        self.embedding_sparse = FeaturesEmbedding(sparse_field_dims, embed_dim)
        self.embed_output_dim = (len(sparse_field_dims) + 2 * self.num_seq_fields) * embed_dim
        self.mlp = MultiLayerPerceptron(self.embed_output_dim, mlp_dims, dropout, activation="dice")
        self.attention_layers = ModuleList([
            ActivationUnit(embed_dim, attention_mlp_dims, activation, use_softmax=False)
            for i in range(self.num_seq_fields)])

    def forward(self, x_sequence, x_candidate, x_sparse):
        """
        The original paper without dense feature.
        :param x_sequence: Long tensor of size ``(batch_size, num_seq_fields, seq_length)``  # each field is a tensor list
        :param x_candidate: Long tensor of size ``(batch_size, num_seq_fields)``  # each field is a candidate field id for x_sequence
        :param x_sparse: Long tensor of size ``(batch_size, num_sparse_fields)``  # sparse: user_profile_feature, candidate_feature, context_feature
        """
        # embed_x_sequence: (batch_size, num_seq_fields, seq_length, emb_dim)
        # embed_x_candidate: (batch_size, num_seq_fields, emb_dim)
        embed_x_sequence, embed_x_candidate = self.embedding_sequence(x_sequence, x_candidate)
        embed_x_sparse = self.embedding_sparse(x_sparse)  # (batch_size, num_sparse_fields, embed_dim)

        pooling_concat = []
        for i in range(self.num_seq_fields):
            # Train an independent activation unit for every sequence feature,
            # e.g. the history item-id sequence and the history item-brand-id sequence.
            pooling_seq = self.attention_layers[i](embed_x_sequence[:, i, :, :], embed_x_candidate[:, i, :])
            pooling_concat.append(pooling_seq.unsqueeze(1))  # (batch_size, 1, emb_dim)
        pooling_concat = torch.cat(pooling_concat, dim=1)  # (batch_size, num_seq_fields, emb_dim)

        mlp_in = torch.cat([pooling_concat.flatten(start_dim=1),
                            embed_x_candidate.flatten(start_dim=1),
                            embed_x_sparse.flatten(start_dim=1)], dim=1)  # (batch_size, N)
        y = self.mlp(mlp_in)
        return torch.sigmoid(y.squeeze(1))


class ActivationUnit(torch.nn.Module):
    """
    DIN Attention Layer
    """

    def __init__(self, emb_dim, attention_mlp_dims=[36], activation="dice", use_softmax=False):
        super(ActivationUnit, self).__init__()
        self.emb_dim = emb_dim
        self.use_softmax = use_softmax
        self.attention = MultiLayerPerceptron(4 * self.emb_dim, attention_mlp_dims, activation=activation)

    def forward(self, history, candidate):
        """
        :param history: Long tensor of size ``(batch_size, seq_length, emb_dim)``
        :param candidate: Long tensor of size ``(batch_size, emb_dim)``
        """
        seq_length = history.size(1)
        candidate = candidate.unsqueeze(1).expand(-1, seq_length, -1)  # (batch_size, seq_length, emb_dim)
        att_input = torch.cat([candidate, history, candidate - history, candidate * history],
                              dim=-1)  # (batch_size, seq_length, 4*emb_dim)
        att_weight = self.attention(att_input.view(-1, 4 * self.emb_dim))  # (batch_size*seq_length, 4*emb_dim)
        att_weight = att_weight.view(-1, seq_length)  # (batch_size*seq_length, 1) -> (batch_size, seq_length)
        if self.use_softmax:
            att_weight = att_weight.softmax(dim=-1)
        # (batch_size, seq_length, 1) * (batch_size, seq_length, emb_dim)
        output = (att_weight.unsqueeze(-1) * history).sum(dim=1)  # (batch_size, emb_dim)
        return output
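A shape-only usage sketch with toy dimensions; it assumes the torch_ctr layers imported above are available, and every size below is arbitrary:

# Toy field vocabularies: two sequence fields, three sparse fields.
seq_dims, sparse_dims, emb = [100, 50], [10, 20, 30], 16
model = DIN(seq_dims, sparse_dims, emb, mlp_dims=[64, 32])

batch, seq_len = 4, 8
x_sequence = torch.randint(0, 50, (batch, len(seq_dims), seq_len))
x_candidate = torch.randint(0, 50, (batch, len(seq_dims)))
x_sparse = torch.randint(0, 10, (batch, len(sparse_dims)))

y = model(x_sequence, x_candidate, x_sparse)  # (batch,) click probabilities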
PypiClean
/plone.app.event-ploneintegration-1.1.zip/plone.app.event-ploneintegration-1.1/README.rst
plone.app.event-ploneintegration
================================

This package integrates plone.app.event into Plone 4 releases, where
plone.app.event is not in the core. This is the case for all Plone 4 releases
at the time of this release.

Installation for Plone 4.1
--------------------------

To install plone.app.event for Plone 4.1, please use the
plone.app.event-ploneintegration package. Include it in your buildout config
or in your integration package's setup.py and apply the "plone.app.event
Plone4 integration" profile.

The plone.app.event-ploneintegration package pulls in all dependencies which
are needed for plone.app.event.

Warning
-------

!!! Backup! Don't do this on Plone setups in production; only install
plone.app.event for new setups, and report any upgrade issues. Upgrading is
not yet tested and no upgrade steps are provided - this is still a task to do.
Expect weird behavior regarding date/time/timezones and any other bugs. !!!

Bug reporting
-------------

Please report bugs here: https://github.com/collective/plone.app.event
This URL may change to https://github.com/plone/plone.app.event some time
soon!

More information
----------------

See: https://python.org/pypi/plone.app.event
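If you manage dependencies through setup.py, a minimal declaration could look
like this; the package name below is illustrative, only the
plone.app.event-ploneintegration requirement comes from this README::

    from setuptools import setup

    setup(
        name='my.policy.package',  # illustrative integration package name
        install_requires=[
            'Plone',
            'plone.app.event-ploneintegration',
        ],
    )

After re-running buildout and restarting the instance, apply the
"plone.app.event Plone4 integration" profile as described above.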
PypiClean
/inspectnn-0.4.2-py3-none-any.whl/tflite/BuiltinOptions.py
# namespace: tflite class BuiltinOptions(object): NONE = 0 Conv2DOptions = 1 DepthwiseConv2DOptions = 2 ConcatEmbeddingsOptions = 3 LSHProjectionOptions = 4 Pool2DOptions = 5 SVDFOptions = 6 RNNOptions = 7 FullyConnectedOptions = 8 SoftmaxOptions = 9 ConcatenationOptions = 10 AddOptions = 11 L2NormOptions = 12 LocalResponseNormalizationOptions = 13 LSTMOptions = 14 ResizeBilinearOptions = 15 CallOptions = 16 ReshapeOptions = 17 SkipGramOptions = 18 SpaceToDepthOptions = 19 EmbeddingLookupSparseOptions = 20 MulOptions = 21 PadOptions = 22 GatherOptions = 23 BatchToSpaceNDOptions = 24 SpaceToBatchNDOptions = 25 TransposeOptions = 26 ReducerOptions = 27 SubOptions = 28 DivOptions = 29 SqueezeOptions = 30 SequenceRNNOptions = 31 StridedSliceOptions = 32 ExpOptions = 33 TopKV2Options = 34 SplitOptions = 35 LogSoftmaxOptions = 36 CastOptions = 37 DequantizeOptions = 38 MaximumMinimumOptions = 39 ArgMaxOptions = 40 LessOptions = 41 NegOptions = 42 PadV2Options = 43 GreaterOptions = 44 GreaterEqualOptions = 45 LessEqualOptions = 46 SelectOptions = 47 SliceOptions = 48 TransposeConvOptions = 49 SparseToDenseOptions = 50 TileOptions = 51 ExpandDimsOptions = 52 EqualOptions = 53 NotEqualOptions = 54 ShapeOptions = 55 PowOptions = 56 ArgMinOptions = 57 FakeQuantOptions = 58 PackOptions = 59 LogicalOrOptions = 60 OneHotOptions = 61 LogicalAndOptions = 62 LogicalNotOptions = 63 UnpackOptions = 64 FloorDivOptions = 65 SquareOptions = 66 ZerosLikeOptions = 67 FillOptions = 68 BidirectionalSequenceLSTMOptions = 69 BidirectionalSequenceRNNOptions = 70 UnidirectionalSequenceLSTMOptions = 71 FloorModOptions = 72 RangeOptions = 73 ResizeNearestNeighborOptions = 74 LeakyReluOptions = 75 SquaredDifferenceOptions = 76 MirrorPadOptions = 77 AbsOptions = 78 SplitVOptions = 79 UniqueOptions = 80 ReverseV2Options = 81 AddNOptions = 82 GatherNdOptions = 83 CosOptions = 84 WhereOptions = 85 RankOptions = 86 ReverseSequenceOptions = 87 MatrixDiagOptions = 88 QuantizeOptions = 89 MatrixSetDiagOptions = 90 HardSwishOptions = 91 IfOptions = 92 WhileOptions = 93 DepthToSpaceOptions = 94 NonMaxSuppressionV4Options = 95 NonMaxSuppressionV5Options = 96 ScatterNdOptions = 97 SelectV2Options = 98 DensifyOptions = 99 SegmentSumOptions = 100 BatchMatMulOptions = 101
PypiClean
/cfw-fido-1.6.0rc3.tar.gz/cfw-fido-1.6.0rc3/fido/update_signatures.py
from __future__ import print_function

from argparse import ArgumentParser
import os
from shutil import rmtree
import sys
import time
from xml.etree import ElementTree as CET
import zipfile

from . import __version__, CONFIG_DIR, query_yes_no
from .prepare import run as prepare_pronom_to_fido
from .versions import get_local_versions
from .pronom.soap import get_pronom_sig_version, get_pronom_signature, NS
from .pronom.http import get_sig_xml_for_puid

DEFAULTS = {
    'signatureFileName': 'DROID_SignatureFile-v{0}.xml',
    'pronomZipFileName': 'pronom-xml-v{0}.zip',
    'fidoSignatureVersion': 'format_extensions.xml',
    'containerVersion': 'container-signature-UPDATE-ME.xml',  # container version is frozen and needs human attention before updating,
}

OPTIONS = {
    'http_throttle': 0.5,  # in secs, to prevent DoS of PRONOM server
    'tmp_dir': os.path.join(CONFIG_DIR, 'tmp'),
    'deleteTempDirectory': True,
}


def run(defaults=None):
    """
    Update PRONOM signatures.

    Interactive script, requires keyboard input.
    """
    print("FIDO signature updater v{}".format(__version__))
    defaults = defaults or DEFAULTS
    try:
        print("Contacting PRONOM...")
        currentVersion, signatureFile = sig_version_check(defaults)
        download_sig_file(defaults, currentVersion, signatureFile)
        print("Extracting PRONOM PUID's from signature file...")
        tree = CET.parse(signatureFile)
        format_eles = tree.findall('.//sig:FileFormat', NS)
        print("Found {} PRONOM FileFormat elements".format(len(format_eles)))
        tmpdir, resume = init_sig_download(defaults)
        download_signatures(defaults, format_eles, resume, tmpdir)
        create_zip_file(defaults, format_eles, currentVersion, tmpdir)
        if defaults['deleteTempDirectory']:
            print("Deleting temporary folder and files...")
            rmtree(tmpdir, ignore_errors=True)
        update_versions_xml(defaults, currentVersion)

        # TODO: there should be a check here to handle prepare.main exit() signal (-1/0/1/...)
        print("Preparing to convert PRONOM formats to FIDO signatures...")
        prepare_pronom_to_fido()
        print("FIDO signatures successfully updated")

    except KeyboardInterrupt:
        sys.exit('Aborting update...')


def sig_version_check(defaults):
    """Return a tuple consisting of current sig file version and the derived file name."""
    print("Contacting PRONOM...")
    currentVersion = get_pronom_sig_version()
    if not currentVersion:
        sys.exit('Failed to obtain PRONOM signature file version number, please try again.')

    print("Querying latest signaturefile version...")
    signatureFile = os.path.join(CONFIG_DIR, defaults['signatureFileName'].format(currentVersion))
    if os.path.isfile(signatureFile):
        print("You already have the latest PRONOM signature file, version", currentVersion)
        if not query_yes_no("Update anyway?"):
            sys.exit('Aborting update...')
    return currentVersion, signatureFile


def download_sig_file(defaults, version, signatureFile):
    """Download the latest version of the PRONOM sigs to signatureFile."""
    print("Downloading signature file version {}...".format(version))
    currentFile, _ = get_pronom_signature()
    if not currentFile:
        sys.exit('Failed to obtain PRONOM signature file, please try again.')
    print("Writing {0}...".format(defaults['signatureFileName'].format(version)))
    with open(signatureFile, 'w') as file_:
        file_.write(currentFile)


def init_sig_download(defaults):
    """
    Initialise the download of individual PRONOM signatures.

    Handles user input and resumption of interrupted downloads.
    Return a tuple of the temp directory for writing and a boolean resume flag.
    """
    print("Downloading signatures can take a while")
    if not query_yes_no("Continue and download signatures?"):
        sys.exit('Aborting update...')
    tmpdir = defaults['tmp_dir']
    resume = False
    if os.path.isdir(tmpdir):
        print("Found previously created temporary folder for download:", tmpdir)
        resume = query_yes_no('Do you want to resume download (yes) or start over (no)?')
        if resume:
            print("Resuming download...")
    else:
        print("Creating temporary folder for download:", tmpdir)
        try:
            os.mkdir(tmpdir)
        except OSError:
            pass
    if not os.path.isdir(tmpdir):
        sys.stderr.write("Failed to create temporary folder for PUID's, using: " + tmpdir)
    return tmpdir, resume


def download_signatures(defaults, format_eles, resume, tmpdir):
    """Download PRONOM signatures and write to individual files."""
    print("Downloading signatures, one moment please...")
    numberPuids = len(format_eles)
    one_percent = (float(numberPuids) / 100)
    numfiles = 0
    for format_ele in format_eles:
        download_sig(format_ele, tmpdir, resume)
        numfiles += 1
        percent = int(float(numfiles) / one_percent)
        print(r"{}/{} files [{}%]".format(numfiles, numberPuids, percent))
        time.sleep(defaults['http_throttle'])
    print("100%")


def download_sig(format_ele, tmpdir, resume):
    """
    Download an individual PRONOM signature.

    The signature to be downloaded is identified by the FileFormat element
    parameter format_ele.
    The downloaded signature is written to tmpdir.
    """
    puid, puidFileName = get_puid_file_name(format_ele)
    filename = os.path.join(tmpdir, puidFileName)
    if os.path.isfile(filename) and resume:
        return
    try:
        xml = get_sig_xml_for_puid(puid)
    except Exception as e:
        sys.stderr.write("Failed to download signature file:" + puid)
        sys.stderr.write("Error:" + str(e))
        sys.exit('Please restart and resume download.')
    with open(filename, 'wb') as file_:
        file_.write(xml)


def create_zip_file(defaults, format_eles, currentVersion, tmpdir):
    """Create zip file of signatures."""
    print("Creating PRONOM zip...")
    compression = zipfile.ZIP_DEFLATED if 'zlib' in sys.modules else zipfile.ZIP_STORED
    modes = {zipfile.ZIP_DEFLATED: 'deflated', zipfile.ZIP_STORED: 'stored'}
    zf = zipfile.ZipFile(os.path.join(CONFIG_DIR, defaults['pronomZipFileName'].format(currentVersion)), mode='w')
    print("Adding files with compression mode", modes[compression])
    for format_ele in format_eles:
        _, puidFileName = get_puid_file_name(format_ele)
        filename = os.path.join(tmpdir, puidFileName)
        if os.path.isfile(filename):
            zf.write(filename, arcname=puidFileName, compress_type=compression)
            if defaults['deleteTempDirectory']:
                os.unlink(filename)
    zf.close()


def get_puid_file_name(format_ele):
    """Return a tuple of PUID and PUID file name derived from format_ele."""
    puid = format_ele.get('PUID')
    puidType, puidNum = puid.split("/")
    return puid, 'puid.{}.{}.xml'.format(puidType, puidNum)


def update_versions_xml(defaults, currentVersion):
    """Write the newly identified signature versions to the versions.xml file."""
    print('Updating versions.xml...')
    versions = get_local_versions()
    versions.pronom_version = str(currentVersion)
    versions.pronom_signature = "formats-v" + str(currentVersion) + ".xml"
    versions.pronom_container_signature = defaults['containerVersion']
    versions.fido_extension_signature = defaults['fidoSignatureVersion']
    versions.update_script = __version__
    versions.write()


def main():
    """Main CLI entrypoint."""
    parser = ArgumentParser(description='Download and convert the latest PRONOM signatures')
    parser.add_argument('-tmpdir', default=OPTIONS['tmp_dir'],
                        help='Location to store temporary files', dest='tmp_dir')
    parser.add_argument('-keep_tmp', default=OPTIONS['deleteTempDirectory'],
                        help='Do not delete temporary files after completion',
                        dest='deleteTempDirectory', action='store_false')
    parser.add_argument('-http_throttle', default=OPTIONS['http_throttle'],
                        help='Time (in seconds) to wait between downloads',
                        type=float, dest='http_throttle')
    args = parser.parse_args()
    opts = DEFAULTS.copy()
    opts.update(vars(args))
    run(opts)


if __name__ == '__main__':
    main()
PypiClean
/catcli-0.9.6.tar.gz/catcli-0.9.6/README.md
# CATCLI [![Tests Status](https://github.com/deadc0de6/catcli/workflows/tests/badge.svg?branch=master)](https://github.com/deadc0de6/catcli/actions) [![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](http://www.gnu.org/licenses/gpl-3.0) [![Coveralls](https://img.shields.io/coveralls/github/deadc0de6/catcli)](https://coveralls.io/github/deadc0de6/catcli?branch=master) [![PyPI version](https://badge.fury.io/py/catcli.svg)](https://badge.fury.io/py/catcli) [![AUR](https://img.shields.io/aur/version/catcli-git.svg)](https://aur.archlinux.org/packages/catcli-git) [![Python](https://img.shields.io/pypi/pyversions/catcli.svg)](https://pypi.python.org/pypi/catcli) [![Donate](https://img.shields.io/badge/donate-KoFi-blue.svg)](https://ko-fi.com/deadc0de6) *The command line catalog tool for your offline data* Did you ever wanted to find back that specific file that should be on one of your backup DVDs or one of your external hard drives? You usually go through all of them hoping to find the right one on the first try? [Catcli](https://github.com/deadc0de6/catcli) indexes external media in a catalog file and allows to quickly find specific files or even navigate in the catalog of indexed files while these are not connected to your host. Features: * Index any directories in a catalog * Ability to search for files by name in the catalog * Ability to navigate through indexed data à la `ls` * Support for fuse to mount the indexed data as a virtual filesystem * Handle archive files (zip, tar, ...) and index their content * Save catalog to json for easy versioning with git * Command line interface FTW * Store files and directories sizes * Store md5 hash of files * Ability to update the catalog * Support for `fzf` for finding files * Tag your different storages with additional information * Export catalog to CSV <a href="https://asciinema.org/a/hRE22qbVtBGxOM1yxw2y4fBy8"><img src="https://asciinema.org/a/hRE22qbVtBGxOM1yxw2y4fBy8.png" width="50%" height="50%"></a> Quick start: ```bash # install catcli with pip pip3 install catcli --user # index a directory in the catalog catcli index --meta='some description' log /var/log # display the content catcli ls -r # navigate catcli ls log # find files/directories named '*log*' catcli find log ``` see [usage](#usage) for specific info ## Why catcli? [Catcli](https://github.com/deadc0de6/catcli) gives the ability to navigate, explore and find your files that are stored on external media (DVDs, hard drives, USB sticks, etc) when those are not connected. Catcli can just as easily index any arbitrary directories. See the [examples](#examples) for an overview of the available features. 
--- **Table of Contents** * [Installation](#installation) * [Usage](#usage) * [Index data](#index-data) * [Index archive files](#index-archive-files) * [Walk indexed files with ls](#walk-indexed-files-with-ls) * [Find files](#find-files) * [Mount catalog](#mount-catalog) * [Display entire hierarchy](#display-entire-hierarchy) * [Catalog graph](#catalog-graph) * [Edit storage](#edit-storage) * [Update catalog](#update-catalog) * [CSV format](#csv-format) * [Examples](#examples) * [Contribution](#contribution) * [Thank you](#thank-you) # Installation Install from Pypi ```bash $ pip3 install catcli --user ``` Or from github directly ```bash $ cd /tmp; git clone https://github.com/deadc0de6/catcli && cd catcli $ sudo python3 setup.py install $ catcli --help ``` To work with catcli without installing it, you can do the following ```bash $ cd /tmp; git clone https://github.com/deadc0de6/catcli && cd catcli $ pip3 install -r requirements.txt --user $ python3 -m catcli.catcli --help ``` or install it in a virtualenv ```bash $ cd /tmp; git clone https://github.com/deadc0de6/catcli && cd catcli $ virtualenv -p python3 env $ source env/bin/activate $ python setup.py install $ catcli --help ``` Catcli is also available on aur: https://aur.archlinux.org/packages/catcli-git/ # Usage Each indexed directory is stored in the catalog. Multiple directories can be indexed and they are all available through the command line interface of catcli. Five different types of entry are present in a catalog: * **top node**: this is the root of the hierarchy * **storage node**: this represents an indexed storage (a DVD, an external hard drive, an USB drive, some arbitrary directory, etc). * **dir node**: this is a directory * **file node**: this is a file * **archive node**: this is a file contained in an archive (tar, zip, etc) ## Index data Let's say the DVD or external hard drive that needs to be indexed is mounted on `/media/mnt`. The following command will index the entire directory `/media/mnt` and store that in your catalog under the name `<short-name>`. ```bash $ catcli index --meta=<some-description> <short-name> /media/mnt ``` If not specified otherwise (with the switch `--catalog`), the catalog is saved in the current directory under `catcli.catalog`. The `--meta` switch allows to add any additional information to store along in the catalog like for example `the blue disk in my office`. Catcli will calculate and store the total size of each node (directories, storages, etc) unless the `-n --no-subsize` switch is used. Using the `-a --archive` switch allows to also index archive files as explained [below](#index-archive-files). ## Index archive files Catcli is able to index and explore the content of archive files. Following archive formats are supported: *tar*, *tar.gz*, *tar.xz*, *lzma*, *tar.bz2*, *zip*. Catcli is also able to find files within indexed archive files. See the [archive example](#archive-example) for more. ## Walk indexed files with ls A catalog can be walked using the command `ls` as if the media is mounted (File/directories separator is `/`). ```bash $ catcli ls tmp/a/b/c ``` Resulting files can be sorted by size using `-S --sortsize`. See the [examples](#examples) for more. ## Find files Files and directories can be found based on their names using the `find` command. `Find` support two formats that allow to use `fzf` for searching: * `--format=fzf-native`: display the result in native format * `--format=fzf-csv`: display the result in csv See the [examples](#examples) for more. 
## Mount catalog The catalog can be mounted with [fuse](https://www.kernel.org/doc/html/next/filesystems/fuse.html) and navigate like any filesystem. ```bash $ mkdir /tmp/mnt $ catcli index -c github .github $ catcli mount /tmp/mnt $ ls -laR /tmp/mnt drwxrwxrwx - user 8 Mar 22:08 github mnt/github: .rwxrwxrwx 17 user 19 Oct 2022 FUNDING.yml drwxrwxrwx - user 2 Mar 10:15 workflows mnt/github/workflows: .rwxrwxrwx 691 user 19 Oct 2022 pypi-release.yml .rwxrwxrwx 635 user 8 Mar 21:08 testing.yml ``` ## Display entire hierarchy The entire catalog can be shown using the `ls -r` command. Resulting files can be sorted by size using the `-S --sortsize` switch. See the [examples](#examples) for more. ## Catalog graph The catalog can be exported in a dot file that can be used to generate a graph of the indexed files. ```bash $ catcli graph dot file created under "/tmp/catcli.dot" create graph with "dot /tmp/catcli.dot -T png -o /tmp/tree.png" (you need graphviz) $ dot /tmp/catcli.dot -T png -o /tmp/tree.png ``` ## Edit storage Storage entry can be edited with following catcli commands: * `rename` - rename the storage * `edit` - edit storage metadata ## Update catalog The catalog can be updated with the `update` command. Updates are based on the access time of each of the files and on the hash checksum if present (catalog was indexed with `-c --hash` and `update` is called with the switch `-c --hash`). ## CSV format Results can be printed to CSV using `--format=csv`. Fields are separated by a comma (`,`) and are quoted with double quotes (`"`). Each line contains the following fields: * **name**: the entry name * **type**: the entry type (file, directory, storage, etc) * **path**: the entry path * **size**: the entry size * **indexed_at**: when this entry was indexed * **maccess**: the entry modification date/time * **md5**: the entry checksum (if any) * **nbfiles**: the number of children (empty for nodes that are not storage or directory) * **free_space**: free space (empty for not storage nodes) * **total_space**: total space (empty for not storage nodes) * **meta**: meta information (empty for not storage nodes) # Examples ## Simple example Let's first create some files and directories: ```bash $ mkdir -p /tmp/test/{a,b,c} $ echo 'something in files in a' > /tmp/test/a/{1,2,3} $ echo 'something else in files in b' > /tmp/test/b/{4,5,6} $ echo 'some bytes' > /tmp/test/c/{7,8,9} $ tree /tmp/test /tmp/test ├── a │   ├── 1 │   ├── 2 │   └── 3 ├── b │   ├── 4 │   ├── 5 │   └── 6 └── c ├── 7 ├── 8 └── 9 3 directories, 9 files ``` First this directory is indexed with `catcli` as if it was some kind of external storage: ```bash $ catcli index --meta='my test directory' tmptest /tmp/test ``` Catcli creates its catalog file in the current directory as `catcli.catalog`. 
Printing the entire catalog as a tree is done with the command `ls -r` ``` $ catcli ls -r top └── storage: tmptest (my test directory) (nbfiles:3, free:3.7G/3.7G, date:2019-01-26 19:59:47) ├── a [nbfiles:3, totsize:72] │ ├── 1 [size:24] │ ├── 2 [size:24] │ └── 3 [size:24] ├── b [nbfiles:3, totsize:87] │ ├── 4 [size:29] │ ├── 5 [size:29] │ └── 6 [size:29] └── c [nbfiles:3, totsize:33] ├── 7 [size:11] ├── 8 [size:11] └── 9 [size:11] ``` The catalog can be walked with `ls` as if it was a normal directory ``` $ catcli ls top - storage: tmptest (my test directory) (nbfiles:3, free:3.7G/3.7G, date:2019-01-26 19:59:47) $ catcli ls tmptest storage: tmptest (my test directory) (nbfiles:3, free:3.7G/3.7G, date:2019-01-26 19:59:47) - a [nbfiles:3, totsize:72] - b [nbfiles:3, totsize:87] - c [nbfiles:3, totsize:33] $ catcli ls tmptest/b b [nbfiles:3, totsize:87] - 4 [size:29] - 5 [size:29] - 6 [size:29] ``` And files can be found using the command `find` ```bash $ catcli find 9 c/9 [size:11, storage:tmptest] ``` When using the `-b --script` switch, a one-liner is generated that allows to handle the found file(s) ``` $ catcli find 9 --script c/9 [size:11, storage:tmptest] op=file; source=/media/mnt; $op ${source}/c/9 ``` ## Archive example Let's consider a directory containing archive files: ```bash $ ls -1 /tmp/catcli catcli-0.3.1 v0.3.1.tar.gz v0.3.1.zip ``` To enable the indexing of archive contents use the `-a --archive` switch ```bash $ catcli index -au some-name /tmp/catcli ``` Then any command can be used to explore the catalog as for normal files but, by providing the `-a --archive` switch, archive content are displayed. ```bash $ catcli ls some-name storage: some-name (free:800G, total:1T) - catcli-0.3.1 [nbfiles:11, totsize:80.5K] - v0.3.1.tar.gz [size:24.2K] - v0.3.1.zip [size:31.2K] $ catcli ls -r some-name/v0.3.1.zip v0.3.1.zip [size:31.2K] $ catcli ls -ar some-name/v0.3.1.zip v0.3.1.zip [size:31.2K] ├── catcli-0.3.1 [archive:v0.3.1.zip] │ ├── catcli [archive:v0.3.1.zip] │ │ ├── __init__.py [archive:v0.3.1.zip] │ │ ├── catalog.py [archive:v0.3.1.zip] │ │ ├── catcli.py [archive:v0.3.1.zip] │ │ ├── logger.py [archive:v0.3.1.zip] │ │ ├── noder.py [archive:v0.3.1.zip] │ │ ├── utils.py [archive:v0.3.1.zip] │ │ └── walker.py [archive:v0.3.1.zip] │ ├── .gitignore [archive:v0.3.1.zip] │ ├── LICENSE [archive:v0.3.1.zip] │ ├── MANIFEST.in [archive:v0.3.1.zip] │ ├── README.md [archive:v0.3.1.zip] │ ├── requirements.txt [archive:v0.3.1.zip] │ ├── setup.cfg [archive:v0.3.1.zip] │ ├── setup.py [archive:v0.3.1.zip] │ ├── tests [archive:v0.3.1.zip] │ │ ├── __init__.py [archive:v0.3.1.zip] │ │ ├── helpers.py [archive:v0.3.1.zip] │ │ ├── test_find.py [archive:v0.3.1.zip] │ │ ├── test_graph.py [archive:v0.3.1.zip] │ │ ├── test_index.py [archive:v0.3.1.zip] │ │ ├── test_ls.py [archive:v0.3.1.zip] │ │ ├── test_rm.py [archive:v0.3.1.zip] │ │ └── test_tree.py [archive:v0.3.1.zip] │ ├── tests.sh [archive:v0.3.1.zip] │ └── .travis.yml [archive:v0.3.1.zip] └── catcli-0.3.1/ [archive:v0.3.1.zip] ``` # Contribution If you are having trouble installing or using catcli, open an issue. If you want to contribute, feel free to do a PR (please follow PEP8). The `tests.sh` script can be run to check the code. # Thank you If you like catcli, [buy me a coffee](https://ko-fi.com/deadc0de6). # License This project is licensed under the terms of the GPLv3 license.
PypiClean
/allennlp-light-1.0.0.tar.gz/allennlp-light-1.0.0/allennlp_light/modules/attention/bilinear_attention.py
import torch from torch.nn.parameter import Parameter from allennlp_light.modules.attention.attention import Attention from allennlp_light.nn import Activation @Attention.register("bilinear") class BilinearAttention(Attention): """ Computes attention between a vector and a matrix using a bilinear attention function. This function has a matrix of weights `W` and a bias `b`, and the similarity between the vector `x` and the matrix `y` is computed as `x^T W y + b`. Registered as an `Attention` with name "bilinear". # Parameters vector_dim : `int`, required The dimension of the vector, `x`, described above. This is `x.size()[-1]` - the length of the vector that will go into the similarity computation. We need this so we can build the weight matrix correctly. matrix_dim : `int`, required The dimension of the matrix, `y`, described above. This is `y.size()[-1]` - the length of the vector that will go into the similarity computation. We need this so we can build the weight matrix correctly. activation : `Activation`, optional (default=`linear`) An activation function applied after the `x^T W y + b` calculation. Default is linear, i.e. no activation. normalize : `bool`, optional (default=`True`) If true, we normalize the computed similarities with a softmax, to return a probability distribution for your attention. If false, this is just computing a similarity score. """ def __init__( self, vector_dim: int, matrix_dim: int, activation: Activation = None, normalize: bool = True, ) -> None: super().__init__(normalize) self._weight_matrix = Parameter(torch.Tensor(vector_dim, matrix_dim)) self._bias = Parameter(torch.Tensor(1)) self._activation = activation or Activation.by_name("linear")() self.reset_parameters() def reset_parameters(self): torch.nn.init.xavier_uniform_(self._weight_matrix) self._bias.data.fill_(0) def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor: intermediate = vector.mm(self._weight_matrix).unsqueeze(1) return self._activation(intermediate.bmm(matrix.transpose(1, 2)).squeeze(1) + self._bias)
PypiClean
/memecomplete_desktop-0.2-py3-none-any.whl/memecomplete_desktop-0.2.dist-info/DESCRIPTION.rst
Unix: |Unix Build Status| Windows: |Windows Build Status|\ Metrics: |Coverage Status| |Scrutinizer Code Quality|\ Usage: |PyPI Version| |PyPI Downloads| Overview ======== Desktop client for https://memecomplete.com. Setup ===== Requirements ------------ - Python 3.6+ - SpeechRecognition requirements: https://github.com/Uberi/speech_recognition#requirements - macOS: ``$ brew install flac portaudio swig`` Installation ------------ Install the client with pip: .. code:: sh $ pip install memecomplete-desktop or directly from the source code: .. code:: sh $ git clone https://github.com/jacebrowning/memecomplete-desktop.git $ cd memecomplete-desktop $ python setup.py install Usage ===== Launch the GUI from the command-line: .. code:: sh $ memecomplete .. |Unix Build Status| image:: http://img.shields.io/travis/jacebrowning/memecomplete-desktop/master.svg :target: https://travis-ci.org/jacebrowning/memecomplete-desktop .. |Windows Build Status| image:: https://img.shields.io/appveyor/ci/jacebrowning/memecomplete-desktop/master.svg :target: https://ci.appveyor.com/project/jacebrowning/memecomplete-desktop .. |Coverage Status| image:: http://img.shields.io/coveralls/jacebrowning/memecomplete-desktop/master.svg :target: https://coveralls.io/r/jacebrowning/memecomplete-desktop .. |Scrutinizer Code Quality| image:: http://img.shields.io/scrutinizer/g/jacebrowning/memecomplete-desktop.svg :target: https://scrutinizer-ci.com/g/jacebrowning/memecomplete-desktop/?branch=master .. |PyPI Version| image:: http://img.shields.io/pypi/v/memecomplete-desktop.svg :target: https://pypi.python.org/pypi/memecomplete-desktop .. |PyPI Downloads| image:: http://img.shields.io/pypi/dm/memecomplete-desktop.svg :target: https://pypi.python.org/pypi/memecomplete-desktop Revision History ================ 0.2 (2017/06/12) ---------------- - Updated API to use https://memecomplete.com. 0.1.1 (2016/09/14) ------------------ - Fixed image loading. 0.1 (2016/09/05) ---------------- - Initial release.
PypiClean
/VIP-yt-2022.7.26.tar.gz/VIP-yt-2022.7.26/VIP_yt/extractor/epicon.py
import re from .common import InfoExtractor from ..utils import ExtractorError class EpiconIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?epicon\.in/(?:documentaries|movies|tv-shows/[^/?#]+/[^/?#]+)/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.epicon.in/documentaries/air-battle-of-srinagar', 'info_dict': { 'id': 'air-battle-of-srinagar', 'ext': 'mp4', 'title': 'Air Battle of Srinagar', 'description': 'md5:c4de2013af9bc05ae4392e4115d518d7', 'thumbnail': r're:^https?://.*\.jpg$', } }, { 'url': 'https://www.epicon.in/movies/krit', 'info_dict': { 'id': 'krit', 'ext': 'mp4', 'title': 'Krit', 'description': 'md5:c12b35dad915d48ccff7f013c79bab4a', 'thumbnail': r're:^https?://.*\.jpg$', } }, { 'url': 'https://www.epicon.in/tv-shows/paapnaashini-ganga/season-1/vardaan', 'info_dict': { 'id': 'vardaan', 'ext': 'mp4', 'title': 'Paapnaashini Ganga - Season 1 - Ep 1 - VARDAAN', 'description': 'md5:f517058c3d0402398eefa6242f4dd6ae', 'thumbnail': r're:^https?://.*\.jpg$', } }, { 'url': 'https://www.epicon.in/movies/jayadev', 'info_dict': { 'id': 'jayadev', 'ext': 'mp4', 'title': 'Jayadev', 'description': 'md5:09e349eecd8e585a3b6466904f19df6c', 'thumbnail': r're:^https?://.*\.jpg$', } }] def _real_extract(self, url): id = self._match_id(url) webpage = self._download_webpage(url, id) cid = self._search_regex(r'class=\"mylist-icon\ iconclick\"\ id=\"(\d+)', webpage, 'cid') headers = {'content-type': 'application/x-www-form-urlencoded; charset=UTF-8'} data = f'cid={cid}&action=st&type=video'.encode() data_json = self._parse_json(self._download_json('https://www.epicon.in/ajaxplayer/', id, headers=headers, data=data), id) if not data_json['success']: raise ExtractorError(data_json['message'], expected=True) title = self._search_regex(r'setplaytitle=\"([^\"]+)', webpage, 'title') description = self._og_search_description(webpage) or None thumbnail = self._og_search_thumbnail(webpage) or None formats = self._extract_m3u8_formats(data_json['url']['video_url'], id) self._sort_formats(formats) subtitles = {} for subtitle in data_json.get('subtitles', []): sub_url = subtitle.get('file') if not sub_url: continue subtitles.setdefault(subtitle.get('lang', 'English'), []).append({ 'url': self._proto_relative_url(sub_url), }) return { 'id': id, 'formats': formats, 'title': title, 'description': description, 'thumbnail': thumbnail, 'subtitles': subtitles, } class EpiconSeriesIE(InfoExtractor): _VALID_URL = r'(?!.*season)https?://(?:www\.)?epicon\.in/tv-shows/(?P<id>[^/?#]+)' _TESTS = [{ 'url': 'https://www.epicon.in/tv-shows/1-of-something', 'playlist_mincount': 5, 'info_dict': { 'id': '1-of-something', }, }, { 'url': 'https://www.epicon.in/tv-shows/eco-india-english', 'playlist_mincount': 76, 'info_dict': { 'id': 'eco-india-english', }, }, { 'url': 'https://www.epicon.in/tv-shows/s/', 'playlist_mincount': 25, 'info_dict': { 'id': 's', }, }, { 'url': 'https://www.epicon.in/tv-shows/ekaant', 'playlist_mincount': 38, 'info_dict': { 'id': 'ekaant', }, }] def _real_extract(self, url): id = self._match_id(url) webpage = self._download_webpage(url, id) episodes = re.findall(r'ct-tray-url=\"(tv-shows/%s/[^\"]+)' % id, webpage) entries = [self.url_result('https://www.epicon.in/%s' % episode, ie=EpiconIE.ie_key()) for episode in episodes] return self.playlist_result(entries, playlist_id=id)
PypiClean
/formification-1.2.0-py3-none-any.whl/formulaic/static/admin/formulaic/ember-formulaic/node_modules/yargs/lib/usage.js
var cliui = require('cliui') var decamelize = require('decamelize') var wsize = require('window-size') module.exports = function (yargs, y18n) { var __ = y18n.__ var self = {} // methods for ouputting/building failure message. var fails = [] self.failFn = function (f) { fails.push(f) } var failMessage = null var showHelpOnFail = true self.showHelpOnFail = function (enabled, message) { if (typeof enabled === 'string') { message = enabled enabled = true } else if (typeof enabled === 'undefined') { enabled = true } failMessage = message showHelpOnFail = enabled return self } var failureOutput = false self.fail = function (msg) { if (fails.length) { fails.forEach(function (f) { f(msg) }) } else { // don't output failure message more than once if (!failureOutput) { failureOutput = true if (showHelpOnFail) yargs.showHelp('error') if (msg) console.error(msg) if (failMessage) { if (msg) console.error('') console.error(failMessage) } } if (yargs.getExitProcess()) { process.exit(1) } else { throw new Error(msg) } } } // methods for ouputting/building help (usage) message. var usage self.usage = function (msg) { usage = msg } var examples = [] self.example = function (cmd, description) { examples.push([cmd, description || '']) } var commands = [] self.command = function (cmd, description) { commands.push([cmd, description || '']) } self.getCommands = function () { return commands } var descriptions = {} self.describe = function (key, desc) { if (typeof key === 'object') { Object.keys(key).forEach(function (k) { self.describe(k, key[k]) }) } else { descriptions[key] = desc } } self.getDescriptions = function () { return descriptions } var epilog self.epilog = function (msg) { epilog = msg } var wrap = windowWidth() self.wrap = function (cols) { wrap = cols } var deferY18nLookupPrefix = '__yargsString__:' self.deferY18nLookup = function (str) { return deferY18nLookupPrefix + str } self.help = function () { normalizeAliases() var demanded = yargs.getDemanded() var options = yargs.getOptions() var keys = Object.keys( Object.keys(descriptions) .concat(Object.keys(demanded)) .concat(Object.keys(options.default)) .reduce(function (acc, key) { if (key !== '_') acc[key] = true return acc }, {}) ) var ui = cliui({ width: wrap, wrap: !!wrap }) // the usage string. if (usage) { var u = usage.replace(/\$0/g, yargs.$0) ui.div(u + '\n') } // your application's commands, i.e., non-option // arguments populated in '_'. if (commands.length) { ui.div(__('Commands:')) commands.forEach(function (command) { ui.div( {text: command[0], padding: [0, 2, 0, 2], width: maxWidth(commands) + 4}, {text: command[1]} ) }) ui.div() } // the options table. var aliasKeys = (Object.keys(options.alias) || []) .concat(Object.keys(yargs.parsed.newAliases) || []) keys = keys.filter(function (key) { return !yargs.parsed.newAliases[key] && aliasKeys.every(function (alias) { return (options.alias[alias] || []).indexOf(key) === -1 }) }) var switches = keys.reduce(function (acc, key) { acc[key] = [ key ].concat(options.alias[key] || []) .map(function (sw) { return (sw.length > 1 ? 
'--' : '-') + sw }) .join(', ') return acc }, {}) if (keys.length) { ui.div(__('Options:')) keys.forEach(function (key) { var kswitch = switches[key] var desc = descriptions[key] || '' var type = null if (~desc.lastIndexOf(deferY18nLookupPrefix)) desc = __(desc.substring(deferY18nLookupPrefix.length)) if (~options.boolean.indexOf(key)) type = '[' + __('boolean') + ']' if (~options.count.indexOf(key)) type = '[' + __('count') + ']' if (~options.string.indexOf(key)) type = '[' + __('string') + ']' if (~options.normalize.indexOf(key)) type = '[' + __('string') + ']' if (~options.array.indexOf(key)) type = '[' + __('array') + ']' var extra = [ type, demanded[key] ? '[' + __('required') + ']' : null, options.choices && options.choices[key] ? '[' + __('choices:') + ' ' + self.stringifiedValues(options.choices[key]) + ']' : null, defaultString(options.default[key], options.defaultDescription[key]) ].filter(Boolean).join(' ') ui.span( {text: kswitch, padding: [0, 2, 0, 2], width: maxWidth(switches) + 4}, desc ) if (extra) ui.div({text: extra, padding: [0, 0, 0, 2], align: 'right'}) else ui.div() }) ui.div() } // describe some common use-cases for your application. if (examples.length) { ui.div(__('Examples:')) examples.forEach(function (example) { example[0] = example[0].replace(/\$0/g, yargs.$0) }) examples.forEach(function (example) { ui.div( {text: example[0], padding: [0, 2, 0, 2], width: maxWidth(examples) + 4}, example[1] ) }) ui.div() } // the usage string. if (epilog) { var e = epilog.replace(/\$0/g, yargs.$0) ui.div(e + '\n') } return ui.toString() } // return the maximum width of a string // in the left-hand column of a table. function maxWidth (table) { var width = 0 // table might be of the form [leftColumn], // or {key: leftColumn}} if (!Array.isArray(table)) { table = Object.keys(table).map(function (key) { return [table[key]] }) } table.forEach(function (v) { width = Math.max(v[0].length, width) }) // if we've enabled 'wrap' we should limit // the max-width of the left-column. if (wrap) width = Math.min(width, parseInt(wrap * 0.5, 10)) return width } // make sure any options set for aliases, // are copied to the keys being aliased. function normalizeAliases () { var demanded = yargs.getDemanded() var options = yargs.getOptions() ;(Object.keys(options.alias) || []).forEach(function (key) { options.alias[key].forEach(function (alias) { // copy descriptions. if (descriptions[alias]) self.describe(key, descriptions[alias]) // copy demanded. if (demanded[alias]) yargs.demand(key, demanded[alias].msg) // type messages. if (~options.boolean.indexOf(alias)) yargs.boolean(key) if (~options.count.indexOf(alias)) yargs.count(key) if (~options.string.indexOf(alias)) yargs.string(key) if (~options.normalize.indexOf(alias)) yargs.normalize(key) if (~options.array.indexOf(alias)) yargs.array(key) }) }) } self.showHelp = function (level) { level = level || 'error' console[level](self.help()) } self.functionDescription = function (fn) { var description = fn.name ? decamelize(fn.name, '-') : __('generated-value') return ['(', description, ')'].join('') } self.stringifiedValues = function (values, separator) { var string = '' var sep = separator || ', ' var array = [].concat(values) if (!values || !array.length) return string array.forEach(function (value) { if (string.length) string += sep string += JSON.stringify(value) }) return string } // format the default-value-string displayed in // the right-hand column. 
function defaultString (value, defaultDescription) { var string = '[' + __('default:') + ' ' if (value === undefined && !defaultDescription) return null if (defaultDescription) { string += defaultDescription } else { switch (typeof value) { case 'string': string += JSON.stringify(value) break case 'object': string += JSON.stringify(value) break default: string += value } } return string + ']' } // guess the width of the console window, max-width 80. function windowWidth () { return wsize.width ? Math.min(80, wsize.width) : null } // logic for displaying application version. var version = null self.version = function (ver, opt, msg) { version = ver } self.showVersion = function () { if (typeof version === 'function') console.log(version()) else console.log(version) } return self }
PypiClean
/variant_tools-3.1.3-cp38-cp38-macosx_10_9_x86_64.whl/variant_tools/pipeline.py
import bz2 import collections import copy import csv import glob import gzip import hashlib import logging import os import pipes import pprint import random import re import shlex import shutil import subprocess import sys import tarfile import tempfile import threading import time import traceback import zipfile from collections import MutableMapping from itertools import combinations, tee from multiprocessing import Process from .project import PipelineDescription, Project from .ucsctools import showTrack from .utils import (TEMP, FileInfo, ProgressBar, RuntimeFiles, calculateMD5, decompressGzFile, delayedAction, downloadFile, downloadURL, env, existAndNewerThan, substituteVars, typeOfValues, validFieldName, which) class NamedList: '''This class implements a named list to assist users in inputting a large number of items to a pipeline. Because these lists are often stored in a text or excel file and are associated with a name or some other meta information, users can input all them in the format of three ':' seprated parts 1. name (optional): name of the list. 2. values: values can be either a space or comma separated list such as A1 A2 ... or A1,A2,A3,... or a field in a file in the format of fieldname@filename?query. Fieldname should be one or more fields from the file joined with a non-alphabetical character (e.g. '+'). Filename should be a file in .csv, text (tab delimited) or EXCEL file. Query is a query that can limit the items from the retrieved list. A query can consist other fields in the input data file with the entire file treated as a retional database. 3. meta (optional): meta information for the list, which can be extra comment, weight (relative to other list), and others, must be in the format of comma separated name=value Examples: [email protected] A column called phenotype from "phenotype.xls". Age will be treated as name of the list. deceased:[email protected] A list with name "deceased" retrieved from column "dead" of "phenotype.xls". id1,id2,id3,id4 A list of IDs. [email protected]?recurrance=="1" get a list of IDs (column IDs) from "phenotype.xls" where the recurrance column has value "1" [email protected]:weight==1000 A named list with meta information 1000 The named list has attribute name Name of the list, default to "" unless default_name is given. items A list of items. meta Optional meta information as a dictionary. Default to {}. ''' def __init__(self, value_string, default_name=""): self.name = default_name self.items = [] self.meta = {} # if it is None etc if value_string is None: return if not isinstance(value_string, str): if len(value_string) == 1: value_string = value_string[0] else: # if multiple items are passed, treat directly # as list of strings. self.items = value_string return # self._parse(value_string, default_name) def _parse(self, value_string, default_name): if not value_string: return # single space separated string if re.match('([\w\d-]+\s+)+[\w\d-]+', value_string): self.items = value_string.split() return # # comma separated named list matched = re.match( '^([\w\d-]+:)*((([\w\d-]+,)+[\w\d-]+)|([^:]*)@([^:?]*)(\?([^:]*))*)(:([^:]+))*$', value_string) if matched is None: raise ValueError(( '"{}" is not a valid named list / query string, which should be name (optional) + comma separated list or ' 'colname@filename with optional query string (?), with optional meta. Three parts should be separated by :.' 
).format(value_string)) name = matched.group(1) comma_list = matched.group(3) colname = matched.group(5) filename = matched.group(6) query = matched.group(8) meta = matched.group(10) if name is not None: self.name = name.rstrip(':') if meta is not None: matched = re.match( '[\d\w_]+\s*=\s*[^=,]*(,[\d\w_]+\s*=\s*[^=,]*)*$', meta) if not matched: raise ValueError( 'Meta information is not in the format of key=value: {}' .format(meta)) self.meta = { x.split('=')[0].strip(): x.split('=')[1].strip() for x in meta.split(',') } # if comma_list is not None: self.items = comma_list.split(',') else: import pandas as pd filename = os.path.expanduser(filename) if not os.path.isfile(filename): raise ValueError('File does not exist: {}'.format(filename)) # pandas can be slow to import if filename.endswith('.csv'): data = pd.read_csv(filename) else: data = pd.read_excel(filename) # convert everything to str, however, there are cases where 1 is converted to 1.0 when missing value # is present (na is considered float, thus the behavior) data = data.applymap(str) env.logger.trace("{} records are loaded from {}".format( data.shape[0], filename)) # env.logger.trace(data) # # if query? if query is not None: if re.match('.*[\d\w_]+\s*=\s*[\d\w_]+.*', query): raise ValueError( 'Syntax "a=b" is not allowed. Please use "a==b" instead: {}' .format(query)) try: pre_filter = data.shape[0] data = data.query(query) if pre_filter != data.shape[0]: env.logger.info( '{} out of {} records are removed by filter {}' .format(pre_filter - data.shape[0], pre_filter, query)) except Exception as e: raise ValueError( 'Failed to apply query "{}" to data file {}: {}'.format( query, filename, e)) # values = None for col in re.split('([^\w\d_])', colname): if re.match('[^\w\d_]', col): if values is None: raise ValueError( 'Leading non-ascii word is not allowed. {}'.format( colname)) else: values = [x + col for x in values] continue if col not in data.columns: raise ValueError( 'File {} does not have column {}. Available columns are {}' .format(filename, col, ', '.join(list(data.columns)))) if values is None: values = list(data[col].fillna('')) else: values = [ x + y for x, y in zip(values, data[col].fillna('')) ] # self.items = values if self.name == default_name: self.name = colname def rvec(vec): return 'c(' + ','.join([repr(x) for x in vec]) + ')' class SkipIf: '''An input emitter that skips the step (does not pass any input to the action) if certain condition is met. The input will be passed directly as output by default. This emitter is equivalent to ``EmitInput(select=not cond, pass_unselected)`` ''' def __init__(self, cond=None, pass_unselected=True): '''Does not emit input and skip the step if cond is ``True``. Parameters: cond (boolean): A boolean value True or False. In practice ``cond`` is usually a lambda function that checks the existence of a file or value of a pipeline variable. pass_unselected (boolean): Pass input files to output if ``cond`` is not met. ''' self.cond = cond self.pass_unselected = pass_unselected def __call__(self, ifiles, pipeline=None): if self.cond: return [], [], ifiles else: return [ifiles], [{}], [] class EmitInput: '''An input emitter that emits input files individually, in pairs, or altogether.''' def __init__(self, group_by='all', select=True, skip=False, pass_unselected=True, labels=None, for_each=None, loop_labels=None): '''Select input files of certain types, group them, and send input files to action. 
Selection criteria can be True (all input file types, default), 'False' (select no input file, but an empty list will still be passed to pipeline action), 'fastq' (check content of files), or one or more file extensions (e.g. ['.sam', '.bam']). Eligible files are by default sent altogether (group_by='all') to action (${INPUT} equals ${INPUT#} where # is the index of step, but can also be sent individually (group_by='single', ${INPUT} equals to a list of a single file) or in pairs (group_by='paired', e.g. filename_1.txt and filename_2.txt), pairwise for (a0, a1), (a1, a2), (a2, a3) ..., or combinations for all combination of different input files. Unselected files are by default passed directly as output of a step. If skip is set to True, the whole step will be skipped. If labels is set to the name of one or more pipeline variable of type list, values of these variables will be matched to each input file name, and variables of name _${labels} will be set for each emitted group of files (so that they are always list with the same length as ${input}. If for_each is set to the name of one or more pipeline variables, the output will be repeated for each item of this variable, with _${for_each} set to each item. for_each='A,B' is equivalent to for _A,_B in zip(VARS[A], VARS[B]), and for_each=['A', 'B'] is equivalent to nested loop of _A in VARS[A] and _B in VARS[B]. ''' self.group_by = group_by # if labels is None or not labels: self.labels = [] elif isinstance(labels, str): self.labels = [labels] elif isinstance(labels, list): self.labels = labels else: raise ValueError( 'Unacceptable value for parameter labels: {}'.format(labels)) # if for_each is None or not for_each: self.for_each = [] elif isinstance(for_each, str): self.for_each = [for_each] elif isinstance(for_each, list): self.for_each = for_each else: raise ValueError( 'Unacceptable value for parameter for_each: {}'.format( for_each)) # if loop_labels is None or not loop_labels: self.loop_labels = [] elif isinstance(loop_labels, str): self.loop_labels = [loop_labels] elif isinstance(loop_labels, list): self.loop_labels = loop_labels else: raise ValueError( 'Unacceptable value for parameter loop_labels: {}'.format( loop_labels)) # if type(select) == str: if select not in ['fastq', 'bam', 'sam' ] and not str(select).startswith('.'): raise ValueError( "Value to option select can only be True/False, " "'fastq', or a file extension with leading '.': '{}' provided." .format(select)) self.select = [select] elif select in [True, False]: self.select = select else: for s in select: if s not in ['fastq', 'bam', 'sam' ] and not str(s).startswith('.'): raise ValueError( "Value to option select can only be True/False, " "'fastq', or a file extension with leading '.': '{}' provided." .format(s)) self.select = select self.skip = skip self.pass_unselected = pass_unselected def _isFastq(self, filename): try: if not os.path.isfile(filename) and not os.path.isfile( filename + '.file_info'): raise RuntimeError('File not found') fl = FileInfo(filename).firstline() if fl is None: env.logger.info( 'Cannot detect the type of file because the {} has been removed.' 
.format(filename)) return filename.lower().split('.')[-1] not in [ 'bam', 'sam', 'gz', 'zip' ] if not fl.startswith('@'): return False if filename.endswith('.gz'): env.logger.warning( '{}: compressed fastq file might not be ' 'acceptable to downstream analysis.'.format(filename)) except Exception as e: env.logger.debug('Input file {} is not in fastq format: {}'.format( filename, e)) return False return True def _is_paired(self, f1, f2, at=None): if len(f1) != len(f2): return False if f1 >= f2: return False diffs = [x != y for x, y in zip(f1, f2)] if sum(diffs) != 1: return False diff_at = diffs.index(True) if sorted([f1[diff_at], f2[diff_at]]) != ['1', '2']: return False if at is not None and diff_at not in at: return False return True def _pairByReadNames(self, selected, unselected): # we should pair files by actual read names read_map = {} for filename in selected: read = FileInfo(filename).firstline().strip() if read[:-1] in read_map: if read.endswith('1'): if read_map[read[:-1]][0] is not None: raise RuntimeError( 'Fastq file {} has the same first read as {}' .format(filename, read_map[read[:-1]][0])) else: read_map[read[:-1]][0] = filename elif read.endswith('2'): if read_map[read[:-1]][1] is not None: raise RuntimeError( 'Fastq file {} has the same first read as {}' .format(filename, read_map[read[:-1]][1])) else: read_map[read[:-1]][1] = filename else: raise RuntimeError( 'Fastq file {} is not paired because its read name does ' 'not end with 1 or 2'.format(filename)) else: if read.endswith('1'): read_map[read[:-1]] = [filename, None] elif read.endswith('2'): read_map[read[:-1]] = [None, filename] else: raise RuntimeError( 'Fastq file {} is not paired because its read name does ' 'not end with 1 or 2'.format(filename)) # now, let us go through files pairs = [] for read, filenames in read_map.items(): if filenames[0] is None: raise RuntimeError( 'Fastq file {} is not paired (no matching read is found)' .format(filenames[0])) elif filenames[1] is None: raise RuntimeError( 'Fastq file {} is not paired (no matching read is found)' .format(filenames[1])) else: if not self._is_paired(filenames[0], filenames[1]): env.logger.warning( '{} and {} contain paired reads but the filenames ' 'do not follow illumina filename convention'.format( filenames[0], filenames[1])) pairs.append(filenames) return sorted(pairs), unselected def _pairByFileName(self, selected, unselected): # # there is a possibility that one name differ at multiple parts # with another name. e.g # # A1_TAGCTT_L007_R1_001.fastq.gz # # differ with the following two names by a number # # A1_TAGCTT_L007_R1_002.fastq.gz # A1_TAGCTT_L007_R2_001.fastq.gz # # the code below tries to find good pairs first, then use matched # locations to pair others if not selected: env.logger.warning( 'No file matching type "{}" is selected for pairing.'.format( self.select)) return [], unselected all_pairs = [ [x, y] for x in selected for y in selected if self._is_paired(x, y) ] unpaired = [x for x in selected if not any([x in y for y in all_pairs])] if unpaired: raise ValueError('Failed to pair input filenames: {} is not paired' 'with any other names.'.format( ', '.join(unpaired))) uniquely_paired = [ x for x in selected if sum([x in y for y in all_pairs]) == 1 ] # if some filenames are uniquely paired, we can use them to identify # index locations. 
if uniquely_paired: pairs = [ x for x in all_pairs if x[0] in uniquely_paired or x[1] in uniquely_paired ] if len(pairs) != all_pairs: # find the differentiating index of existing pairs diff_at = set([[i != j for i, j in zip(x[0], x[1])].index(True) for x in pairs]) # use the diff_at locations to screen the rest of the pairs pairs.extend([ x for x in all_pairs if x not in pairs and self._is_paired(x[0], x[1], diff_at) ]) # if len(pairs) * 2 != len(selected): unpaired = [ x for x in selected if not any([x in y for y in pairs]) ] raise ValueError('Failed to pair input files because they ' 'match multiple filenames: {}'.format( ', '.join(unpaired))) return sorted(pairs), unselected else: # all filenames match to multiple names, so we try to get all # differentiating indexes and see if one of them can pair filenames # perfectly. We start from the end because we assume that _1 _2 # are close to the end of filenames. # diff_at = set([[i != j for i, j in zip(x[0], x[1])].index(True) for x in all_pairs]) acceptable_diff_at = [] for d in diff_at: # try to pair all names at this location. pairs = [ x for x in all_pairs if self._is_paired(x[0], x[1], [d]) ] if len(pairs) * 2 != len(selected): continue # all filename should appear once and only once if not all( [sum([x in y for y in pairs]) == 1 for x in selected]): continue acceptable_diff_at.append(d) # fortunately, only one perfect pairing is found if len(acceptable_diff_at) == 1: pairs = [ x for x in all_pairs if self._is_paired(x[0], x[1], acceptable_diff_at) ] return sorted(pairs), unselected elif len(acceptable_diff_at) > 1: env.logger.warning( 'There are {} ways to match all filenames ' 'perfectly. The one using a latter differentiating index ' 'is used.'.format(len(acceptable_diff_at))) diff_at = sorted(list(acceptable_diff_at))[-1] pairs = [ x for x in all_pairs if self._is_paired(x[0], x[1], [diff_at]) ] return sorted(pairs), unselected else: raise ValueError( 'All filenames match multiple names but no differentiating ' 'index can pair filenames perfectly.') def get_groups(self, ifiles, pipeline=None): if self.skip: return [], ifiles selected = [] unselected = [] for filename in ifiles: match = False if self.select is True: match = True elif self.select is False: pass else: # list of types for t in self.select: if t == 'fastq': if self._isFastq(filename): match = True break if filename.lower().endswith('.' + t.lstrip('.').lower()): match = True break # if match: selected.append(filename) elif self.pass_unselected: unselected.append(filename) # # for this special case, the step is skipped if self.group_by == 'single': return [[x] for x in selected], unselected elif self.group_by == 'all': return [selected], unselected elif self.group_by == 'paired': if 'fastq' in self.select: try: return self._pairByReadNames(selected, unselected) except Exception as e: # if failed to pair by read name, pair by filenames env.logger.warning( 'Failed to pair fastq files by read names. ' 'Trying to pair files by filenames: {}'.format(e)) else: # this should not happen becase we do not need to pair non-fastq files # at this point, but I will leave the code here anyway. env.logger.warning( 'It is unsafe to pair input files by names instead of ' 'their content. 
Please add option select="fastq" if you need to ' 'pair input fastq files') return self._pairByFileName(selected, unselected) elif self.group_by == 'pairwise': f1, f2 = tee(selected) next(f2, None) return [list(x) for x in zip(f1, f2)], unselected elif self.group_by == 'combinations': return [list(x) for x in combinations(selected, 2)], unselected def __call__(self, ifiles, pipeline=None): selected_groups, unselected = self.get_groups(ifiles, pipeline) set_vars = [{} for x in selected_groups] for wv in self.labels: values = pipeline.VARS[wv] if not isinstance(values, list): raise ValueError( 'with_var variable {} is not a list ("{}")'.format( wv, values)) if len(values) != len(ifiles): raise ValueError( 'Length of variable {} (length {}) should match the number of input files (length {}).' .format(wv, len(values), len(ifiles))) file_map = {x: y for x, y in zip(ifiles, values)} #env.logger.error('Paring {}'.format(file_map)) for idx, val in enumerate(values): set_vars[idx]['_' + wv] = [file_map[x] for x in selected_groups[idx]] for fe_all in self.for_each: loop_size = None for fe in fe_all.split(','): values = pipeline.VARS[fe] if not isinstance(values, list): raise ValueError( 'for_each variable {} is not a list ("{}")'.format( fe, values)) if loop_size is None: loop_size = len(values) elif loop_size != len(values): raise ValueError( 'Length of variable {} (length {}) should match the length of variable {} (length {}).' .format(fe, len(values), fe_all.split(',')[0], loop_size)) # expand selected_groups = selected_groups * loop_size tmp = [] for vidx in range(loop_size): for idx in range(len(set_vars)): for fe in fe_all.split(','): set_vars[idx]['_' + fe] = pipeline.VARS[fe][vidx] tmp.extend(copy.deepcopy(set_vars)) set_vars = tmp # env.logger.trace('SELECTED GROUPS {}\nVARS {}\nUNSELECTED {}' # .format( selected_groups, set_vars, unselected)) return selected_groups, set_vars, unselected class PipelineAction: '''Base class for all pipeline actions. If one or more output files are specified, the pipeline will record the runtime signature of this action in a file ``${cache_dir}/path/to/$OUTPUT[0].exe_info``, which consists of the MD5 signature of input and output files, command used, and additional information such as start and end time of execution, standard and error outputs. An action will be skipped if the action is re-run with the same input, output and command. NOTE: The ``__call__`` function or a pipeline action implements the runtime signature feature and calls function ``execute`` for actual work. User-defined actions should either override the ``__call__`` function (without the runtime signature feature) or function ``_execute(self, ifiles, pipeline)`` (with runtime signature feature). User can also define function ``_bypass(self, ifiles, pipeline)`` if the step is bypassed due to identical execution signatures. ''' def __init__(self, cmd='', output=[]): ''' Parameters: cmd (string or list of strings): one or more commands to be executed. It should capture the name and all options used by the command. output (string or list of strings): Output files. If at least one output file is specified, the runtime signature of this action will be saved to $output[0].exe_info. The output directory will be created if it does not exist. ''' # multiple command is not allowed. 
if not cmd: self.cmd = [] elif isinstance(cmd, str): self.cmd = [' '.join(cmd.split('\n'))] else: self.cmd = [' '.join(x.split('\n')) for x in cmd] # if not output: self.output = [] elif isinstance(output, str): self.output = [output] else: self.output = output # self.runtime = RuntimeFiles(self.output) def _bypass(self, ifiles, pipeline=None): '''Function called by ``__call__`` if the step is bypassed due to identical execution signature. This function can be used, for example, to set pipeline variable even when the step is not executed.''' return True def _execute(self, ifiles, pipeline=None): '''Function called by ``__call__`` for actual action performed on ifiles. A user-defined action should re-define __call__ or this function. This funciton should return ``True`` if the action is completed successfully, ``False`` for pending (signature will be written later, and raise an exception for errors. ''' raise RuntimeError( 'Please define your own execute function in an derived class of PipelineAction.' ) def _write_info(self, pipeline=None): if not self.output: return with open(self.runtime.proc_info, 'a') as exe_info: exe_info.write('#End: {}\n'.format(time.asctime(time.localtime()))) for f in self.output: if not os.path.isfile(f): raise RuntimeError( 'Output file {} does not exist after completion of the job.' .format(f)) # for performance considerations, use partial MD5 exe_info.write('{}\t{}\t{}\n'.format( f, os.path.getsize(f), calculateMD5(f, partial=True))) # write standard output to exe_info exe_info.write('\n\nSTDOUT\n\n') if os.path.isfile(self.runtime.proc_out): with open(self.runtime.proc_out) as stdout: for line in stdout: exe_info.write(line) # write standard error to exe_info exe_info.write('\n\nSTDERR\n\n') if os.path.isfile(self.runtime.proc_err): with open(self.runtime.proc_err) as stderr: for line in stderr: exe_info.write(line) # if command succeed, remove all out_ and err_ files, self.runtime.clear() def _useVars(self, text, pipeline_vars={}): # reduce extra newline, space etc text = ' '.join(text.split()) # # if there are pipeline vars, try to use pipeline vars to replace the # text if pipeline_vars: files_and_dirs = [] for key, item in pipeline_vars.items(): if isinstance(item, str) and (os.path.isfile(item) or os.path.isdir(item)): files_and_dirs.append([key, item]) # sort by length files_and_dirs = sorted(files_and_dirs, key=lambda x: -len(x[1])) # # try to subsitute pieces = text.split() for key, item in files_and_dirs: for idx, p in enumerate(pieces): if p.startswith(item) and (len(p) == len(item) or not (p[len(item)].isalpha() or p[len(item)].isdigit())): pieces[idx] = '${{{}}}{}'.format( key, pieces[idx][len(item):]) text = ' '.join(pieces) return text def __call__(self, ifiles, pipeline=None): '''Execute action with input files ``ifiles`` with runtime information stored in ``pipeline``. This function is called by the pipeline and calls user-defined ``execute`` function. Parameters: ifiles (string or list of strings): input file names pipeline (an pipeline object): An Pipeline object for which the action is executed. The action can set or retrieve runtime information from a dictionary ``pipeline.VARS``. Result: An action returns output files (parameter ``output`` of the action) if any output is given. Otherwise input files (``ifiles``) are passed through and returned. 
''' if self.output: if os.path.isfile(self.runtime.proc_info): with open(self.runtime.proc_info) as exe_info: cmd = exe_info.readline().strip() if self._useVars(cmd, pipeline.VARS) == self._useVars('; '.join(self.cmd), pipeline.VARS) \ and existAndNewerThan(self.output, ifiles + pipeline.step_dependent_files, md5file=self.runtime.proc_info, pipeline=pipeline): env.logger.info('Reuse existing {}'.format(', '.join( self.output))) self._bypass(ifiles, pipeline) if self.output: return self.output else: return ifiles # create directory if output directory does not exist for d in [ os.path.split(os.path.abspath(x))[0] for x in self.output ]: if not os.path.isdir(d): try: os.makedirs(d) except Exception as e: raise RuntimeError( 'Failed to create directory {} for output file: {}' .format(d, e)) # We cannot ignore this step, but do we have all the input files? # If not, we will have to rewind the execution for ifile in ifiles: if not os.path.isfile(ifile): env.logger.warning( 'Rewind execution because input file {} does not exist.' .format(ifile)) raise RewindExecution(ifile) # if self.output: with open(self.runtime.proc_info, 'w') as exe_info: exe_info.write('{}\n'.format('; '.join(self.cmd))) exe_info.write('#Start: {}\n'.format( time.asctime(time.localtime()))) for f in ifiles + pipeline.step_dependent_files: # for performance considerations, use partial MD5 exe_info.write('{}\t{}\t{}\n'.format( f, os.path.getsize(f), calculateMD5(f, partial=True))) # now, run the job, write info if it is successfully finished. # Otherwise the job might be forked and it will record the signature by itself. ret = self._execute(ifiles, pipeline) if ret not in [True, False]: env.logger.warning( 'User defined execute function of a PipelineAction should return True or False' ) if ret: self._write_info(pipeline) # if self.output: return self.output else: return ifiles # for backward compatibility SkiptableAction = PipelineAction try: from .simulation import * hasSimuPOP = True except ImportError: hasSimuPOP = False class SequentialActions(PipelineAction): '''Define an action that calls a list of actions, specified by Action1, Action2 etc. This allows the specification of multiple small tasks in a single pipeline step. NOTE: this action is automatically applied if a list or tuple of actions are specified in the SPEC file (e.g. action=Action1(), Action2()). Examples: action=CheckCommands('bowtie'), CheckOutput('bowtie --version', '1.1.1') ''' def __init__(self, actions): ''' Parameters: actions (a tuple or list of actions): A list of actions that will be applied to ''' self.actions = [] for a in actions: if hasattr(a, '__call__'): self.actions.append(a.__call__) else: self.actions.append(a) def __call__(self, ifiles, pipeline=None): ''' Pass ifiles to the first action, take its output and pass it to the second action, and so on. Return the output from the last action as the result of this ``SequentialAction``. ''' for a in self.actions: # the input of the next action is the output of the # previous action. However, ${INPUT} is the same for all # options because it is substitute before ... ifiles = a(ifiles, pipeline) # return the output of the last action return ifiles class IfElse(PipelineAction): '''Define an action that runs an action with a given condition. ''' def __init__(self, cond, if_action=None, else_action=None): ''' Parameters: cond: a condition, with True, 'True' as true, empty string, False or 'False' as false. 
            if_action:
                action if condition is met

            else_action:
                action if condition is not met
        '''
        self.if_action = if_action
        self.else_action = else_action
        self.cond = cond

    def __call__(self, ifiles, pipeline=None):
        '''
        Execute ``if_action`` if the condition is met, and ``else_action``
        otherwise. Return the output of the executed action, or pass
        ``ifiles`` through if no action is executed.
        '''
        if self.cond in [False, 'False', '']:
            if self.else_action is not None:
                return self.else_action(ifiles, pipeline)
        else:
            if self.if_action is not None:
                return self.if_action(ifiles, pipeline)
        # no action is executed, pass input files through
        return ifiles


class CheckVariantToolsVersion(PipelineAction):
    '''Check the version of variant tools and determine if it is recent enough
    to execute the pipeline.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Raises:
        Fails if the version of variant tools used to execute the
        pipeline is older than the specified version.

    Examples:
        action=CheckVariantToolsVersion('2.5.0')
    '''

    def __init__(self, version=''):
        '''
        Parameters:
            version (string):
                Oldest version of variant tools that can be used to execute
                this pipeline
        '''
        self.min_version = version
        PipelineAction.__init__(self)

    def __call__(self, ifiles, pipeline=None):
        vtools_version = [
            int(x)
            for x in re.sub(r'\D', ' ', pipeline.VARS['vtools_version']).split()
        ]
        # e.g. minimal 2.2.0, vtools 2.1.1
        if [int(x) for x in re.sub(r'\D', ' ', self.min_version).split()
           ] > vtools_version:
            raise RuntimeError(
                'Version {} is required to execute this pipeline. '
                'Please upgrade your installation of variant tools (version {})'
                .format(self.min_version, pipeline.VARS['vtools_version']))
        return ifiles


class ImportModules(PipelineAction):
    '''Import functions and actions from a Python module. This action passes
    input files to output and does not change the pipeline.

    File Flow: Input passthrough, but import symbols to the pipeline.

            Pipeline
               ^
        INPUT =============> INPUT

    Raises:
        Raise a RuntimeError if one or more modules can not be imported.

    Examples:
        action=ImportModules('DNASeq_tools.py')
        action=ImportModules(['DNASeq_tools.py', 'simuPOP.demography'])
    '''

    def __init__(self, modules=[], script=''):
        '''Import one or more modules to be used by the existing pipeline.

        Parameters:
            modules (string or list of strings):
                One or more modules, which can be either the name of a system
                module or a .py file. In the latter case, Variant Tools will
                try to locate the file directly (a full path can be given),
                look for the module in the path of the pipeline (if a local
                pipeline is used), or download it from the Variant Tools
                Repository under directory pipeline.

            script (string or list of strings):
                One or more in-line scripts that define Python functions or
                customized actions that will be used in this pipeline. This
                allows users to define actions and utility functions that do
                not need to be shared with other pipelines but might be used
                repeatedly in this pipeline. Otherwise an
                ExecutePipelineFunction action can be used.
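
        Example:
            A sketch of an in-line helper defined through ``script`` (the
            function name and its body are illustrative):

                action=ImportModules(script="""
                def bam_name(sample):
                    return sample.replace(' ', '_') + '.bam'
                """)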
        '''
        if isinstance(modules, str):
            self.modules = [modules]
        else:
            self.modules = modules
        #
        if isinstance(script, str):
            self.script = script
        else:
            self.script = '\n'.join(script)

    def __call__(self, ifiles, pipeline=None):
        for module in self.modules:
            # this is a path to a .py file
            if module.endswith('.py'):
                if os.path.isfile(module):
                    pyfile = module
                # if the .py file is located in the same directory as the pipeline file
                elif pipeline is not None \
                    and os.path.isfile(os.path.join(os.path.split(pipeline.spec_file)[0], module)):
                    pyfile = os.path.join(
                        os.path.split(pipeline.spec_file)[0], module)
                else:
                    # try to download it from online
                    try:
                        pyfile = downloadFile('simulation/{}'.format(module))
                    except Exception:
                        try:
                            pyfile = downloadFile('pipeline/{}'.format(module))
                        except Exception as e:
                            raise ValueError(
                                'Failed to download required python module {}: {}'
                                .format(module, e))
                try:
                    p, f = os.path.split(
                        os.path.abspath(os.path.expanduser(pyfile)))
                    sys.path.append(p)
                    local_dict = __import__(f[:-3] if f.endswith('.py') else f,
                                            globals(), locals(),
                                            module.split('.', 1)[-1:])
                    env.logger.info(
                        '{} symbols are imported from module {}'.format(
                            len(local_dict.__dict__), module))
                    pipeline.GLOBALS.update(local_dict.__dict__)
                except Exception as e:
                    raise RuntimeError('Failed to import module {}: {}'.format(
                        module, e))
            # now a system module
            else:
                try:
                    # allow loading from current directory
                    sys.path.append(os.getcwd())
                    local_dict = __import__(module, globals(), locals(),
                                            module.split('.', 1)[-1:])
                    env.logger.info(
                        '{} symbols are imported from module {}'.format(
                            len(local_dict.__dict__), module))
                    pipeline.GLOBALS.update(local_dict.__dict__)
                except ImportError as e:
                    raise RuntimeError('Failed to import module {}: {}'.format(
                        module, e))
        # script
        if self.script:
            try:
                local_dict = {}
                exec(self.script, globals(), local_dict)
                env.logger.info(
                    '{} symbols are imported from the inline script'.format(
                        len(local_dict)))
                pipeline.GLOBALS.update(local_dict)
            except Exception as e:
                raise RuntimeError('Failed to execute script "{}": {}'.format(
                    self.script, e))
        return ifiles


class CheckCommands(PipelineAction):
    '''Check the existence of specified commands and raise an error if one
    of the commands does not exist.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Raises:
        A RuntimeError will be raised if a command is not found.

    Examples:
        action=CheckCommands('java')
        action=CheckCommands(['java', 'tophat2'])
    '''

    def __init__(self, commands):
        '''
        Parameters:
            commands (string or list of strings):
                Name of one or more commands to be checked. No option
                is allowed.
        '''
        PipelineAction.__init__(self)
        if isinstance(commands, str):
            self.commands = [commands]
        else:
            self.commands = commands

    def __call__(self, ifiles, pipeline=None):
        for cmd in self.commands:
            if which(cmd) is None:
                raise RuntimeError(
                    'Command {} does not exist. Please install it and try again.'
                    .format(cmd))
            else:
                env.logger.info('Command {} is located.'.format(cmd))
        return ifiles


class CheckOutput(PipelineAction):
    '''Run a command and check if its output matches at least one of the
    specified patterns. The pipeline will be terminated if failIfMismatch
    is set to True (default). Otherwise a warning message will be printed.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Raises:
        Raise a RuntimeError if the output of the command does not match
        any of the patterns, if ``failIfMismatch`` is set to ``True``.
    Examples:
        action=CheckOutput('tophat2 --version', ['v2.0.13', 'v2.0.14'])
        # if strict_version is a command line parameter
        action=CheckOutput('samtools', '0.1.19', %(strict_version)s)
    '''

    def __init__(self, command, patterns, failIfMismatch=True):
        '''
        Parameters:
            command (string):
                A command (with or without options)

            patterns (string or list of strings):
                One or more patterns (usually a piece of version string)
                that will be compared to the output of ``command``

            failIfMismatch (boolean):
                If set to ``True`` (default), the action will terminate the
                pipeline if the output of the command does not match any of
                the patterns. Otherwise a warning message will be printed
                when the output of the command does not match any of the
                patterns.
        '''
        self.command = command
        if isinstance(patterns, str):
            self.patterns = [patterns]
        else:
            self.patterns = patterns
        self.fail = failIfMismatch
        PipelineAction.__init__(self)

    def __call__(self, ifiles, pipeline=None):
        try:
            # do not use subprocess.check_output because I need to get
            # output even when the command returns non-zero return code
            p = subprocess.Popen(
                self.command,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True)
            odata, edata = p.communicate()
            output = odata.decode() + edata.decode()
            env.logger.trace('Output of command "{}" is "{}"'.format(
                self.command, output))
        except Exception as e:
            raise RuntimeError('Failed to execute command "{}": {}'.format(
                self.command, e))
        #
        if all([
                re.search(x, output, re.MULTILINE) is None
                for x in self.patterns
        ]):
            msg = ('Output of command "{}" ("{}") does not ' +
                   'match specified regular expression {}.').format(
                       self.command, ' '.join(output[:40].split()),
                       ' or '.join(self.patterns))
            if self.fail:
                raise RuntimeError(msg)
            else:
                env.logger.warning(msg)
        return ifiles


class CheckFiles(PipelineAction):
    '''Check the existence of specified files and raise an error if one
    of the files does not exist.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Raises:
        Raise a RuntimeError if any of the files is not found.

    Example:
        # assume gatk_path is a command line argument
        action=CheckFiles('%(gatk_path)s/GenomeAnalysisTK.jar',
            'Please point --gatk_path to a directory with GenomeAnalysisTK.jar')
    '''

    def __init__(self, files, message=''):
        '''
        Parameters:
            files (string or list of strings):
                One or more files to check.

            message (string):
                A message when one of the files cannot be found.
        '''
        if isinstance(files, str):
            self.files = [files]
        else:
            self.files = files
        self.message = message
        PipelineAction.__init__(self)

    def __call__(self, ifiles, pipeline=None):
        for f in self.files:
            if os.path.isfile(os.path.expanduser(f)):
                env.logger.info('{} is located.'.format(f))
            else:
                raise RuntimeError('Cannot locate {}: {}'.format(
                    f, self.message))
        return ifiles


class CheckDirs(PipelineAction):
    '''Check the existence of specified directories and raise an error
    if one of the directories does not exist.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Raises:
        Raise a RuntimeError if any of the directories is not found.

    Example:
        action=CheckDirs('${cmd_output}',
            'Value of parameter --output needs to be an existing directory')
    '''

    def __init__(self, dirs, message=''):
        '''
        Parameters:
            dirs (string or list of strings):
                One or more directories to check.

            message (string):
                A message when one of the directories cannot be found.
        '''
        if isinstance(dirs, str):
            self.dirs = [dirs]
        else:
            self.dirs = dirs
        self.message = message
        PipelineAction.__init__(self)

    def __call__(self, ifiles, pipeline=None):
        for d in self.dirs:
            if os.path.isdir(d):
                env.logger.info('Directory {} is located.'.format(d))
            else:
                raise RuntimeError('Cannot locate directory {}. {}'.format(
                    d, self.message))
        return ifiles


class TerminateIf(PipelineAction):
    '''Terminate a pipeline if a condition is met.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Raises:
        A RuntimeError will be raised to terminate the pipeline if
        the condition is met.

    Examples:
        action=TerminateIf(not '${cmd_output}', 'No --output is specified.')
    '''

    def __init__(self, cond, message=''):
        '''
        Parameters:
            cond (boolean):
                True or False. In practice, ``cond`` is usually a lambda
                function that checks the existence of a file or the value
                of a pipeline variable.

            message (string):
                A message to be output when the condition is met.
        '''
        self.cond = cond
        self.message = message
        PipelineAction.__init__(self)

    def __call__(self, ifiles, pipeline=None):
        if self.cond:
            raise RuntimeError(self.message)
        return ifiles


class WarnIf(PipelineAction):
    '''Send a warning message if a condition is met.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Examples:
        action=WarnIf('%(LGD)' == 'NA', 'Default value of parameter --LGD is used.')
    '''

    def __init__(self, cond, message):
        '''
        Parameters:
            cond (boolean):
                True or False. In practice, ``cond`` is usually a lambda
                function that checks the existence of a file or the value
                of a pipeline variable.

            message (string):
                A message to be output when the condition is met.
        '''
        self.cond = cond
        self.message = message
        PipelineAction.__init__(self)

    def __call__(self, ifiles, pipeline=None):
        if self.cond:
            env.logger.warning(self.message)
        return ifiles


class OutputText(PipelineAction):
    '''Write specified text to standard output, or a file if a filename is
    specified. The text can be a list of strings. A new line is added
    automatically to each line of the text.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Examples:
        action=OutputText('Hey, the biggest part is done.')
    '''

    def __init__(self, text='', output=None, mode='a'):
        '''
        Parameters:
            text (string or list of strings):
                Text to be written to output.

            output (a file name or None):
                Output file. The text will be written to standard output
                if no output is specified.

            mode (string):
                Mode to open file. 'a' for append and 'w' for overwrite.
        '''
        if not isinstance(text, str):
            self.text = ''.join([str(x) + '\n' for x in text])
        else:
            self.text = text + '\n'
        self.filename = output
        self.mode = mode
        PipelineAction.__init__(
            self, 'OutputText',
            self.filename if self.filename is not None else '')

    def __call__(self, ifiles, pipeline=None):
        if self.filename is not None:
            with open(self.filename, self.mode) as output:
                output.write(self.text)
        else:
            sys.stdout.write(self.text)
        return ifiles


class FieldsFromTextFile(PipelineAction):
    '''Read a text file, guess its delimiter and field names (from the
    header), and create field descriptions. If a vcf file is encountered,
    all fields will be exported.

    File Flow: extract format of input and output format.

        INPUT ==> Get Format ==> OUTPUT

    Raises:
        Raise a RuntimeError if this action failed to guess format (fields)
        from the input file.

    Examples:
        action=FieldsFromTextFile('format.txt')
    '''

    def __init__(self, output):
        '''
        Parameters:
            output:
                Output file that records the format of the input files.
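
        Example:
            For a tab-delimited input file with header columns ``chr`` and
            ``pos``, the recorded format would look like the following (the
            field types are guessed from the data, so the exact values shown
            here are illustrative):

                delimiter="\t"

                [chr]
                index=1
                type=VARCHAR(255)

                [pos]
                index=2
                type=INT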
        '''
        PipelineAction.__init__(self, 'FieldsFromTextFile', output)

    def _execute(self, ifiles, pipeline=None):
        if len(ifiles) > 1:
            env.logger.warning(
                'Only the format of the first input file will be used.')
        try:
            if ifiles[0].endswith('.vcf') or ifiles[0].endswith('.vcf.gz'):
                showTrack(ifiles[0], self.output[0])
            else:
                with open(self.output[0], 'w') as fo:
                    csv_dialect = csv.Sniffer().sniff(
                        open(ifiles[0]).read(8192))
                    fo.write('delimiter="{}"\n\n'.format(
                        csv_dialect.delimiter.replace('\t', r'\t')))
                    values = []
                    # newline='' is recommended for files read by csv.reader
                    with open(ifiles[0], newline='') as fi:
                        reader = csv.reader(fi, dialect=csv_dialect)
                        headers = next(reader)
                        values = [[] for x in headers]
                        for line in reader:
                            for idx in range(len(headers)):
                                values[idx].append(line[idx])
                            if len(values[0]) > 100:
                                break
                    #
                    for idx, header in enumerate(headers):
                        fo.write('[{}]\n'.format(validFieldName(header)))
                        fo.write('index={}\n'.format(idx + 1))
                        fo.write('type={}\n\n'.format(
                            typeOfValues(values[idx])))
        except Exception as e:
            raise RuntimeError('Failed to guess fields from {}: {}'.format(
                ifiles[0], e))
        #
        return True


class RewindExecution(Exception):
    pass


class NullAction(PipelineAction):
    '''A pipeline action that does nothing. This is usually used when the
    goal of the step is to change input, output, or assign variables to
    pipelines. The action will be assumed if an empty action line is given.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Example:
        action=
        action=NullAction()
    '''

    def __init__(self, output=[], *args, **kwargs):
        '''A null action that does nothing.'''
        PipelineAction.__init__(self, cmd='NullAction', output=output)

    def _execute(self, ifiles, pipeline=None):
        return True


class MonitorThread(threading.Thread):
    '''A thread that runs a target function and makes its return value
    available through ``join()``.'''

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        threading.Thread.__init__(self, group, target, name, args, kwargs)
        self._return = None

    def run(self):
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)

    def join(self):
        threading.Thread.join(self)
        return self._return


class SharedProcess:

    def __init__(self, runtime):
        self.runtime = runtime

    def update_prog(self):
        while True:
            with open(self.runtime.proc_prog, 'w') as prog:
                prog.write(' ')
            time.sleep(30)

    def __enter__(self):
        # wait for the availability of lock
        prog_time = None
        while True:
            # there is a progress file
            if os.path.isfile(self.runtime.proc_prog):
                if prog_time is None:
                    env.logger.trace('Job started with progress file {}'.format(
                        self.runtime.proc_prog))
                os.remove(self.runtime.proc_prog)
                prog_time = time.time()
            #
            if prog_time is None or time.time() - prog_time > 60:
                # no progress by another thread
                break
            #
            if os.path.isfile(self.runtime.proc_done):
                break
        #
        # start monitoring process
        self.update_process = Process(target=self.update_prog)
        self.update_process.start()

    def __exit__(self, type, value, traceback):
        with open(self.runtime.proc_done, 'w') as prog:
            prog.write('1')
        self.runtime.clear(['prog'])
        self.update_process.terminate()


class RunCommand(PipelineAction):
    """This action executes specified commands. If the pipeline is running
    in parallel mode and a submitter is specified, it will use the submitter
    command to execute the commands in a separate job.

    File Flow: Input passthrough if no output file is specified.

        INPUT ====> INPUT

    Generate output if one or more output files are specified.

        INPUT ==> CMD ==> OUTPUT

    Raises:
        Raises an error if a command fails to execute.
    Examples:
        # simple commands without checking output
        action=RunCommand(cmd='vtools init myproj -f')

        action=RunCommand(cmd=[
            '[ -d ${DIR1} ] || mkdir -p ${DIR1}',
            '[ -d ${DIR2} ] || mkdir -p ${DIR2}'
            ])

        # multiple commands, change working directory
        # check output
        action=RunCommand([
            'update_blastdb.pl human_genomic --decompress || true',
            'update_blastdb.pl nt --decompress || true',
            ],
            working_dir='${NCBI_RESOURCE_DIR}/blast/db',
            output=['${NCBI_RESOURCE_DIR}/blast/db/human_genomic.nal',
                '${NCBI_RESOURCE_DIR}/blast/db/nt.nal']
        )

        # run command in background, with pipes
        action=RunCommand('''samtools view -h ${INPUT} |
            awk '$6 ~/N/' |
            awk '{ if ($9 ~ /^-/) {print $1"\t-"} else print $1"\t+" }' |
            sort -T ${TEMP_DIR} -u | wc -l > ${ALIGNMENT_OUT}/junction.count''',
            output='${ALIGNMENT_OUT}/junction.count',
            submitter='sh {} &')
    """

    def __init__(self,
                 cmd='',
                 output=[],
                 working_dir=None,
                 submitter=None,
                 wait=True,
                 max_jobs=None):
        '''This action accepts one command (a string) or more commands (a list
        of strings) and executes them in a shell environment, possibly as a
        separate job.

        Parameters:
            cmd (string or list of strings):
                One or more commands to execute. The commands will be executed
                in shell mode so pipes are allowed.

            output (string or list of strings):
                Expected output files of the action. If specified, an execution
                signature will be created to record the input, output and
                command of the action, and the action will be ignored if the
                signature matches that of a previous run.

            working_dir (None or string):
                Working directory of the command. Variant Tools will change to
                this directory before executing the commands if a valid
                directory is passed.

            submitter (None or string):
                If a submitter is specified and the pipeline is executed in
                multi-job mode (e.g. --jobs 2), a shell script will be written
                with the commands to be executed. The submitter command will be
                executed with ``{}`` in parameter ``submitter`` replaced by the
                name of the shell script. For example, submitter='sh {} &' will
                run the job as a background job, and submitter='qsub -q long < {}'
                will submit the shell script to the long queue of a cluster
                system. Because the pipeline will be terminated if the submitter
                command fails, `qsub new_job ... && false` can be used to
                replace the running process by starting a new job and
                terminating the existing process intentionally.

            wait (True, False, or number of seconds):
                Whether or not to wait for the completion of a submitted job.
                The default is True, meaning that the master thread will wait
                for the outcome of this command before it continues. If you set
                this parameter to False, the pipeline execution will stop and
                you can re-run the pipeline after the subcommand has completed.
                You can also set a number to let the master thread wait for a
                pre-determined period of time. This option is useful if the
                subprocess might die.

            max_jobs: (deprecated)
        '''
        self.submitter = submitter
        self.working_dir = working_dir
        self.wait = wait
        if isinstance(output, str):
            self.output = [os.path.expanduser(output)]
        else:
            self.output = [os.path.expanduser(x) for x in output]
        #
        self.runtime = RuntimeFiles(self.output)
        self.start_time = time.time()
        if not cmd:
            cmd = ['echo "No command is executed."']
        PipelineAction.__init__(self, cmd=cmd, output=output)

    def _elapsed_time(self):
        '''Return the elapsed time since start time in human readable format'''
        second_elapsed = int(time.time() - self.start_time)
        days_elapsed = second_elapsed // 86400
        return ('{} days '.format(days_elapsed) if days_elapsed else '') + \
            time.strftime('%H:%M:%S', time.gmtime(second_elapsed % 86400))

    def _run_command(self):
        '''Call a list of external commands, raise an error if any of them
        fails.'''
        if self.runtime.proc_lck:
            env.lock(self.runtime.proc_lck, str(os.getpid()))
        for cur_cmd in self.cmd:
            if self.working_dir is not None and not os.path.isdir(
                    self.working_dir):
                os.makedirs(self.working_dir)
            env.logger.info('Running ``{}``{}'.format(
                cur_cmd,
                ' under {}'.format(self.working_dir) if self.working_dir else ''))
            ret = subprocess.call(
                cur_cmd,
                shell=True,
                stdout=None if self.runtime.proc_out is None else open(
                    self.runtime.proc_out, 'w'),
                stderr=None if self.runtime.proc_err is None else open(
                    self.runtime.proc_err, 'w'),
                cwd=self.working_dir)
            if ret < 0:
                if self.output:
                    try:
                        env.unlock(self.runtime.proc_lck, str(os.getpid()))
                    except:
                        pass
                raise RuntimeError(
                    "Command '{}' was terminated by signal {} after executing {}"
                    .format(cur_cmd, -ret, self._elapsed_time()))
            elif ret > 0:
                if self.output:
                    with open(self.runtime.proc_err) as err:
                        for line in err.read().split('\n')[-50:]:
                            env.logger.error(line)
                    try:
                        env.unlock(self.runtime.proc_lck, str(os.getpid()))
                    except:
                        pass
                raise RuntimeError(
                    "Execution of command '{}' failed after {} (return code {})."
                    .format(cur_cmd, self._elapsed_time(), ret))

    def _monitor(self):
        start_time = time.time()
        prog_time = None
        while True:
            if os.path.isfile(self.runtime.proc_prog):
                if prog_time is None:
                    env.logger.trace('Job started with progress file {}'.format(
                        self.runtime.proc_prog))
                os.remove(self.runtime.proc_prog)
                prog_time = time.time()
            #
            if prog_time is None:
                # if the job has not been started for 10 minutes, quit
                if time.time() - start_time > 600:
                    return (
                        'Background job has not been started after 10 minutes.')
            else:
                if time.time() - prog_time > 60:
                    return (
                        'Background job has not updated its progress for 1 minute.'
                    )
            #
            if os.path.isfile(self.runtime.proc_done):
                break
            else:
                if self.wait is False:
                    return (
                        'Do not wait for the completion of submitted job (wait=False).'
                    )
                if self.wait is not True and isinstance(
                        self.wait, int) and prog_time is not None and time.time(
                        ) - prog_time > self.wait:
                    return ('Quit after waiting {} seconds.'.format(self.wait))
                time.sleep(10)
        try:
            env.unlock(self.runtime.proc_lck, str(os.getpid()))
        except:
            env.logger.warning('Failed to remove lock for file {}'.format(
                self.output[0]))
        try:
            with open(self.runtime.proc_done) as done:
                ret = int(done.read().strip())
        except Exception as e:
            return (
                'Failed to retrieve return information for forked process from {}: {}'
                .format(self.runtime.proc_done, e))
        #
        if ret < 0:
            return (
                "Command '{}' was terminated by signal {} after executing {}"
                .format('; '.join(self.cmd), -ret, self._elapsed_time()))
        elif ret > 0:
            if self.output:
                with open(self.runtime.proc_err) as err:
                    for line in err.read().split('\n')[-50:]:
                        env.logger.error(line)
            return (
                "Execution of command '{}' failed after {} (return code {})."
                .format('; '.join(self.cmd), self._elapsed_time(), ret))
        # remove the .done file
        if self.output[0] not in self.pipeline.THREADS:
            return ('Output is not waited for by any thread')
        # DO NOT POP FROM ANOTHER THREAD, this will cause a race condition
        # (unless we use a thread-safe dictionary). In this case, we only need
        # to monitor the status of threads from the master thread.
        # self.pipeline.THREADS.pop(self.output[0])
        #
        # the thread will end here
        env.logger.trace('Thread for output {} ends.'.format(self.output[0]))
        self.runtime.clear(['done'])
        return ('')

    def _submit_command(self):
        '''Submit a job and wait for its completion.'''
        # use full path because the command might be submitted to a remote machine
        if os.path.isfile(self.runtime.proc_done):
            os.remove(self.runtime.proc_done)
        if self.runtime.proc_lck:
            env.lock(self.runtime.proc_lck, str(os.getpid()))
        #
        if os.path.isfile(self.runtime.proc_cmd):
            with open(self.runtime.proc_cmd) as old_cmd:
                old_script = old_cmd.read()
        else:
            old_script = None
        # create a batch file for execution
        with open(self.runtime.proc_cmd, 'w') as sh_file:
            sh_file.write('#PBS -o {}\n'.format(
                os.path.abspath(self.runtime.proc_out)))
            sh_file.write('#PBS -e {}\n'.format(
                os.path.abspath(self.runtime.proc_err)))
            sh_file.write('#PBS -N {}\n'.format(
                os.path.basename(self.output[0])))
            # sh_file.write('#PBS -N {}.{}_{}\n'.format(self.runtime.proc_err))
            sh_file.write('#PBS -V\n')
            # we try to reproduce the environment as much as possible because the
            # script might be executed in a different environment
            for k, v in os.environ.items():
                if any([k.startswith(x) for x in ('SSH', 'PBS', '_')
                       ]) or not k.replace('_', '').isalpha():
                    continue
                sh_file.write('export {}="{}"\n'.format(k, v.replace('\n', '\\n')))
            #
            sh_file.write('\ncd {}\n'.format(os.path.abspath(os.getcwd())))
            if self.working_dir is not None:
                sh_file.write('[ -d {0} ] || mkdir -p {0}\ncd {0}\n'.format(
                    os.path.abspath(self.working_dir)))
            #
            sh_file.write('''
progress() {{
    while true
    do
        touch {}
        sleep 30
    done
}}
progress &
MYSELF=$!
'''.format(self.runtime.proc_prog))
            sh_file.write('\n'.join(self.cmd))
            #
            sh_file.write(
                '\n\nCMD_RET=$?\nif [ $CMD_RET == 0 ]; then vtools admin --record_exe_info {} {}; fi\n'
                .format(os.getpid(), ' '.join(self.output)))
            # a signal to show the successful completion of the job
            sh_file.write(
                '\nrm -f {}\nkill $MYSELF >/dev/null 2>&1\necho $CMD_RET > {}\n'
                .format(self.runtime.proc_prog, self.runtime.proc_done))
        #
        # if old_script is not None:
        #     with open(self.runtime.proc_cmd) as new_cmd:
        #         if old_script == new_cmd.read():
        #             env.logger.debug('Identical script {}'.format(self.runtime.proc_cmd))
        # if there is no change in command
        other_prog = glob.glob(
            os.path.abspath(self.output[0]) + '.working_*')
        if other_prog:
            for op in other_prog:
                # if the working file is less than 2 minutes old, ...
                if time.time() - os.path.getmtime(op) < 120:
                    env.logger.info(
                        'Another process appears to be working on {}, checking ...'
.format(self.output[0])) last_time = os.path.getmtime(op) time.sleep(60) # if the working file does not change after 60 seconds if os.path.getmtime(op) != last_time: raise RuntimeError( 'Failed to submit job because a job is currently running or has been failed within 2 minutes. Status file is {} (pid is {})' .format(op, os.getpid())) # try to submit command if '{}' in self.submitter: submit_cmd = self.submitter.replace('{}', self.runtime.proc_cmd) else: submit_cmd = self.submitter # env.logger.info( 'Running job {} with command "{}" from directory {}'.format( self.runtime.proc_cmd, submit_cmd, os.getcwd())) ret = subprocess.call( submit_cmd, shell=True, stdout=open(self.runtime.proc_out, 'w'), stderr=open(self.runtime.proc_err, 'w'), cwd=self.working_dir) if ret != 0: try: env.unlock(self.runtime.proc_out, str(os.getpid())) except: pass # if ret < 0: raise RuntimeError( "Failed to submit job {} due to signal {} (submitter='{}')" .format(self.runtime.proc_cmd, -ret, self.submitter)) elif ret > 0: if os.path.isfile(self.runtime.proc_err): with open(self.runtime.proc_err) as err: msg = err.read() else: msg = '' raise RuntimeError( "Failed to submit job {} using submiter '{}': {}".format( self.runtime.proc_cmd, self.submitter, msg)) else: t = MonitorThread(target=self._monitor) t.daemon = True t.start() if self.output[0] in self.pipeline.THREADS: raise RuntimeError( 'Two spawned jobs have the same self.output[0] file {}' .format(self.output[0])) self.pipeline.THREADS[self.output[0]] = t def _execute(self, ifiles, pipeline=None): # substitute cmd by input_files and output_files if pipeline.jobs > 1 and self.submitter is not None and not self.output: env.logger.warning( 'Fail to execute in parallel because no output is specified.') # self.pipeline = pipeline # Submit the job on a cluster system # 1. if there is output, otherwise we cannot track the status of the job # 2. if a submit command is specified # 3. if --jobs with a value greater than 1 is used. if self.output and pipeline.jobs > 1 and self.submitter is not None: self._submit_command() return False else: self._run_command() return True class ExecutePythonCode(PipelineAction): '''This action execute a piece of Python code under the pipeline namespace, which means all pipeline variables will be available to the code. This action provides a way to implement pipeline actions on the fly. Arbitary parameters can be passed and be made available to the script. Pipeline variables are also available to the script as a variabe "pvars". File Flow: Input passthrough if no output file is specified. INPUT ====> INPUT Generate output if one or more output files are specified. INPUT ==> CMD ==> OUTPUT Raises: Raises an error if the python code fails to execute. ''' def __init__(self, script='', output=[], modules=[], export=None, **kwargs): '''This action accepts one or a list of strings and execute it as a piece of Python code. Pipeline variables are made available as a dictionary "pvars". Parameters: script (string or list of strings): One or more strings to execute. List of strings will be concatenated by new lines. modules (string or list of strings): Modules to import for this step. It is similar to action ImportModules but the imported symbols are available to this action only. export (None or filename): A filename to which the execute code will be exported. This option is useful for debugging because pipeline variables will be prepended to the script so that the exported script could be executed with no or minimal modification. 
kwargs (additional parameters): Any additional kwargs will be passed to the function executed. ''' if not script: env.logger.warning('No valid script is specified.') script = '' if isinstance(script, str): self.script = script else: self.script = '\n'.join(script) # m = hashlib.md5() m.update(self.script.encode('utf-8')) # self.kwargs = kwargs self.modules = modules self.export = export # PipelineAction.__init__( self, cmd='python -e {} {}'.format(m.hexdigest(), kwargs), output=output) def _execute(self, ifiles, pipeline=None): if self.export is not None: with open(self.export, 'w') as exported_script: exported_script.write('#!/usr/env python\n') exported_script.write( '#\n#Script exported by action ExecutePythonCode\n') # modules? exported_script.write(''.join([ 'import {}\n'.format(x) for x in ('sys', 'os', 're', 'glob') ])) exported_script.write( '\nfrom variant_tools.pipeline import *\n') for module in self.modules: exported_script.write('import {}\n'.format( os.path.basename(module)[:-3] if module .endswith('.py') else module)) # # pipeline variables exported_script.write('pvars=') pprint.pprint(pipeline.VARS.dict(), stream=exported_script) exported_script.write('\n') # exported_script.write( '# Pipeline variables are case insensitive\n') exported_script.write( 'pvars.update({x.upper():y for x,y in pvars.items()})\n') exported_script.write( 'pvars.update({x.lower():y for x,y in pvars.items()})\n') # # script exported_script.write(self.script) env.logger.info('Python code exported to ``{}``'.format( self.export)) for module in self.modules: # this is a path to a .py file if module.endswith('.py'): if os.path.isfile(module): pyfile = module # if the .py file locates in the same directory as the pipeline file elif pipeline is not None \ and os.path.isfile(os.path.join(os.path.split(pipeline.spec_file)[0], module)): pyfile = os.path.join( os.path.split(pipeline.spec_file)[0], module) else: # try to download it from online try: pyfile = downloadFile('simulation/{}'.format(module)) except Exception: try: pyfile = downloadFile('pipeline/{}'.format(module)) except Exception as e: raise ValueError( 'Failed to download required python module {}: {}' .format(module, e)) try: p, f = os.path.split( os.path.abspath(os.path.expanduser(pyfile))) sys.path.append(p) local_dict = __import__(f[:-3] if f.endswith('.py') else f, globals(), locals(), module.split('.', 1)[-1:]) env.logger.info( '{} symbols are imported form module {}'.format( len(local_dict.__dict__), module)) pipeline.GLOBALS.update(local_dict.__dict__) except Exception as e: raise RuntimeError('Failed to import module {}: {}'.format( module, e)) # now a system module else: try: # allow loading from current directory sys.path.append(os.getcwd()) local_dict = __import__(module, globals(), locals(), module.split('.', 1)[-1:]) env.logger.info( '{} symbols are imported form module {}'.format( len(local_dict.__dict__), module)) pipeline.GLOBALS.update(local_dict.__dict__) except ImportError as e: raise RuntimeError('Failed to import module {}: {}'.format( module, e)) env.logger.info('Executing Python script:\n{}'.format(self.script)) try: globals().update(pipeline.GLOBALS) local_dict = self.kwargs local_dict['pvars'] = pipeline.VARS exec(self.script, globals(), local_dict) except Exception as e: ex_type, ex, tb = sys.exc_info() traceback.print_tb(tb) raise RuntimeError('Failed to execute script: {}'.format(e)) return True class ExecuteScript(PipelineAction): """This action execute specified in-line script with specified command (bash, python, 
perl etc). If the pipeline is running in parallel mode and a submitter is specified, it will use the submitter command to execute the commands in a separate job. This action is the base action for ExecuteRScript, ExecutePerlScript and ExecutePythonScript and is usually not used directly. File Flow: Input passthrough if no output file is specified. INPUT ====> INPUT Generate output if one or more output files are specified. INPUT ==> CMD ==> OUTPUT Raises: Raises an error if an command fails to execute. """ def __init__(self, script='', interpreter='', args='', output=[], working_dir=None, export=None, submitter=None, suffix=None, wait=True): '''This action accepts one or a list of strings, write them to a temporary file and executes them by a interpreter, possibly as a separate job. Parameters: script (string or list of strings): One or more strings to execute. List of strings will be concatenated by new lines. The complete script will be written to a temporary file to be executed by an interpreter. interpreter (string): An interpreter that will be used to execute the script. It is usually just a command but more complex command line is allowed with '{}' replaced by the path to the temporary script. args (string or list of strings): Command line arguments which can be a single string or a list of strings. Filenames will be properly quoted if needed. output (string or list of strings): Expected output files of the action. If specified, the execution signature will be created to record the input, output and command of the action, and ignore the action if the signature matches of a previous run. working_dir (None or string): Working directory of the command. Variant Tools will change to this directory before executing the commands if a valid directory is passed. export (None or string): A filename to which the script will be exported before execution. This option makes it easier to debug the script because the script in the spec file might contain pipeline variables. submitter (None or string): If a submitter is specified and the pipeline is executed in multi-job mode (e.g. --jobs 2), the script will be submitted by the submitter. The submitter command will be executed with ``{}`` in parameter ``submitter`` replaced by the name of shell script. For example, submitter='sh {} &' will run the job as a background job, and submitter='qsub -q long < {}' will submit the shell script to the long queue of a cluster system. Because the pipeline will be terminated if the submitter command fails, `qsub new_job ... && false` can be used to replace the running process by start a new job and terminate the existing process intentionally. suffix (None or string): An optional suffix (file extension) to the temporary script. wait (True, False, or number of seconds): If a job is submitted, whether or not wait it to be completed. The default is True, meaning that the master thread will continue to execute until is will be waiting for the outcome of this command. If you set this parameter to False, the pipeline execute will be stopped and you can re-run the pipeline till the subcommand is completed. You can also set a number to let the master thread wait for a pre-determined period of time. This option is useful if the subprocess might die. 
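
        Example:
            A sketch of a step that writes a marker file through an in-line
            bash script (the filenames and pipeline variables are
            illustrative):

                action=ExecuteScript(
                    script=['echo "processing ${INPUT}"',
                            'touch ${CACHE_DIR}/alignment.done'],
                    interpreter='bash',
                    output='${CACHE_DIR}/alignment.done')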
''' self.interpreter = interpreter self.submitter = submitter self.working_dir = working_dir self.wait = wait if type(output) == str: self.output = [os.path.expanduser(output)] else: self.output = [os.path.expanduser(x) for x in output] # self.runtime = RuntimeFiles(self.output) self.start_time = time.time() if not script: raise ValueError('No valid script is specified.') if isinstance(script, str): self.script = script else: self.script = '\n'.join(script) # self.args = args env.logger.info('Executing\n{}'.format(self.script)) # m = hashlib.md5() m.update(self.script.encode('utf-8')) # self.script_file = tempfile.NamedTemporaryFile( mode='w+t', suffix=suffix, delete=False).name with open(self.script_file, 'w') as script_file: script_file.write(self.script) # if export is not None: with open(export, 'w') as exported_script: exported_script.write(self.script) env.logger.info('Script exported to ``{}``'.format(export)) PipelineAction.__init__( self, cmd='{} {}'.format(interpreter, m.hexdigest()), output=output) def __del__(self): try: os.remove(self.script_file) except Exception as e: env.logger.debug( 'Failed to remove temporary script file {}: {}'.format( self.script_file, e)) def _elapsed_time(self): '''Return the elapsed time in human readable format since start time''' second_elapsed = int(time.time() - self.start_time) days_elapsed = second_elapsed // 86400 return ('{} days '.format(days_elapsed) if days_elapsed else '') + \ time.strftime('%H:%M:%S', time.gmtime(second_elapsed % 86400)) def _run_command(self): '''Call a list of external command cmd, raise an error if any of them fails. ''' if self.runtime.proc_lck: env.lock(self.runtime.proc_lck, str(os.getpid())) if '{}' in self.interpreter: cmd = self.interpreter.replace('{}', pipes.quote(self.script_file)) else: cmd = self.interpreter + ' ' + pipes.quote(self.script_file) + \ (self.args if isinstance(self.args, str) else ' '.join(pipes.quote(x) for x in self.args)) env.logger.info('Running ``{}``'.format(cmd)) ret = subprocess.call( cmd, shell=True, stdout=None if self.runtime.proc_out is None else open( self.runtime.proc_out, 'w'), stderr=None if self.runtime.proc_err is None else open( self.runtime.proc_err, 'w'), cwd=self.working_dir) if ret < 0: if self.output: try: env.unlock(self.runtime.proc_lck, str(os.getpid())) except: pass raise RuntimeError( "Command '{}' was terminated by signal {} after executing {}" .format(cmd, -ret, self._elapsed_time())) elif ret > 0: if self.output: with open(self.runtime.proc_err) as err: for line in err.read().split('\n')[-50:]: env.logger.error(line) try: env.unlock(self.runtime.proc_lck, str(os.getpid())) except: pass raise RuntimeError( "Execution of command '{}' failed after {} (return code {})." 
.format(cmd, self._elapsed_time(), ret)) else: # write standard out to terminal if self.runtime.proc_out: with open(self.runtime.proc_out) as proc_out: for line in proc_out: env.logger.info(line.rstrip()) if self.runtime.proc_err: with open(self.runtime.proc_err) as proc_err: for line in proc_err: env.logger.warning(line.rstrip()) def _monitor(self): start_time = time.time() prog_time = None while True: if os.path.isfile(self.runtime.proc_prog): if prog_time is None: env.logger.trace('Job started with progress file {}'.format( self.runtime.proc_prog)) os.remove(self.runtime.proc_prog) prog_time = time.time() # if prog_time is None: # if the job has not been started for 10 minutes, quite if time.time() - start_time > 600: return ( 'Background job has not been started after 10 minutes.') else: if time.time() - prog_time > 60: return ( 'Background job has not updated it progress for 1 minutes.' ) if os.path.isfile(self.runtime.proc_done): break else: if self.wait is False: return if self.wait is not True and isinstance( self.wait, int) and prog_time is not None and time.time( ) - prog_time > self.wait: return ('Quitted after waiting {} seconds.'.format( self.wait)) time.sleep(10) try: env.unlock(self.runtime.proc_lck, str(os.getpid())) except: env.logger.warning('Failed to remove lock for file {}'.format( self.output[0])) pass with open(self.runtime.proc_done) as done: ret = int(done.read().strip()) # if ret < 0: return ( "Command '{}' was terminated by signal {} after executing {}" .format('; '.join(self.cmd), -ret, self._elapsed_time())) elif ret > 0: if self.output: with open(self.runtime.proc_err) as err: for line in err.read().split('\n')[-50:]: env.logger.error(line) return ( "Execution of command '{}' failed after {} (return code {})." .format('; '.join(self.cmd), self._elapsed_time(), ret)) # remove the .done file if not self.output[0] in self.pipeline.THREADS: return ('Output is not waited by any threads') # DO NOT POP FROM ANOTHER THREAD, this will cause race condition # (unless we use thread safe dictionry). In this case, we only need # to monitor the status of threads from the master threads. 
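        # (a pop() from this worker thread could race with the master thread
        # iterating over THREADS; leaving the entry in place is safe because
        # the master thread checks and cleans up finished threads itself)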
# self.pipeline.THREADS.pop(self.output[0]) # # the thread will end here env.logger.info('{} has been successfully generated.'.format( self.output[0])) self.runtime.clear(['done']) return ('') def _submit_command(self): '''Submit a job and wait for its completion.''' # use full path because the command might be submitted to a remote machine # if os.path.isfile(self.runtime.proc_done): os.remove(self.runtime.proc_done) if self.runtime.proc_lck: env.lock(self.runtime.proc_lck, str(os.getpid())) # if os.path.isfile(self.runtime.proc_cmd): with open(self.runtime.proc_cmd) as old_cmd: old_script = old_cmd.read() else: old_script = None # create a batch file for execution with open(self.runtime.proc_cmd, 'w') as sh_file: sh_file.write('#PBS -o {}\n'.format( os.path.abspath(self.runtime.proc_out))) sh_file.write('#PBS -e {}\n'.format( os.path.abspath(self.runtime.proc_err))) sh_file.write('#PBS -N {}\n'.format( os.path.basename(self.output[0]))) # sh_file.write('#PBS -N {}.{}_{}\n'.format(self.runtime.proc_err)) sh_file.write('#PBS -V\n') # we try to reproduce the environment as much as possible becaus ehte # script might be executed in a different environment for k, v in os.environ.items(): if any([k.startswith(x) for x in ('SSH', 'PBS', '_') ]) or not k.replace('_', '').isalpha(): continue sh_file.write('export {}="{}"\n'.format(k, v.replace('\n', '\\n'))) # sh_file.write('\ncd {}\n'.format(os.path.abspath(os.getcwd()))) if self.working_dir is not None: sh_file.write('[ -d {0} ] || mkdir -p {0}\ncd {0}\n'.format( os.path.abspath(self.working_dir))) # sh_file.write(''' progress() {{ while true do touch {} sleep 30 done }} progress & MYSELF=$! '''.format(self.runtime.proc_prog)) # interpreter if '{}' in self.interpreter: sh_file.write( self.interpreter.replace('{}', pipes.quote( self.script_file)) + (self.args if isinstance(self.args, str) else ' '.join( pipes.quote(x) for x in self.args)) + '\n') else: sh_file.write( self.interpreter + ' ' + pipes.quote(self.script_file) + (self.args if isinstance(self.args, str) else ' '.join( pipes.quote(x) for x in self.args)) + '\n') # sh_file.write( '\n\nCMD_RET=$?\nif [ $CMD_RET == 0 ]; then vtools admin --record_exe_info {} {}; fi\n' .format(os.getpid(), ' '.join(self.output))) # a signal to show the successful completion of the job sh_file.write( '\nrm -f {}\nkill $MYSELF >/dev/null 2>&1\necho $CMD_RET > {}\n' .format(self.runtime.proc_prog, self.runtime.proc_done)) # # try to submit command if '{}' in self.submitter: submit_cmd = self.submitter.replace('{}', self.runtime.proc_cmd) else: submit_cmd = self.submitter # if old_script is not None: # with open(self.runtime.proc_cmd) as new_cmd: # if old_script == new_cmd.read(): # if there is no change in command other_prog = glob.glob( os.path.abspath(self.output[0]) + '.working_*') if other_prog: for op in other_prog: if time.time() - os.path.getmtime(op) < 120: env.logger.info( 'Another process appears to be working on {}, checking ...' .format(self.output[0])) last_time = os.path.getmtime(op) time.sleep(60) # if the working file does not change after 60 seconds if os.path.getmtime(op) != last_time: raise RuntimeError( 'Failed to submit job because a job is currently running or has been failed within 2 minutes. 
Status file is {} (pid is {})' .format(op, os.getpid())) env.logger.info( 'Running job {} with command "{}" from directory {}'.format( self.runtime.proc_cmd, submit_cmd, os.getcwd())) ret = subprocess.call( submit_cmd, shell=True, stdout=open(self.runtime.proc_out, 'w'), stderr=open(self.runtime.proc_err, 'w'), cwd=self.working_dir) if ret != 0: try: env.unlock(self.runtime.proc_out, str(os.getpid())) except: pass # if ret < 0: raise RuntimeError( "Failed to submit job {} due to signal {} (submitter='{}')" .format(self.runtime.proc_cmd, -ret, self.submitter)) elif ret > 0: if os.path.isfile(self.runtime.proc_err): with open(self.runtime.proc_err) as err: msg = err.read() else: msg = '' raise RuntimeError( "Failed to submit job {} using submiter '{}': {}".format( self.runtime.proc_cmd, self.submitter, msg)) else: t = MonitorThread(target=self._monitor) t.daemon = True t.start() if self.output[0] in self.pipeline.THREADS: raise RuntimeError( 'Two spawned jobs have the same self.output[0] file {}' .format(self.output[0])) self.pipeline.THREADS[self.output[0]] = t def _execute(self, ifiles, pipeline=None): # substitute cmd by input_files and output_files if pipeline.jobs > 1 and self.submitter is not None and not self.output: env.logger.warning( 'Fail to execute in parallel because no output is specified.') # self.pipeline = pipeline # Submit the job on a cluster system # 1. if there is output, otherwise we cannot track the status of the job # 2. if a submit command is specified # 3. if --jobs with a value greater than 1 is used. if self.output and pipeline.jobs > 1 and self.submitter is not None: self._submit_command() return False else: self._run_command() return True class ExecuteRScript(ExecuteScript): '''Execute in-line R script using Rscript as interpreter. Please check action ExecuteScript for more details. ''' def __init__(self, script='', args='', output=[], export=None, working_dir=None, submitter=None, wait=True): ExecuteScript.__init__( self, script=script, interpreter='Rscript', args=args, output=output, export=export, working_dir=working_dir, submitter=submitter, suffix='.R', wait=wait) class ExecuteShellScript(ExecuteScript): '''Execute in-line shell script using bash as interpreter. Please check action ExecuteScript for more details. ''' def __init__(self, script='', args='', output=[], export=None, working_dir=None, submitter=None, wait=True): ExecuteScript.__init__( self, script=script, interpreter='bash', args=args, output=output, export=export, working_dir=working_dir, submitter=submitter, suffix='.sh', wait=wait) class ExecuteCShellScript(ExecuteScript): '''Execute in-line shell script using bash as interpreter. Please check action ExecuteScript for more details. ''' def __init__(self, script='', args='', output=[], export=None, working_dir=None, submitter=None, wait=True): ExecuteScript.__init__( self, script=script, interpreter='tcsh', args=args, output=output, export=export, working_dir=working_dir, submitter=submitter, suffix='.csh', wait=wait) class ExecutePythonScript(ExecuteScript): '''Execute in-line python script using python as interpreter. Please check action ExecuteScript for more details. 
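
    Example (the filenames and pipeline variables are illustrative):

        action=ExecutePythonScript(
            script=['with open("${CACHE_DIR}/samples.txt", "w") as out:',
                    '    out.write("${SAMPLE_NAMES}")'],
            output='${CACHE_DIR}/samples.txt')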
    '''

    def __init__(self,
                 script='',
                 args='',
                 output=[],
                 export=None,
                 working_dir=None,
                 submitter=None,
                 wait=True):
        ExecuteScript.__init__(
            self,
            script=script,
            interpreter='python',
            args=args,
            output=output,
            export=export,
            working_dir=working_dir,
            submitter=submitter,
            suffix='.py',
            wait=wait)


class ExecutePython3Script(ExecuteScript):
    '''Execute in-line python script using python3 as interpreter. Please
    check action ExecuteScript for more details.
    '''

    def __init__(self,
                 script='',
                 args='',
                 output=[],
                 export=None,
                 working_dir=None,
                 submitter=None,
                 wait=True):
        ExecuteScript.__init__(
            self,
            script=script,
            interpreter='python3',
            args=args,
            output=output,
            export=export,
            working_dir=working_dir,
            submitter=submitter,
            suffix='.py',
            wait=wait)


class ExecutePerlScript(ExecuteScript):
    '''Execute in-line perl script using perl as interpreter. Please
    check action ExecuteScript for more details.
    '''

    def __init__(self,
                 script='',
                 args='',
                 output=[],
                 export=None,
                 working_dir=None,
                 submitter=None,
                 wait=True):
        ExecuteScript.__init__(
            self,
            script=script,
            interpreter='perl',
            args=args,
            output=output,
            export=export,
            working_dir=working_dir,
            submitter=submitter,
            suffix='.perl',
            wait=wait)


class ExecuteRubyScript(ExecuteScript):
    '''Execute in-line ruby script using ruby as interpreter. Please
    check action ExecuteScript for more details.
    '''

    def __init__(self,
                 script='',
                 args='',
                 output=[],
                 export=None,
                 working_dir=None,
                 submitter=None,
                 wait=True):
        ExecuteScript.__init__(
            self,
            script=script,
            interpreter='ruby',
            args=args,
            output=output,
            export=export,
            working_dir=working_dir,
            submitter=submitter,
            suffix='.rb',
            wait=wait)


class CheckRLibraries(ExecuteRScript):
    '''Check the existence of specified R libraries. If a package is not
    available, this action will try to install it using "install.packages"
    and from bioconductor. The pipeline will raise an error if one of the
    libraries is not available and cannot be installed.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Raises:
        A RuntimeError will be raised if an R library is not installed.

    Examples:
        action=CheckRLibraries('edgeR')
        action=CheckRLibraries(['edgeR', 'AIMS'])
    '''

    def __init__(self, libraries):
        '''
        Parameters:
            libraries (string or list of strings):
                Name of one or more R libraries to be checked.
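
        Note:
            Availability is checked through a generated R script that writes
            one status line per library to a temporary file, which is parsed
            after execution, e.g.:

                edgeR AVAILABLE
                AIMS INSTALLED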
''' PipelineAction.__init__(self) if type(libraries) == type(''): self.libraries = [libraries] else: self.libraries = libraries # script # get temp filename self.output_file = tempfile.NamedTemporaryFile( mode='w+t', suffix='.txt', delete=False).name script = r''' for (package in c({0})) {{ if (require(package, character.only=TRUE, quietly=TRUE)) {{ write(paste(package, "AVAILABLE"), file="{1}", append=TRUE) next }} else {{ install.packages(package, repos="http://cran.us.r-project.org", quiet=TRUE) }} # if the package still does not exist if (!require(package, character.only=TRUE, quietly=TRUE)) {{ source("http://bioconductor.org/biocLite.R") biocLite(package, ask=FALSE) }} # if it still does not exist, write the package name to output if (require(package, character.only=TRUE, quietly=TRUE)) {{ write(paste(package, "INSTALLED"), file="{1}", append=TRUE) }} else {{ write(paste(package, "MISSING"), file="{1}", append=TRUE) }} }} '''.format(', '.join(['"{}"'.format(lib) for lib in self.libraries]), self.output_file) ExecuteRScript.__init__(self, script=script, output=self.output_file) def __call__(self, ifiles, pipeline=None): ExecuteRScript.__call__(self, ifiles, pipeline) with open(self.output_file) as tmp: count = 0 for line in tmp: lib, status = line.split() if status.strip() == "MISSING": env.logger.error( 'R Library {} is not available and cannot be installed.' .format(lib)) count += 1 elif status.strip() == 'AVAILABLE': env.logger.info('R library {} is available'.format(lib)) elif status.strip() == 'INSTALLED': env.logger.info( 'R library {} has been installed'.format(lib)) else: raise RuntimeError( 'This should not happen: {}'.format(line)) # try: os.remove(self.output_file) except: pass if count > 0: raise RuntimeError("One or more R libraries are not available.") return ifiles class DecompressFiles(PipelineAction): '''This action gets a list of input files from input file, decompressing input files (.tar.gz, .zip, etc) if necessary. The decompressed files are returned as output. One particular feature of this action is that it records content of large tar or tar.gz files to a manifest file and ignores the step if the manifest file exists. File Flow: Decompress input files INPUT ==> Decompress ==> OUTPUT Examples: action=DecompressFiles() ''' def __init__(self, dest_dir=None): ''' Parameters: dest_dir (None or string): Destination directory, default to current directory. ''' self.dest_dir = dest_dir if dest_dir else '.' PipelineAction.__init__(self) def _decompress(self, filename): '''If the file ends in .tar.gz, .tar.bz2, .bz2, .gz, .tgz, .tbz2, decompress it to dest_dir (current directory if unspecified), and return a list of files. Uncompressed files will be returned untouched. 
        If the destination files exist and are newer, this function will
        return immediately.'''
        mode = None
        # match each compressed tar format with the mode that tarfile expects
        if filename.lower().endswith('.tar.gz') or filename.lower().endswith(
                '.tgz'):
            mode = 'r:gz'
        elif filename.lower().endswith('.tar.bz2') or filename.lower().endswith(
                '.tbz2'):
            mode = 'r:bz2'
        elif filename.lower().endswith('.tar'):
            mode = 'r'
        elif filename.lower().endswith('.gz'):
            dest_file = os.path.join(self.dest_dir,
                                     os.path.basename(filename)[:-3])
            if existAndNewerThan(ofiles=dest_file, ifiles=filename):
                env.logger.info(
                    'Using existing decompressed file {}'.format(dest_file))
            else:
                env.logger.info('Decompressing {} to {}'.format(
                    filename, dest_file))
                with gzip.open(filename, 'rb') as gzinput, \
                        open(TEMP(dest_file), 'wb') as output:
                    content = gzinput.read(10000000)
                    while content:
                        output.write(content)
                        content = gzinput.read(10000000)
                # only rename the temporary file to the right one after
                # finishing everything; this avoids corrupted files
                os.rename(TEMP(dest_file), dest_file)
            return [dest_file]
        elif filename.lower().endswith('.bz2'):
            dest_file = os.path.join(self.dest_dir,
                                     os.path.basename(filename)[:-4])
            if existAndNewerThan(ofiles=dest_file, ifiles=filename):
                env.logger.warning(
                    'Using existing decompressed file {}'.format(dest_file))
            else:
                env.logger.info('Decompressing {} to {}'.format(
                    filename, dest_file))
                with bz2.BZ2File(filename, 'rb') as bzinput, \
                        open(TEMP(dest_file), 'wb') as output:
                    content = bzinput.read(10000000)
                    while content:
                        output.write(content)
                        content = bzinput.read(10000000)
                # only rename the temporary file to the right one after
                # finishing everything; this avoids corrupted files
                os.rename(TEMP(dest_file), dest_file)
            return [dest_file]
        elif filename.lower().endswith('.zip'):
            bundle = zipfile.ZipFile(filename)
            bundle.extractall(self.dest_dir)
            env.logger.info('Decompressing {} to {}'.format(
                filename, self.dest_dir))
            return [
                os.path.join(self.dest_dir, name) for name in bundle.namelist()
            ]
        #
        # if it is a tar file
        if mode is not None:
            env.logger.info(
                'Extracting fastq sequences from tar file {}'.format(filename))
            #
            # NOTE: opening a compressed tar file can take a long time because
            # it needs to scan the whole file to determine its content. A
            # manifest file is therefore created for the tar file in dest_dir
            # to avoid re-opening the tar file when it is processed again.
manifest = RuntimeFiles(filename).manifest all_extracted = False dest_files = [] if existAndNewerThan(ofiles=manifest, ifiles=filename): all_extracted = True for f in [x.strip() for x in open(manifest).readlines()]: dest_file = os.path.join(self.dest_dir, os.path.basename(f)) if existAndNewerThan(ofiles=dest_file, ifiles=filename): dest_files.append(dest_file) env.logger.info( 'Using existing extracted file {}'.format( dest_file)) else: all_extracted = False # if all_extracted: return dest_files # # create a temporary directory to avoid corrupted file due to interrupted decompress try: os.mkdir(os.path.join(self.dest_dir, 'tmp')) except: # directory might already exist pass # dest_files = [] with tarfile.open(filename, mode) as tar: # only extract files files = [x.name for x in tar.getmembers() if x.isfile()] # save content to a manifest with open(manifest, 'w') as manifest: for f in files: manifest.write(f + '\n') for f in files: # if there is directory structure within tar file, decompress all to the current directory dest_file = os.path.join(self.dest_dir, os.path.basename(f)) dest_files.append(dest_file) if existAndNewerThan(ofiles=dest_file, ifiles=filename): env.logger.info( 'Using existing extracted file {}'.format( dest_file)) else: env.logger.info('Extracting {} to {}'.format( f, dest_file)) tar.extract(f, os.path.join(self.dest_dir, 'tmp')) # move to the top directory with the right name only after the file has been properly extracted shutil.move( os.path.join(self.dest_dir, 'tmp', f), dest_file) # set dest_files to the same modification time. This is used to # mark the right time when the files are created and avoid the use # of archieved but should-not-be-used files that might be generated later [os.utime(x, None) for x in dest_files] return dest_files # return source file if nothing needs to be decompressed return [filename] def __call__(self, ifiles, pipeline=None): # decompress input files and return a list of output files filenames = [] for filename in ifiles: filenames.extend(self._decompress(filename)) filenames.sort() return filenames class RemoveIntermediateFiles(PipelineAction): '''This action removes specified files (not the step input files) and replaces them with their signature (file size, md5 signature etc). A pipeline can bypass completed steps with these files as input or output by checking the signatures. In contrast, the steps would have to be re-run if the files are removed from the file system. File Flow: Input passthrough. Specified files are replaced by their signature. INPUT ====> INPUT Examples: action=RemoveIntermediateFiles('${OUTPUT200}') action=RemoveIntermediateFiles('${OUTPUT200} ${OUTPUT330}') action=RemoveIntermediateFiles(['${OUTPUT200}', '${OUTPUT330}']) ''' def __init__(self, files): '''Replace ``files`` with their signatures. This pipeline passes its input to output and does not change the flow of pipeline. Parameters: files (string or list of strings) One or more files to be removed. Multiple files can be specified in the same string if they are separated by spaces. 
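
        Note:
            Each removed file ``FILE`` is replaced by a small ``FILE.file_info``
            file that records its signature (file size, partial md5 etc), so
            that completed steps that use ``FILE`` as input or output can still
            be bypassed by signature checking.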
        '''
        if isinstance(files, str):
            self.files_to_remove = [files]
        else:
            self.files_to_remove = files
        PipelineAction.__init__(self)

    def _getFiles(self):
        for name in self.files_to_remove:
            files = shlex.split(name)
            for f in files:
                yield f

    def __call__(self, ifiles, pipeline=None):
        env.logger.trace('Remove intermediate files {}'.format(
            ' '.join(self.files_to_remove)))
        for f in self._getFiles():
            if not os.path.isfile(f):
                if os.path.isfile(f + '.file_info'):
                    env.logger.info('Keeping existing {}.file_info.'.format(f))
                else:
                    raise RuntimeError(
                        'Failed to create {}.file_info: Missing input file.'.format(f))
            else:
                FileInfo(f).save()
                env.logger.info('Replace {0} with {0}.file_info'.format(f))
                try:
                    os.remove(f)
                except Exception:
                    env.logger.warning('Failed to remove intermediate file {}'.format(f))
        return ifiles


class LinkToDir(PipelineAction):
    '''Create hard links of input files to a specified directory. This is
    usually used to link input files to a common cache directory so that all
    operations can be performed on that directory.

    File Flow: Link input files to the specified destination directory.

        INPUT == LINK ==> DEST_DIR/INPUT

    Examples:
        action=LinkToDir('cache')
    '''

    def __init__(self, dest_dir):
        '''
        Parameters:
            dest_dir (string):
                A directory to which input files will be linked. The
                directory will be created if it does not exist.
        '''
        self.dest = dest_dir
        if not os.path.isdir(self.dest):
            env.logger.info('Creating directory {}'.format(self.dest))
            try:
                os.makedirs(self.dest)
            except Exception as e:
                raise RuntimeError('Failed to create directory {}: {}'.format(self.dest, e))
            if not os.path.isdir(self.dest):
                raise RuntimeError('Failed to create directory {}'.format(self.dest))
        PipelineAction.__init__(self)

    def __call__(self, ifiles, pipeline=None):
        ofiles = []
        for filename in ifiles:
            path, basename = os.path.split(filename)
            if not os.path.isfile(filename):
                if os.path.isfile(filename + '.file_info'):
                    dest_file = os.path.join(self.dest, basename) + '.file_info'
                    if os.path.isfile(dest_file):
                        if not os.path.samefile(filename + '.file_info', dest_file):
                            os.remove(dest_file)
                            env.logger.info('Linking {} to {}'.format(filename, self.dest))
                            os.link(filename + '.file_info',
                                    os.path.join(self.dest, basename) + '.file_info')
                        else:
                            env.logger.trace('Reusing existing linked file_info file: {}'.format(
                                os.path.join(self.dest, basename) + '.file_info'))
                    else:
                        env.logger.info('Linking {} to {}'.format(filename, self.dest))
                        os.link(filename + '.file_info',
                                os.path.join(self.dest, basename) + '.file_info')
                else:
                    raise RuntimeError(
                        'Failed to link {} to directory {}: file does not exist'.format(
                            filename, self.dest))
            else:
                dest_file = os.path.join(self.dest, basename)
                if os.path.isfile(dest_file):
                    if not os.path.samefile(filename, dest_file):
                        os.remove(dest_file)
                        env.logger.info('Linking {} to {}'.format(filename, self.dest))
                        os.link(filename, dest_file)
                    else:
                        env.logger.trace('Reusing existing linked file: {}'.format(dest_file))
                else:
                    env.logger.info('Linking {} to {}'.format(filename, self.dest))
                    os.link(filename, dest_file)
            ofiles.append(os.path.join(self.dest, basename))
        return ofiles


class DownloadResource(PipelineAction):
    '''Download resources to the specified destination directory. dest_dir
    can be a full path name or a directory relative to
    $local_resource/pipeline_resource where $local_resource is the local
    resource directory of the project (defaults to ~/.variant_tools, see
    runtime option local_resource for details).
    The default pipeline resource directory is
    $local_resource/pipeline_resource/NAME where NAME is the name of the
    pipeline.

    File Flow: Input passthrough.

        INPUT ====> INPUT

    Examples:
        action=DownloadResource(resource='ftp://igenome:[email protected]/Homo_sapiens/UCSC/hg19/Homo_sapiens_UCSC_hg19.tar.gz',
            dest_dir="${LOCAL_RESOURCE}/iGenomes")
        action=DownloadResource(resource='ftp://[email protected]/bundle/2.8/hg19/1000G_omni2.5.hg19.sites.vcf.gz
            ftp://[email protected]/bundle/2.8/hg19/1000G_omni2.5.hg19.sites.vcf.gz.md5',
            dest_dir='${LOCAL_RESOURCE}/GATK')

    NOTE:
        1. If a FILE.md5 file is downloaded, it will be used to validate FILE.
        2. The resources will be automatically decompressed if decompress=True
           (default). You would get both FILE and FILE.gz if you downloaded
           FILE.gz.
    '''

    def __init__(self, resource, dest_dir, output=[], decompress=True):
        '''Download resources from the URLs specified in ``resource``.

        Parameters:
            dest_dir:
                Directory where the downloaded resources will be placed.
        '''
        self.resource = [x for x in resource.split() if x]
        if not dest_dir or type(dest_dir) != str:
            raise ValueError('Invalid resource directory {}'.format(dest_dir))
        else:
            self.pipeline_resource = os.path.expanduser(dest_dir)
        try:
            if not os.path.isdir(self.pipeline_resource):
                os.makedirs(self.pipeline_resource)
        except Exception:
            raise RuntimeError('Failed to create pipeline resource directory {}'.format(
                self.pipeline_resource))
        self.decompress = decompress
        PipelineAction.__init__(
            self,
            cmd='Download Resource {} to {}'.format(resource, dest_dir),
            output=output)

    def __call__(self, ifiles, pipeline=None):
        saved_dir = os.getcwd()
        os.chdir(self.pipeline_resource)
        ofiles, md5files = self._downloadFiles(ifiles)
        self._validate(md5files)
        os.chdir(saved_dir)
        return ofiles

    def _validate(self, md5_files):
        if md5_files:
            prog = ProgressBar('Validating md5 signature',
                               sum([x[1] for x in md5_files]))
            mismatched_files = []
            for filename, s in md5_files:
                try:
                    downloaded_md5 = open(filename + '.md5').readline().split()[0]
                    calculated_md5 = calculateMD5(filename, partial=False)
                    if downloaded_md5 != calculated_md5:
                        mismatched_files.append(filename)
                except Exception as e:
                    env.logger.warning('Failed to verify md5 signature of {}: {}'.format(
                        filename[:-4], e))
                prog.update(prog.count + s)
            prog.done()
            if mismatched_files:
                env.logger.warning(
                    'md5 signature of {} mismatch. '
                    'Please remove {} and try again.'.format(
                        ', '.join(mismatched_files),
                        'this file' if len(mismatched_files) == 1 else 'these files'))

    def _downloadFiles(self, ifiles):
        '''Download resources.'''
        # decompress all .gz files
        skipped = []
        md5_files = []
        for cnt, URL in enumerate(sorted(self.resource)):
            filename = URL.rsplit('/', 1)[-1]
            dest_file = os.path.join(self.pipeline_resource, filename)
            try:
                if os.path.isfile(dest_file):
                    skipped.append(filename)
                else:
                    downloadURL(URL, dest_file, False,
                                message='{}/{} {}'.format(cnt + 1, len(self.resource), filename))
            except KeyboardInterrupt as e:
                raise e
            except Exception as e:
                raise RuntimeError('Failed to download {}: {} {}'.format(
                    filename, type(e).__name__, e))
            #
            if filename.endswith('.tar.gz'):
                manifest_file = RuntimeFiles(filename).manifest
                env.logger.trace('Checking manifest {}'.format(manifest_file))
                decompress = not os.path.isfile(manifest_file)
                if not decompress:
                    with open(manifest_file) as mf:
                        for item in mf:
                            if not os.path.isfile(item.strip()):
                                decompress = True
                                break
                if decompress:
                    with tarfile.open(filename, 'r:gz') as tar:
                        s = delayedAction(env.logger.info, 'Extracting {}'.format(filename))
                        tar.extractall(self.pipeline_resource)
                        del s
                        # only extract files
                        files = [x.name for x in tar.getmembers() if x.isfile()]
                        # save content to a manifest
                        with open(manifest_file, 'w') as manifest:
                            for f in files:
                                manifest.write(f + '\n')
            elif filename.endswith('.gz'):
                if not existAndNewerThan(ofiles=filename[:-3], ifiles=filename):
                    s = delayedAction(env.logger.info, 'Decompressing {}'.format(filename))
                    decompressGzFile(filename, inplace=False, force=True)
                    del s
            elif filename.endswith('.zip'):
                manifest_file = RuntimeFiles(filename).manifest
                env.logger.trace('Checking manifest {}'.format(manifest_file))
                decompress = not os.path.isfile(manifest_file)
                if not decompress:
                    with open(manifest_file) as mf:
                        for item in mf:
                            if not os.path.isfile(item.strip()):
                                decompress = True
                                break
                if decompress:
                    env.logger.trace('Decompressing {}'.format(filename))
                    s = delayedAction(env.logger.info, 'Decompressing {}'.format(filename))
                    bundle = zipfile.ZipFile(filename)
                    bundle.extractall(os.path.dirname(filename))
                    with open(manifest_file, 'w') as manifest:
                        for f in bundle.namelist():
                            manifest.write(f + '\n')
                    del s
            #
            if filename.endswith('.md5') and os.path.isfile(filename[:-4]):
                md5_files.append([filename[:-4], os.path.getsize(filename[:-4])])
        if skipped:
            env.logger.info('Using {} existing resource files under {}.'.format(
                ', '.join(skipped), self.pipeline_resource))
        return ifiles, md5_files


class _CaseInsensitiveDict(MutableMapping):
    """A case-insensitive ``dict``-like object that

    1. limits the type of items to strings or lists of strings,
    2. returns '' if the key does not exist (yielding a warning), and
    3. allows attribute-like access.
    """

    def __init__(self, data=None, **kwargs):
        self._store = dict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the uppercased key for lookups, but store the actual
        # key alongside the value.
        # and value != self._store[key.upper()][1] else 'set'
        reset = 'reset' if key.upper() in self._store else 'set'
        if not isinstance(value, (str, list, tuple)):
            value = str(value)
            env.logger.warning('Pipeline variable {} is converted to "{}"'.format(key, value))
        if isinstance(value, (list, tuple)) and \
                not all([isinstance(x, str) for x in value]):
            raise ValueError(
                'Only strings or lists of strings are allowed for pipeline '
                'variables: {} for key {}'.format(value, key))
        self._store[key.upper()] = (key, value)
        if isinstance(value, str) or len(value) <= 2 or len(str(value)) < 50:
            # skip inputXXX, outputXXX ... (these variables are not recommended to use)
            if re.match(r'^(input|INPUT|output|OUTPUT)\d+$', key) is None:
                env.logger.debug('Pipeline variable ``{}`` is {} to ``{}``'.format(
                    key, reset, str(value)))
        else:
            # should be a list or tuple
            val = str(value).split(' ')[0] + ' ...] ({} items)'.format(len(value))
            # skip inputXXX, outputXXX ... (these variables are not recommended to use)
            if re.match(r'^(input|INPUT|output|OUTPUT)\d+$', key) is None:
                env.logger.debug('Pipeline variable ``{}`` is {} to ``{}``'.format(
                    key, reset, val))

    def __contains__(self, key):
        return key.upper() in self._store

    def dict(self):
        return {x: y for x, y in self._store.values()}

    def __setattr__(self, key, value):
        if key == '_store':
            self.__dict__[key] = value
        else:
            self.__setitem__(key, value)

    def __getattr__(self, key):
        return self.__getitem__(key)

    def __getitem__(self, key):
        return self._store[key.upper()][1]

    def __delitem__(self, key):
        del self._store[key.upper()]

    def __iter__(self):
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def upper_items(self):
        """Like iteritems(), but with all uppercase keys."""
        return ((upperkey, keyval[1]) for (upperkey, keyval) in self._store.items())

    def __eq__(self, other):
        if isinstance(other, collections.Mapping):
            other = _CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.upper_items()) == dict(other.upper_items())

    # Copy is required
    def copy(self):
        return _CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, dict(self.items()))


class Pipeline:
    '''The Variant Tools pipeline class. Its instance will be passed to each
    action to provide runtime information. An action should not change any
    attribute of the pipeline, except for setting additional variables
    through its ``VARS`` dictionary. Note that VARS is a case-insensitive
    dictionary but it is generally recommended to use CAPITAL names for
    pipeline variables.
    '''

    def __init__(self, name, extra_args=[], pipeline_type='pipeline',
                 verbosity=None, jobs=1):
        self.pipeline = PipelineDescription(name, extra_args, pipeline_type)
        self.spec_file = self.pipeline.spec_file
        self.verbosity = verbosity
        self.jobs = jobs

    def limit_steps(self, psteps, allowed_steps):
        '''Restrict the steps of a pipeline using allowed_steps'''
        all_steps = {int(x.index): False for x in psteps}
        #
        for item in allowed_steps.split(','):
            # remove spaces
            item = ''.join([x for x in item if x != ' '])
            if item.isdigit():
                # pipeline:100
                all_steps[int(item)] = True
            elif '-' in item and item.count('-') == 1:
                l, u = item.split('-')
                if (l and not l.isdigit()) or (u and not u.isdigit()) or \
                        (l and u and int(l) > int(u)):
                    raise ValueError('Invalid pipeline step item {}'.format(item))
                # pipeline:-100, pipeline:100- or pipeline:10-100
                if not l:
                    l = min(all_steps.keys())
                if not u:
                    u = max(all_steps.keys())
                #
                for key in all_steps.keys():
                    if key >= int(l) and key <= int(u):
                        all_steps[key] = True
            else:
                raise ValueError('Invalid pipeline step item {}'.format(item))
        #
        # disable limited steps
        for idx in range(len(psteps)):
            if not all_steps[int(psteps[idx].index)]:
                psteps[idx].options.append('skip')
        env.logger.warning('Steps {} are skipped due to restriction {}'.format(
            ','.join([str(x) for x in all_steps.keys() if not all_steps[x]]),
            allowed_steps))

    def execute(self, pname, **kwargs):
        allowed_steps = None
        if not pname:
            pname = ''
        else:
            if ':' in pname:
                pname, allowed_steps = pname.split(':', 1)
        if not pname:
            if len(self.pipeline.pipelines) == 1:
                pname = list(self.pipeline.pipelines.keys())[0]
            elif 'default' in self.pipeline.pipelines:
                pname = 'default'
            else:
                raise ValueError(
                    'Name of pipeline should be specified because '
                    '{}.pipeline defines more than one pipeline without a default one. '
                    'Available pipelines are: {}.'.format(
                        self.pipeline.name,
                        ', '.join(self.pipeline.pipelines.keys())))
        elif pname not in self.pipeline.pipelines.keys():
            raise ValueError('Pipeline {} is undefined in configuration file '
                             '{}. Available pipelines are: {}'.format(
                                 pname, self.pipeline.name,
                                 ', '.join(self.pipeline.pipelines.keys())))
        #
        psteps = self.pipeline.pipelines[pname]
        if allowed_steps is not None:
            self.limit_steps(psteps, allowed_steps)
        #
        # the project will be opened when needed
        with Project(mode=['ALLOW_NO_PROJ', 'READ_ONLY'],
                     verbosity=self.verbosity) as proj:
            self.VARS = _CaseInsensitiveDict(
                home=os.path.expanduser('~'),
                temp_dir=env.temp_dir,
                cache_dir=env.cache_dir,
                local_resource=env.local_resource,
                ref_genome_build=proj.build if proj.build is not None else '',
                pipeline_name=pname,
                spec_file=self.spec_file,
                model_name=pname,
                vtools_version=proj.version,
                working_dir=os.getcwd(),
                pipeline_format=self.pipeline.pipeline_format)
        if not os.path.isdir(env.cache_dir):
            os.makedirs(env.cache_dir)
        # these are command line options
        if float(self.pipeline.pipeline_format) <= 1.0:
            if 'cmd_input' in self.pipeline.commandline_opts:
                if not self.pipeline.commandline_opts['cmd_input']:
                    self.pipeline.commandline_opts['cmd_input'] = []
                else:
                    self.pipeline.commandline_opts['cmd_input'] = \
                        self.pipeline.commandline_opts['cmd_input'].split(',')
            if 'cmd_output' in self.pipeline.commandline_opts:
                if not self.pipeline.commandline_opts['cmd_output']:
                    self.pipeline.commandline_opts['cmd_output'] = []
                else:
                    self.pipeline.commandline_opts['cmd_output'] = \
                        self.pipeline.commandline_opts['cmd_output'].split(',')
        self.VARS.update(self.pipeline.commandline_opts)
        self.VARS.update({k: str(v) for k, v in kwargs.items()})
        if 'cmd_input' not in self.VARS:
            self.VARS['cmd_input'] = []
        if 'cmd_output' not in self.VARS:
            self.VARS['cmd_output'] = []
        # if there is an output file, write the log to .log
        if self.VARS['cmd_output'] and 'logfile' not in self.VARS:
            self.VARS['logfile'] = self.VARS['cmd_output'][0] + '.log'
        #
        self.GLOBALS = {}
        self.GLOBALS.update(globals())
        self.THREADS = {}
        # we need to put self.pipeline.pipeline_vars in self.VARS because
        # they might refer to each other
        self.VARS.update(self.pipeline.pipeline_vars)
        for key, val in self.pipeline.pipeline_vars.items():
            # if key in ('vtools_version', 'spec_file', 'home', 'pipeline_name', 'model_name'):
            #     raise ValueError('Cannot reset read-only pipeline variable {}'.format(key))
            self.VARS[key] = substituteVars(val, self.VARS, self.GLOBALS, asString=False)
        for key, val in self.VARS.items():
            if key == 'working_dir' and val != os.getcwd():
                env.logger.warning('Changing working directory to {}'.format(val))
                os.chdir(val)
            if key == 'cache_dir' and val != env.cache_dir:
                env.logger.warning('Changing cache directory to {}'.format(val))
                env.cache_dir = val
        #
        if 'logfile' in self.VARS:
            env.logger.info('Logging information is saved to {}'.format(
                self.VARS['logfile']))
            if '/' in self.VARS['logfile']:
                d = os.path.split(self.VARS['logfile'])[0]
                if not os.path.isdir(d):
                    env.logger.info('Making directory {} for output file'.format(d))
                    os.makedirs(d)
            ch = logging.FileHandler(self.VARS['logfile'].lstrip('>'), mode='a')
            ch.setLevel(logging.DEBUG)
            ch.setFormatter(logging.Formatter('%(asctime)s: %(levelname)s: %(message)s'))
            env.logger.addHandler(ch)
        #
        ifiles = self.VARS['cmd_input']
        step_index = 0
        rewind_count = 0
        while True:
            # step_index can jump back and forth depending on the
            # execution status of each step
            command = psteps[step_index]
            if 'skip' in command.options:
                step_index += 1
                env.logger.info('Step {}.{}_{} is skipped'.format(
                    self.pipeline.name, pname, command.index))
                if step_index == len(psteps):
                    break
                step_output = []
                continue
            self.VARS['pipeline_step'] = command.index
            env.logger.info('Executing ``{}.{}_{}``: {}'.format(
                self.pipeline.name, pname, command.index,
                ' '.join(command.comment.split())))
            # init
            for key, val in command.init_action_vars:
                self.VARS[key] = substituteVars(val, self.VARS, self.GLOBALS, asString=False)
            # substitute ${} variables
            emitter = None
            if 'no_input' in command.options or 'independent' in command.options:
                step_input = []
                step_named_input = []
            elif command.input is None or not command.input.strip():
                step_input = ifiles
                step_named_input = [['', ifiles]]
            else:
                command_input_line = substituteVars(command.input, self.VARS, self.GLOBALS)
                if ':' in command_input_line:
                    input_line, emitter_part = command_input_line.split(':', 1)
                else:
                    input_line = command_input_line
                    emitter_part = ''
                #
                if not input_line.strip():
                    step_input = ifiles
                    step_named_input = [['', ifiles]]
                else:
                    # look for patterns of name=filenames
                    pieces = re.split(r'([\w\d_]+\s*=)', input_line)
                    step_named_input = []
                    for piece in pieces:
                        if not piece.strip():
                            continue
                        if piece.endswith('='):
                            step_named_input.append([piece[:-1].strip(), []])
                        else:
                            expanded_files = sum(
                                [glob.glob(os.path.expanduser(x))
                                 for x in shlex.split(piece)], [])
                            if not expanded_files:
                                raise ValueError(
                                    '{} does not expand to any valid file.'.format(piece))
                            if not step_named_input:
                                step_named_input.append(['', expanded_files])
                            else:
                                step_named_input[-1][1].extend(expanded_files)
                    #
                    step_input = sum([x[1] for x in step_named_input if x[0] == ''], [])
                    if not step_input:
                        step_input = ifiles
                if emitter_part:
                    try:
                        # remove ${INPUT} because it is determined by the emitter
                        if 'input' in self.VARS:
                            self.VARS.pop('input')
                        emitter = eval(
                            'EmitInput({})'.format(
                                substituteVars(emitter_part, self.VARS, self.GLOBALS)),
                            globals(), self.GLOBALS)
                    except Exception as e:
                        raise ValueError(
                            'Failed to interpret input emit options "{}"'.format(e))
            #
            #
            self.VARS['input{}'.format(command.index)] = step_input
            self.VARS['input'] = step_input
            self.step_dependent_files = []
            for n, f in step_named_input:
                if n:
                    self.VARS['input{}_{}'.format(command.index, n)] = f
                    self.VARS['input_{}'.format(n)] = f
                    self.step_dependent_files.extend(f)
            if self.step_dependent_files:
                env.logger.debug('Step dependent files are {}'.format(
                    ', '.join(self.step_dependent_files)))
            #
            saved_dir = os.getcwd()
            for opt in command.options:
                matched = re.match(r'^input_alias\s*=\s*([\w\d_]+)$', opt)
                if matched:
                    self.VARS[matched.group(1)] = step_input
                    for n, f in step_named_input:
                        if n:
                            self.VARS['{}_{}'.format(matched.group(1), n)] = f
                matched = re.match(r'^working_dir\s*=\s*(\S+)$', opt)
                if matched:
                    working_dir = os.path.expanduser(matched.group(1))
                    if not os.path.isdir(working_dir):
                        raise ValueError('Invalid working directory: {}'.format(working_dir))
                    env.logger.info('Use working directory ``{}`` for {}_{}'.format(
                        working_dir, pname, command.index))
                    os.chdir(working_dir)
            #
            env.logger.trace('INPUT of step {}_{}: {}'.format(
                pname, command.index, step_input))
            #
            # now, group input files
            if not command.input_emitter:
                if emitter is None:
                    # if not defined in input:
                    emitter = EmitInput()
            else:
                if emitter is not None:
                    raise ValueError(
                        'Cannot define input emitter in both input and input_emitter')
                try:
                    # ${CMD_INPUT} etc. can be used.
                    emitter = eval(
                        substituteVars(command.input_emitter, self.VARS, self.GLOBALS),
                        globals(), self.GLOBALS)
                except Exception as e:
                    raise RuntimeError('Failed to group input files: {}'.format(e))
            # pass Pipeline itself to the emitter
            igroups, ivars, step_output = emitter(step_input, self)
            try:
                for ig, iv in zip(igroups, ivars):
                    if ig != self.VARS['input']:
                        self.VARS['input'] = ig
                    if not ig and float(self.pipeline.pipeline_format) <= 1.0:
                        env.logger.trace(
                            'Step skipped due to no input file (for pipeline format < 1.0 only)')
                        continue
                    for key, val in iv.items():
                        self.VARS[key] = substituteVars(
                            val, self.VARS, self.GLOBALS, asString=False)
                    # pre-action variables are evaluated for each ig because
                    # they might involve changing ${input}
                    for key, val in command.pre_action_vars:
                        self.VARS[key] = substituteVars(
                            val, self.VARS, self.GLOBALS, asString=False)
                    action = substituteVars(command.action, self.VARS, self.GLOBALS)
                    env.logger.trace('Emitted input of step {}_{}: {}'.format(
                        pname, command.index, ig))
                    env.logger.trace('Action of step {}_{}: {}'.format(
                        pname, command.index, action))
                    # check if the input file is ready. This is used for
                    # parallel execution of the pipeline while the input file
                    # might be worked on by another job
                    for ifile in ig:
                        # is ifile in any of the output files?
                        if ifile in self.THREADS:
                            # wait for the thread to complete
                            env.logger.info(
                                'Waiting for the input file {} to be available.'.format(ifile))
                            # while self.THREADS[ifile].isAlive():
                            ret = self.THREADS[ifile].join()
                            # thread closed, remove from self.THREADS
                            self.THREADS.pop(ifile)
                            if ret:
                                raise RuntimeError(
                                    'Failed to generate {}: {}'.format(ifile, ret))
                        if not (os.path.isfile(ifile) or
                                os.path.isfile(ifile + '.file_info')):
                            # raise RewindExecution(ifile)
                            raise RuntimeError(
                                'Non-existent input file {} due to ongoing or '
                                'failed background job'.format(ifile))
                    #
                    if not action.strip():
                        action = 'NullAction()'
                    action = eval('(' + action + ')', globals(), self.GLOBALS)
                    if isinstance(action, (tuple, list)):
                        action = SequentialActions(action)
                    if not issubclass(action.__class__, PipelineAction):
                        env.logger.warning(
                            'Pipeline action {} is not a subclass of PipelineAction'.format(
                                action.__class__))
                    # pass the Pipeline object itself to the action.
                    # this allows the action to have access to pipeline
                    # variables and other options
                    if 'blocking' in command.options:
                        self.runtime = RuntimeFiles('{}_{}'.format(pname, command.index))
                        with SharedProcess(self.runtime):
                            ofiles = action(ig, self)
                    else:
                        ofiles = action(ig, self)
                    if type(ofiles) == str:
                        step_output.append(ofiles)
                    else:
                        step_output.extend(ofiles)
                # wait for all pending jobs to finish
                self.VARS['output{}'.format(command.index)] = step_output
                self.VARS['output'] = step_output
                for opt in command.options:
                    matched = re.match(r'^output_alias\s*=\s*([\w\d_]+)$', opt)
                    if matched:
                        env.logger.debug('Setting variable {} to {}'.format(
                            matched.group(1), step_output))
                        self.VARS[matched.group(1)] = step_output
                env.logger.trace('OUTPUT of step {}_{}: {}'.format(
                    pname, command.index, step_output))
                for f in step_output:
                    if not (os.path.isfile(f) or os.path.isfile(f + '.file_info') or
                            f in self.THREADS):
                        raise RuntimeError(
                            'Output file {} does not exist after '
                            'completion of step {}_{} (working directory: {})'.format(
                                f, pname, command.index, os.getcwd()))
                for key, val in command.post_action_vars:
                    self.VARS[key] = substituteVars(
                        val, self.VARS, self.GLOBALS, asString=False)
                #
                # In the case of passthrough, the original input files will be
                # passed to the next step regardless of what
                # has been produced during the step.
                if 'independent' not in command.options:
                    ifiles = step_output
                # this step is successful, go to the next one
                os.chdir(saved_dir)
                step_index += 1
                env.logger.debug('Step {}.{}_{} is executed successfully.'.format(
                    self.pipeline.name, pname, command.index))
                if step_index == len(psteps):
                    break
            except RewindExecution:
                rewind_count += 1
                if rewind_count >= 3:
                    raise RuntimeError(
                        'Failed to execute pipeline {}.{}: excessive '
                        'rewind during execution.'.format(self.pipeline.name, pname))
                # unfortunately, an input file has been removed (replaced by
                # .file_info) but a later step needs it. We will have to
                # figure out how to create this file by looking backward ...
                to_be_regenerated = [x for x in step_input if not os.path.isfile(x)]
                # we need to check if this file was actually generated at all
                # before, otherwise a misspecified input file would cause the
                # whole pipeline to start from step 1 again and again
                all_input_and_output_files = []
                for k, v in self.VARS.items():
                    if ((k.startswith('INPUT') and k[5:].isdigit() and
                            int(k[5:]) < int(command.index)) or
                        (k.startswith('OUTPUT') and k[6:].isdigit() and
                            int(k[6:]) < int(command.index))) and \
                            isinstance(v, (tuple, list)):
                        all_input_and_output_files.extend(v)
                #
                for x in to_be_regenerated:
                    if x not in all_input_and_output_files:
                        raise RuntimeError(
                            'Specified input file "{}" does not exist and is not '
                            'generated by any previous step.'.format(x))
                    # remove all phony files so that they will be re-generated
                    if os.path.isfile(x + '.file_info'):
                        os.remove(x + '.file_info')
                remaining = [x for x in to_be_regenerated]
                env.logger.debug('Missing input file {}'.format(', '.join(remaining)))
                while step_index > 0:
                    step_index -= 1
                    command = psteps[step_index]
                    # remove all phony files so that they will be re-generated
                    for x in self.VARS['input{}'.format(command.index)]:
                        if os.path.isfile(x + '.file_info'):
                            env.logger.debug('Remove file info {}'.format(x + '.file_info'))
                            os.remove(x + '.file_info')
                    # if any of the input files does not exist, go back further
                    if not all([os.path.isfile(x)
                                for x in self.VARS['input{}'.format(command.index)]]):
                        env.logger.debug('Not all input files are available: {}'.format(
                            ', '.join(self.VARS['input{}'.format(command.index)])))
                        continue
                    # check if a real file can be generated at this step
                    remaining = [x for x in remaining
                                 if x not in self.VARS['output{}'.format(command.index)]]
                    if not remaining:
                        break
                if step_index > 1:
                    ifiles = self.VARS['output{}'.format(psteps[step_index - 1].index)]
                else:
                    ifiles = self.VARS['cmd_input']
                env.logger.warning(
                    'Rewinding to ``{}.{}_{}``: input files {} need to be re-generated.'
                    .format(self.pipeline.name, pname, command.index,
                            ', '.join(to_be_regenerated)))
                os.chdir(saved_dir)
            except Exception as e:
                env.logger.debug('Failed to execute step {}.{}_{}.'.format(
                    self.pipeline.name, pname, command.index))
                raise RuntimeError('Failed to execute step {}_{}: {}'.format(
                    pname, command.index, e))
            #
            # clear variables that are local to the step
            for n, f in step_named_input:
                if n:
                    self.VARS.pop('input_{}'.format(n))
        #
        # at the end of the pipeline, wait for all threads to complete
        if self.THREADS:
            for k, v in list(self.THREADS.items()):
                env.logger.trace('Waiting for {} to be completed.'.format(k))
                while v.isAlive():
                    v.join(5)
                # thread closed, remove from self.THREADS
                self.THREADS.pop(k)
        env.logger.info(
            'Execution of pipeline {}.{} is successful with output {}'.format(
                self.pipeline.name, pname, ', '.join(step_output)))


def executeArguments(parser):
    parser.add_argument(
        'specfile',
        metavar='SPECFILE',
        help='''Name of a pipeline configuration file, which can be a path to a
            .pipeline file (with or without extension) or one of the online
            pipelines listed by command "vtools show pipelines". For backward
            compatibility, if no input and output files are specified (options
            --input and --output), the value of this option is treated as a SQL
            query that will be executed against the project database, with the
            project genotype database attached as "genotype" and annotation
            databases attached by their names.''')
    parser.add_argument(
        'pipelines',
        nargs='*',
        metavar='PIPELINES',
        help='''Name of one or more pipelines defined in SPECFILE, which can be
            ignored if the SPECFILE only defines one pipeline. One or more
            steps can be specified in the form of 'pipeline:5' (step_5 only),
            'pipeline:-5' (up to step 5), 'pipeline:5-' (from step 5),
            'pipeline:2,5' (steps 2 and 5), 'pipeline:2-5' (steps 2 to 5).
            This essentially adds an option "skip" to the unselected pipeline
            steps, and it is up to the user to ensure that the pipeline is
            executable with only a subset of steps. Please use command "vtools
            show pipeline SPECFILE" for details of available pipelines in
            SPECFILE, including pipeline-specific parameters that could be
            used to change the default behavior of the pipelines.''')
    parser.add_argument(
        '-j',
        '--jobs',
        default=1,
        type=int,
        help='''Execute the pipeline in parallel mode if a number other than 1
            is specified.
            In this mode, the RunCommand action will create a shell script and
            submit the job using a command specified by option ``submitter``,
            if this parameter is defined.''')
    parser.add_argument(
        '-d',
        '--delimiter',
        default='\t',
        help='''Delimiter used to output results of a SQL query.''')


def execute(args):
    # to keep backward compatibility, the vtools execute command
    # can execute both a SQL query and a pipeline
    def executeQuery():
        with Project(verbosity=args.verbosity) as proj:
            # if there is no output
            if proj.store == "sqlite":
                proj.db.attach('{}_genotype'.format(proj.name), 'genotype')
                # for backward compatibility
                proj.db.attach('{}_genotype'.format(proj.name))
            cur = proj.db.cursor()
            #
            query = args.specfile + ' ' + ' '.join(args.pipelines)
            if query.upper().startswith('SELECT'):
                env.logger.trace('Analyze statement: "{}"'.format(query))
                cur.execute('EXPLAIN QUERY PLAN ' + query)
                for rec in cur:
                    env.logger.trace('\t'.join([str(x) for x in rec]))
            # really execute the query
            try:
                cur.execute(query)
            except Exception as e:
                raise RuntimeError('Failed to execute SQL query "{}": {}'.format(query, e))
            proj.db.commit()
            sep = args.delimiter
            for rec in cur:
                print(sep.join(['{}'.format(x) for x in rec]))

    #
    def executePipeline():
        pipeline = Pipeline(
            args.specfile,
            extra_args=args.unknown_args,
            verbosity=args.verbosity,
            jobs=args.jobs)
        # unspecified
        if not args.pipelines:
            pipeline.execute(None)
        else:
            for name in args.pipelines:
                pipeline.execute(name)

    #
    try:
        env.verbosity = args.verbosity
        env.logger = None
        # definitely a pipeline
        if args.specfile.endswith('.pipeline') or args.unknown_args:
            executePipeline()
        # definitely a sql query
        elif args.delimiter != '\t':
            executeQuery()
        else:
            try:
                # try to execute as a SQL query
                executeQuery()
            except (RuntimeError, ValueError) as e:
                env.logger.debug('Failed to execute {} as SQL query: {}'.format(
                    ' '.join(args.pipelines), e))
                executePipeline()
    except Exception as e:
        env.unlock_all()
        env.logger.error(e)
        sys.exit(1)


#
# vtools simulate is implemented using the pipeline execution mechanism. It
# essentially executes a pipeline that calls various simulation functions
# to simulate data.
#
def simulateArguments(parser):
    parser.add_argument(
        'specfile',
        metavar='SPECFILE',
        help='''Name of a model specification file, which can be the name of an
            online specification file, or a path to a local .pipeline file.
            Please use command "vtools show simulations" to get a list of all
            available simulation models.''')
    parser.add_argument(
        'models',
        nargs='*',
        metavar='MODELS',
        help='''Name of one or more simulation models defined in SPECFILE,
            which can be ignored if the SPECFILE only defines one simulation
            model. Please use command "vtools show simulation SPECFILE" for
            details of available models in SPECFILE, including model-specific
            parameters that could be used to change the default behavior of
            these models.''')
    parser.add_argument(
        '--seed',
        type=int,
        help='''Random seed for the simulation. A random seed will be used by
            default but a specific seed could be used to reproduce a
            previously executed simulation.''')
    parser.add_argument(
        '--replicates',
        default=1,
        type=int,
        help='''Number of consecutive replicates to simulate''')
    parser.add_argument(
        '-j',
        '--jobs',
        default=1,
        type=int,
        help='''Maximum number of concurrent jobs to execute, for steps of a
            pipeline that allow multi-processing.''')


def simulate_replicate(args, rep):
    try:
        env.verbosity = args.verbosity
        env.logger = None
        # step 1, create a simulation configuration file
        model_name = os.path.basename(args.specfile).split('.', 1)[0]
        if args.seed is None:
            args.seed = random.randint(1, 2**32 - 1)
        if not os.path.isdir(env.cache_dir):
            os.mkdir(env.cache_dir)
        # set the random seed of the simulators
        random.seed(args.seed + rep)
        if not args.models:
            cfg_file = '{}/{}_{}.cfg'.format(env.cache_dir, model_name, args.seed + rep)
        else:
            cfg_file = '{}/{}_{}_{}.cfg'.format(env.cache_dir, model_name,
                                                '_'.join(args.models), args.seed + rep)
        #
        with open(cfg_file, 'w') as cfg:
            cfg.write('model={} {}\n'.format(args.specfile, ' '.join(args.models)))
            cfg.write('seed={}\n'.format(args.seed + rep))
            if '--seed' in sys.argv:
                # skip the seed option so as to stop the pipeline from
                # distinguishing the two commands
                cmd_args = sys.argv[:sys.argv.index('--seed')] + \
                    sys.argv[sys.argv.index('--seed') + 2:]
                cfg.write("command=vtools {}\n".format(
                    subprocess.list2cmdline(cmd_args[1:])))
            else:
                cfg.write("command={}\n".format(env.command_line))
        #
        env.logger.info('Starting simulation ``{}``'.format(cfg_file))
        opt = args.unknown_args
        opt.extend(['--input'] + [cfg_file])
        pipeline = Pipeline(
            args.specfile,
            extra_args=opt,
            pipeline_type='simulation',
            verbosity=args.verbosity,
            jobs=args.jobs)
        # using a pool of simulators
        if not args.models:
            pipeline.execute(None, seed=args.seed + rep)
        else:
            for name in args.models:
                pipeline.execute(name, seed=args.seed + rep)
    except Exception as e:
        env.logger.error('Failed to simulate replicate {} of model {}: {}'.format(
            rep, model_name, e))
        sys.exit(1)


def simulate(args):
    #
    try:
        ret = 0
        if args.replicates <= 0:
            raise ValueError('No replicates are requested.')
        #
        for rep in range(args.replicates):
            p = Process(target=simulate_replicate, args=(args, rep))
            p.start()
            p.join()
            if ret == 0:
                ret = p.exitcode
        # return failure if any of the replicates fails
        sys.exit(ret)
    except Exception as e:
        env.logger.error(e)
        sys.exit(1)


if __name__ == '__main__':
    pass
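All the PipelineAction subclasses above follow the same contract: __init__ forwards to PipelineAction.__init__, and __call__ receives the step's input files plus the Pipeline instance and returns the step's output files. A minimal sketch of a custom action in that shape follows; the name ``WordCount`` is hypothetical, and it assumes PipelineAction.__init__ records the ``output`` argument as ``self.output`` (an assumption, not confirmed by this file).

class WordCount(PipelineAction):
    '''Hypothetical action: write the line count of every input file to the
    first output file. Input is passed through unchanged.'''

    def __init__(self, output):
        # assumption: the base class stores `output` as self.output
        PipelineAction.__init__(self, cmd='WordCount', output=output)

    def __call__(self, ifiles, pipeline=None):
        # count lines of each input file and record them in the output file
        with open(self.output[0], 'w') as out:
            for filename in ifiles:
                with open(filename) as f:
                    out.write('{}\t{}\n'.format(filename, sum(1 for _ in f)))
        # passthrough: the step's input becomes the next step's input
        return ifiles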
/django_graphql_jwt_using_rest_framework_jwt-1.2.0-py3-none-any.whl/graphql_jwt/refresh_token/models.py
import binascii
import os
from calendar import timegm

from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _

from ..settings import jwt_settings
from . import managers, signals


class AbstractRefreshToken(models.Model):
    id = models.BigAutoField(primary_key=True)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name="refresh_tokens",
        verbose_name=_("user"),
    )
    token = models.CharField(_("token"), max_length=255, editable=False)
    created = models.DateTimeField(_("created"), auto_now_add=True)
    revoked = models.DateTimeField(_("revoked"), null=True, blank=True)

    objects = managers.RefreshTokenQuerySet.as_manager()

    class Meta:
        abstract = True
        verbose_name = _("refresh token")
        verbose_name_plural = _("refresh tokens")
        unique_together = ("token", "revoked")

    def __str__(self):
        return self.token

    def save(self, *args, **kwargs):
        if not self.token:
            self.token = self._cached_token = self.generate_token()
        super().save(*args, **kwargs)

    def generate_token(self):
        return binascii.hexlify(
            os.urandom(jwt_settings.JWT_REFRESH_TOKEN_N_BYTES),
        ).decode()

    def get_token(self):
        if hasattr(self, "_cached_token"):
            return self._cached_token
        return self.token

    def is_expired(self, request=None):
        orig_iat = timegm(self.created.timetuple())
        return jwt_settings.JWT_REFRESH_EXPIRED_HANDLER(orig_iat, request)

    def revoke(self, request=None):
        self.revoked = timezone.now()
        self.save(update_fields=["revoked"])
        signals.refresh_token_revoked.send(
            sender=AbstractRefreshToken,
            request=request,
            refresh_token=self,
        )

    def reuse(self, request=None):
        self.token = ""
        self.created = timezone.now()
        self.save(update_fields=["token", "created"])


class RefreshToken(AbstractRefreshToken):
    """RefreshToken default model"""
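A short usage sketch, not part of the package, exercising the model API above; it assumes a Django project with this app migrated and a ``user`` instance at hand.

# issue a token: save() populates .token via generate_token()
rt = RefreshToken.objects.create(user=user)
raw = rt.get_token()  # plain-text value, served from _cached_token

# later, on a refresh request:
if rt.is_expired():
    rt.revoke()   # stamps `revoked` and emits refresh_token_revoked
else:
    rt.reuse()    # blanks the token; save() sees the empty value and rotates it

Note the rotation trick in reuse(): because save() regenerates any empty token, blanking the field and saving yields a fresh value in a single write.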
/zope.html-2.4.2.zip/zope.html-2.4.2/src/zope/html/fckeditor/2.6.4.1/fckeditor/editor/_source/classes/fckw3crange.js
/*
 * FCKeditor - The text editor for Internet - http://www.fckeditor.net
 * Copyright (C) 2003-2009 Frederico Caldeira Knabben
 *
 * == BEGIN LICENSE ==
 *
 * Licensed under the terms of any of the following licenses at your
 * choice:
 *
 *  - GNU General Public License Version 2 or later (the "GPL")
 *    http://www.gnu.org/licenses/gpl.html
 *
 *  - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
 *    http://www.gnu.org/licenses/lgpl.html
 *
 *  - Mozilla Public License Version 1.1 or later (the "MPL")
 *    http://www.mozilla.org/MPL/MPL-1.1.html
 *
 * == END LICENSE ==
 *
 * This class partially implements the W3C DOM Range for browsers that don't
 * support the standard (like IE):
 * http://www.w3.org/TR/DOM-Level-2-Traversal-Range/ranges.html
 */

var FCKW3CRange = function( parentDocument )
{
    this._Document = parentDocument ;

    this.startContainer = null ;
    this.startOffset = null ;
    this.endContainer = null ;
    this.endOffset = null ;
    this.collapsed = true ;
}

FCKW3CRange.CreateRange = function( parentDocument )
{
    // We could opt to use the Range implementation of the browsers. The
    // problem is that every browser has different bugs in its implementation,
    // mostly related to different interpretations of the W3C specifications.
    // So, for now, let's use our implementation and pray for browser fixes
    // soon. Otherwise we will go crazy trying to find workarounds.
    /*
    // Get the browser implementation of the range, if available.
    if ( parentDocument.createRange )
    {
        var range = parentDocument.createRange() ;
        if ( typeof( range.startContainer ) != 'undefined' )
            return range ;
    }
    */
    return new FCKW3CRange( parentDocument ) ;
}

FCKW3CRange.CreateFromRange = function( parentDocument, sourceRange )
{
    var range = FCKW3CRange.CreateRange( parentDocument ) ;
    range.setStart( sourceRange.startContainer, sourceRange.startOffset ) ;
    range.setEnd( sourceRange.endContainer, sourceRange.endOffset ) ;
    return range ;
}

FCKW3CRange.prototype =
{
    _UpdateCollapsed : function()
    {
        this.collapsed = ( this.startContainer == this.endContainer && this.startOffset == this.endOffset ) ;
    },

    // W3C requires a check for the new position. If it is after the end
    // boundary, the range should be collapsed to the new start. It seems we
    // will not need this check for our use of this class so we can ignore it for now.
    setStart : function( refNode, offset )
    {
        this.startContainer = refNode ;
        this.startOffset = offset ;

        if ( !this.endContainer )
        {
            this.endContainer = refNode ;
            this.endOffset = offset ;
        }

        this._UpdateCollapsed() ;
    },

    // W3C requires a check for the new position. If it is before the start
    // boundary, the range should be collapsed to the new end. It seems we
    // will not need this check for our use of this class so we can ignore it for now.
    setEnd : function( refNode, offset )
    {
        this.endContainer = refNode ;
        this.endOffset = offset ;

        if ( !this.startContainer )
        {
            this.startContainer = refNode ;
            this.startOffset = offset ;
        }

        this._UpdateCollapsed() ;
    },

    setStartAfter : function( refNode )
    {
        this.setStart( refNode.parentNode, FCKDomTools.GetIndexOf( refNode ) + 1 ) ;
    },

    setStartBefore : function( refNode )
    {
        this.setStart( refNode.parentNode, FCKDomTools.GetIndexOf( refNode ) ) ;
    },

    setEndAfter : function( refNode )
    {
        this.setEnd( refNode.parentNode, FCKDomTools.GetIndexOf( refNode ) + 1 ) ;
    },

    setEndBefore : function( refNode )
    {
        this.setEnd( refNode.parentNode, FCKDomTools.GetIndexOf( refNode ) ) ;
    },

    collapse : function( toStart )
    {
        if ( toStart )
        {
            this.endContainer = this.startContainer ;
            this.endOffset = this.startOffset ;
        }
        else
        {
            this.startContainer = this.endContainer ;
            this.startOffset = this.endOffset ;
        }

        this.collapsed = true ;
    },

    selectNodeContents : function( refNode )
    {
        this.setStart( refNode, 0 ) ;
        this.setEnd( refNode, refNode.nodeType == 3 ? refNode.data.length : refNode.childNodes.length ) ;
    },

    insertNode : function( newNode )
    {
        var startContainer = this.startContainer ;
        var startOffset = this.startOffset ;

        // If we are in a text node.
        if ( startContainer.nodeType == 3 )
        {
            startContainer.splitText( startOffset ) ;

            // Check if it is necessary to update the end boundary.
            if ( startContainer == this.endContainer )
                this.setEnd( startContainer.nextSibling, this.endOffset - this.startOffset ) ;

            // Insert the new node after the text node.
            FCKDomTools.InsertAfterNode( startContainer, newNode ) ;
            return ;
        }
        else
        {
            // Simply insert the new node before the current start node.
            startContainer.insertBefore( newNode, startContainer.childNodes[ startOffset ] || null ) ;

            // Check if it is necessary to update the end boundary.
            if ( startContainer == this.endContainer )
            {
                this.endOffset++ ;
                this.collapsed = false ;
            }
        }
    },

    deleteContents : function()
    {
        if ( this.collapsed )
            return ;

        this._ExecContentsAction( 0 ) ;
    },

    extractContents : function()
    {
        var docFrag = new FCKDocumentFragment( this._Document ) ;

        if ( !this.collapsed )
            this._ExecContentsAction( 1, docFrag ) ;

        return docFrag ;
    },

    // The selection may be lost when cloning (due to the splitText() call).
    cloneContents : function()
    {
        var docFrag = new FCKDocumentFragment( this._Document ) ;

        if ( !this.collapsed )
            this._ExecContentsAction( 2, docFrag ) ;

        return docFrag ;
    },

    _ExecContentsAction : function( action, docFrag )
    {
        var startNode = this.startContainer ;
        var endNode = this.endContainer ;
        var startOffset = this.startOffset ;
        var endOffset = this.endOffset ;
        var removeStartNode = false ;
        var removeEndNode = false ;

        // Check the start and end nodes and make the necessary removals or
        // changes. Start from the end, otherwise DOM mutations (splitText)
        // made at the start boundary may interfere with the results here.

        // For text containers, we must simply split the node and point to the
        // second part. The removal will be handled by the rest of the code.
        if ( endNode.nodeType == 3 )
            endNode = endNode.splitText( endOffset ) ;
        else
        {
            // If the end container has children and the offset is pointing
            // to a child, then we should start from it.
            if ( endNode.childNodes.length > 0 )
            {
                // If the offset points after the last node.
                if ( endOffset > endNode.childNodes.length - 1 )
                {
                    // Let's create a temporary node and mark it for removal.
                    endNode = FCKDomTools.InsertAfterNode( endNode.lastChild, this._Document.createTextNode('') ) ;
                    removeEndNode = true ;
                }
                else
                    endNode = endNode.childNodes[ endOffset ] ;
            }
        }

        // For text containers, we must simply split the node. The removal
        // will be handled by the rest of the code.
        if ( startNode.nodeType == 3 )
        {
            startNode.splitText( startOffset ) ;

            // In cases where the end node is the same as the start node, the
            // above splitting will also split the end, so we must move the
            // end to the second part of the split.
            if ( startNode == endNode )
                endNode = startNode.nextSibling ;
        }
        else
        {
            // If the start container has children and the offset is pointing
            // to a child, then we should start from its previous sibling.

            // If the offset points to the first node, we don't have a
            // sibling, so let's use the first one, but mark it for removal.
            if ( startOffset == 0 )
            {
                // Let's create a temporary node and mark it for removal.
                startNode = startNode.insertBefore( this._Document.createTextNode(''), startNode.firstChild ) ;
                removeStartNode = true ;
            }
            else if ( startOffset > startNode.childNodes.length - 1 )
            {
                // Let's create a temporary node and mark it for removal.
                startNode = startNode.appendChild( this._Document.createTextNode('') ) ;
                removeStartNode = true ;
            }
            else
                startNode = startNode.childNodes[ startOffset ].previousSibling ;
        }

        // Get the parent nodes tree for the start and end boundaries.
        var startParents = FCKDomTools.GetParents( startNode ) ;
        var endParents = FCKDomTools.GetParents( endNode ) ;

        // Compare them, to find the top most siblings.
        var i, topStart, topEnd ;

        for ( i = 0 ; i < startParents.length ; i++ )
        {
            topStart = startParents[i] ;
            topEnd = endParents[i] ;

            // The compared nodes will match until we find the top most
            // siblings (different nodes that have the same parent).
            // "i" will hold the index in the parents array for the top
            // most element.
            if ( topStart != topEnd )
                break ;
        }

        var clone, levelStartNode, levelClone, currentNode, currentSibling ;

        if ( docFrag )
            clone = docFrag.RootNode ;

        // Remove all successive sibling nodes for every node in the
        // startParents tree.
        for ( var j = i ; j < startParents.length ; j++ )
        {
            levelStartNode = startParents[j] ;

            // For Extract and Clone, we must clone this level.
            if ( clone && levelStartNode != startNode )    // action = 0 = Delete
                levelClone = clone.appendChild( levelStartNode.cloneNode( levelStartNode == startNode ) ) ;

            currentNode = levelStartNode.nextSibling ;

            while ( currentNode )
            {
                // Stop processing when the current node matches a node in the
                // endParents tree or if it is the endNode.
                if ( currentNode == endParents[j] || currentNode == endNode )
                    break ;

                // Cache the next sibling.
                currentSibling = currentNode.nextSibling ;

                // If cloning, just clone it.
                if ( action == 2 )    // 2 = Clone
                    clone.appendChild( currentNode.cloneNode( true ) ) ;
                else
                {
                    // Both Delete and Extract will remove the node.
                    currentNode.parentNode.removeChild( currentNode ) ;

                    // When Extracting, move the removed node to the docFrag.
                    if ( action == 1 )    // 1 = Extract
                        clone.appendChild( currentNode ) ;
                }

                currentNode = currentSibling ;
            }

            if ( clone )
                clone = levelClone ;
        }

        if ( docFrag )
            clone = docFrag.RootNode ;

        // Remove all previous sibling nodes for every node in the
        // endParents tree.
        for ( var k = i ; k < endParents.length ; k++ )
        {
            levelStartNode = endParents[k] ;

            // For Extract and Clone, we must clone this level.
            if ( action > 0 && levelStartNode != endNode )    // action = 0 = Delete
                levelClone = clone.appendChild( levelStartNode.cloneNode( levelStartNode == endNode ) ) ;

            // The processing of siblings may have already been done by the parent.
            if ( !startParents[k] || levelStartNode.parentNode != startParents[k].parentNode )
            {
                currentNode = levelStartNode.previousSibling ;

                while ( currentNode )
                {
                    // Stop processing when the current node matches a node in
                    // the startParents tree or if it is the startNode.
                    if ( currentNode == startParents[k] || currentNode == startNode )
                        break ;

                    // Cache the previous sibling.
                    currentSibling = currentNode.previousSibling ;

                    // If cloning, just clone it.
                    if ( action == 2 )    // 2 = Clone
                        clone.insertBefore( currentNode.cloneNode( true ), clone.firstChild ) ;
                    else
                    {
                        // Both Delete and Extract will remove the node.
                        currentNode.parentNode.removeChild( currentNode ) ;

                        // When Extracting, move the removed node to the docFrag.
                        if ( action == 1 )    // 1 = Extract
                            clone.insertBefore( currentNode, clone.firstChild ) ;
                    }

                    currentNode = currentSibling ;
                }
            }

            if ( clone )
                clone = levelClone ;
        }

        if ( action == 2 )    // 2 = Clone.
        {
            // No changes in the DOM should be done, so fix the split text (if any).
            var startTextNode = this.startContainer ;
            if ( startTextNode.nodeType == 3 )
            {
                startTextNode.data += startTextNode.nextSibling.data ;
                startTextNode.parentNode.removeChild( startTextNode.nextSibling ) ;
            }

            var endTextNode = this.endContainer ;
            if ( endTextNode.nodeType == 3 && endTextNode.nextSibling )
            {
                endTextNode.data += endTextNode.nextSibling.data ;
                endTextNode.parentNode.removeChild( endTextNode.nextSibling ) ;
            }
        }
        else
        {
            // Collapse the range.
            // If a node has been partially selected, collapse the range between
            // topStart and topEnd. Otherwise, simply collapse it to the start. (W3C specs).
            if ( topStart && topEnd && ( startNode.parentNode != topStart.parentNode || endNode.parentNode != topEnd.parentNode ) )
            {
                var endIndex = FCKDomTools.GetIndexOf( topEnd ) ;

                // If the start node is to be removed, we must correct the
                // index to reflect the removal.
                if ( removeStartNode && topEnd.parentNode == startNode.parentNode )
                    endIndex-- ;

                this.setStart( topEnd.parentNode, endIndex ) ;
            }

            // Collapse it to the start.
            this.collapse( true ) ;
        }

        // Cleanup any marked node.
        if ( removeStartNode )
            startNode.parentNode.removeChild( startNode ) ;

        if ( removeEndNode && endNode.parentNode )
            endNode.parentNode.removeChild( endNode ) ;
    },

    cloneRange : function()
    {
        return FCKW3CRange.CreateFromRange( this._Document, this ) ;
    }
} ;
/jk_svg-0.2021.1.2.tar.gz/jk_svg-0.2021.1.2/jk_svg/_GroupElementsMixin.py
from .AbstractSVGElement import AbstractSVGElement
from .SVGGenericElement import SVGGenericElement
from .SVGLine import SVGLine
from .SVGEllipse import SVGEllipse
from .SVGCircle import SVGCircle
from .SVGRect import SVGRect
from .SVGPolygon import SVGPolygon
from .SVGPolyline import SVGPolyline
from .SVGPath import SVGPath
from .SVGText import SVGText


class _GroupElementsMixin:

    ################################################################################################################################
    ## Constructor
    ################################################################################################################################

    ################################################################################################################################
    ## Public Properties
    ################################################################################################################################

    ################################################################################################################################
    ## Helper Methods
    ################################################################################################################################

    ################################################################################################################################
    ## Public Methods
    ################################################################################################################################

    def createElement(self, tagName:str) -> SVGGenericElement:
        assert isinstance(tagName, str)
        assert tagName

        if tagName == "path":
            return self.createPath()
        elif tagName == "line":
            return self.createLine()
        elif tagName == "ellipse":
            return self.createEllipse()
        elif tagName == "circle":
            return self.createCircle()
        elif tagName == "rect":
            return self.createRect()
        elif tagName == "polygon":
            return self.createPolygon()
        elif tagName == "polyline":
            return self.createPolyline()
        elif tagName == "text":
            return self.createText()

        ret = SVGGenericElement(tagName)
        self._children.append(ret)
        return ret
    #

    def createPath(self) -> SVGPath:
        ret = SVGPath()
        self._children.append(ret)
        return ret
    #

    def createLine(self) -> SVGLine:
        ret = SVGLine()
        self._children.append(ret)
        return ret
    #

    def createEllipse(self) -> SVGEllipse:
        ret = SVGEllipse()
        self._children.append(ret)
        return ret
    #

    def createCircle(self) -> SVGCircle:
        ret = SVGCircle()
        self._children.append(ret)
        return ret
    #

    def createRect(self) -> SVGRect:
        ret = SVGRect()
        self._children.append(ret)
        return ret
    #

    def createPolygon(self) -> SVGPolygon:
        ret = SVGPolygon()
        self._children.append(ret)
        return ret
    #

    def createPolyline(self) -> SVGPolyline:
        ret = SVGPolyline()
        self._children.append(ret)
        return ret
    #

    def createText(self) -> SVGText:
        ret = SVGText()
        self._children.append(ret)
        return ret
    #

#
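A small usage sketch of the mixin above. The ``Group`` class here is hypothetical: any container in the package that mixes in _GroupElementsMixin only needs to provide the ``_children`` list the factory methods append to.

# hypothetical container standing in for a real group element
class Group(_GroupElementsMixin):
    def __init__(self):
        self._children = []

g = Group()
rect = g.createRect()               # direct factory method
circle = g.createElement("circle")  # dispatches to createCircle()
custom = g.createElement("marker")  # unknown tag: falls back to SVGGenericElement
assert circle in g._children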
/hh_applicant_tool-0.2.1.tar.gz/hh_applicant_tool-0.2.1/hh_applicant_tool/main.py
from __future__ import annotations

import argparse
import logging
import sys
from abc import ABCMeta, abstractmethod
from importlib import import_module
from os import getenv
from pathlib import Path
from pkgutil import iter_modules
from typing import Sequence

from .color_log import ColorHandler
from .utils import Config

DEFAULT_CONFIG_PATH = (
    Path(getenv("XDG_CONFIG_PATH", Path.home() / ".config"))
    / __package__.replace("_", "-")
    / "config.json"
)

logger = logging.getLogger(__package__)


class BaseOperation(metaclass=ABCMeta):
    def setup_parser(self, parser: argparse.ArgumentParser) -> None:
        ...

    @abstractmethod
    def run(self, args: argparse.Namespace) -> None | int:
        ...


OPERATIONS = "operations"


class Namespace(argparse.Namespace):
    config: Config
    verbosity: int


class HHApplicantTool:
    """A utility for automating applicant actions on hh.ru.

    Description, source code and suggestions:
    <https://github.com/s3rgeym/hh-applicant-tool>.
    """

    def create_parser(self) -> argparse.ArgumentParser:
        parser = argparse.ArgumentParser(
            description=self.__doc__,
        )
        parser.add_argument(
            "-c",
            "--config",
            help="config path",
            type=Config,
            default=Config(DEFAULT_CONFIG_PATH),
        )
        parser.add_argument(
            "-v",
            "--verbosity",
            help="increase verbosity",
            action="count",
            default=0,
        )
        subparsers = parser.add_subparsers(help="commands")
        package_dir = Path(__file__).resolve().parent / OPERATIONS
        for _, module_name, _ in iter_modules([str(package_dir)]):
            mod = import_module(f"{__package__}.{OPERATIONS}.{module_name}")
            op: BaseOperation = mod.Operation()
            op_parser = subparsers.add_parser(
                module_name.replace("_", "-"), description=op.__doc__
            )
            op_parser.set_defaults(run=op.run)
            op.setup_parser(op_parser)
        parser.set_defaults(run=None)
        return parser

    def run(self, argv: Sequence[str] | None) -> None | int:
        parser = self.create_parser()
        args = parser.parse_args(argv, namespace=Namespace())
        log_level = max(logging.DEBUG, logging.WARNING - args.verbosity * 10)
        logger.setLevel(log_level)
        handler = ColorHandler()
        # one-letter level prefix, e.g. "[C] Critical error occurred"
        handler.setFormatter(
            logging.Formatter("[%(levelname).1s] %(message)s")
        )
        logger.addHandler(handler)
        if args.run:
            try:
                return args.run(args)
            except Exception as e:
                logger.exception(e)
                return 1
        parser.print_help(file=sys.stderr)
        return 2


def main(argv: Sequence[str] | None = None) -> None | int:
    return HHApplicantTool().run(argv)
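A sketch of an operation module that the plugin loader above would pick up — hypothetical, not shipped with the package. Dropped in as hh_applicant_tool/operations/show_config.py, it would appear as the ``show-config`` subcommand; the loader's only requirement is a module-level class named ``Operation``.

import argparse

from ..main import BaseOperation, Namespace


class Operation(BaseOperation):
    """Print the resolved configuration."""  # used as the subcommand description

    def setup_parser(self, parser: argparse.ArgumentParser) -> None:
        parser.add_argument("--raw", action="store_true")

    def run(self, args: Namespace) -> int:
        # args.config is the Config instance created by the -c option
        print(args.config if args.raw else repr(args.config))
        return 0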
/p-template-generator-0.1.79.tar.gz/p-template-generator-0.1.79/template_generator/binary.py
import sys
import os
import subprocess
import json
import random
from pathlib import Path
import shutil
import zipfile
import stat
import requests
import hashlib
import logging


def getOssResource(rootDir, url, md5, name):
    localFile = os.path.join(rootDir, name)
    localFileIsRemote = False
    if os.path.exists(localFile):
        with open(localFile, 'rb') as fp:
            file_data = fp.read()
        file_md5 = hashlib.md5(file_data).hexdigest()
        if file_md5 == md5:
            localFileIsRemote = True
    if localFileIsRemote == False:
        # download
        if os.path.exists(localFile):
            os.remove(localFile)
        s = requests.session()
        s.keep_alive = False
        print(f"download {url}")
        file = s.get(url, verify=False)
        with open(localFile, "wb") as c:
            c.write(file.content)
        s.close()
        fname = name[0:name.index(".")]
        fext = name[name.index("."):]
        unzipDir = os.path.join(rootDir, fname)
        if os.path.exists(unzipDir):
            shutil.rmtree(unzipDir)
        print(f"unzip {url} -> {unzipDir}")


def readDirChecksum(dir):
    f = os.path.join(dir, "checksum.txt")
    txt = ""
    if os.path.exists(f):
        with open(f, "r", encoding="UTF-8") as f1:
            txt = f1.read()
    return txt


def writeDirChecksum(dir, zipFile):
    if os.path.exists(zipFile) == False:
        return
    with open(zipFile, 'rb') as fp:
        fdata = fp.read()
    fmd5 = hashlib.md5(fdata).hexdigest()
    with open(os.path.join(dir, "checksum.txt"), "w") as f:
        f.write(fmd5)


def checkFileMd5(rootDir):
    data = {
        "fonts.zip.py": "b1f190ba1cea49177eccde2eb2a6cb13",
        "subEffect.zip.py": "08651251e4351fd8cd5829b2ef65a8b9",
    }
    for key in data:
        fpath = os.path.join(rootDir, key)
        if os.path.exists(fpath):
            with open(fpath, 'rb') as fp:
                fdata = fp.read()
            fmd5 = hashlib.md5(fdata).hexdigest()
            fname = key[0:key.index(".")]
            fext = key[key.index("."):]
            fdirpath = os.path.join(rootDir, fname)
            if os.path.exists(fdirpath) and fmd5 != readDirChecksum(fdirpath):
                logging.info(f"remove old {fdirpath}")
                shutil.rmtree(fdirpath)


def updateBin(rootDir):
    getOssResource(rootDir, "https://m.mecordai.com/res/ffmpeg.zip",
                   "a9e6b05ac70f6416d5629c07793b4fcf", "ffmpeg.zip.py")
    getOssResource(rootDir, "https://m.mecordai.com/res/skymedia_20230825.zip",
                   "bd305b4c3c5caa6f754276e1986f2146", "skymedia.zip.py")
    getOssResource(rootDir, "https://m.mecordai.com/res/randomTemplates_20230625.zip",
                   "e0cf7eaed4a90d59fe82f41b02f3d17e", "randomTemplates.zip.py")
    checkFileMd5(rootDir)
    for root, dirs, files in os.walk(rootDir):
        for file in files:
            if file.find(".") <= 0:
                continue
            name = file[0:file.index(".")]
            ext = file[file.index("."):]
            if ext == ".zip.py" and os.path.exists(os.path.join(root, name)) == False:
                print(f"unzip {os.path.join(root, name)}")
                with zipfile.ZipFile(os.path.join(root, file), "r") as zipf:
                    zipf.extractall(os.path.join(root, name))
                writeDirChecksum(os.path.join(root, name), os.path.join(root, file))
        if root != files:
            break


def realBinPath(searchPath):
    binDir = ""
    if len(searchPath) <= 0 or os.path.exists(searchPath) == False:
        binDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "bin")
        updateBin(binDir)
    else:
        binDir = searchPath
    return binDir


def ffmpegPath(searchPath):
    return os.path.join(realBinPath(searchPath), "ffmpeg")


def skymediaPath(searchPath):
    return os.path.join(realBinPath(searchPath), "skymedia")


def subEffectPath(searchPath):
    return os.path.join(realBinPath(searchPath), "subEffect")


def randomEffectPath(searchPath):
    return os.path.join(realBinPath(searchPath), "randomTemplates")


def fontPath(searchPath):
    return os.path.join(realBinPath(searchPath), "fonts")
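A usage sketch of the lookup helpers above. Passing an empty search path makes realBinPath fall back to the package's own bin directory and run the download-and-unzip bootstrap; passing an existing directory skips it entirely. The "/opt/template_bin" path is a hypothetical example, not something the package provides.

# first call may download ffmpeg.zip.py and friends, verify md5 sums,
# and unzip them into <package>/bin
ffmpeg = ffmpegPath("")
print(ffmpeg)  # .../template_generator/bin/ffmpeg

# with a pre-provisioned directory no network access happens
ffmpeg = ffmpegPath("/opt/template_bin")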
PypiClean
/pbt_service-5.0.0-py3-none-any.whl/service/log/formatter.py
import os import logging import json from collections.abc import Mapping from pathlib import Path from . import logger from . import term def formatter_from_event_type(event_type: int): """Map formatter to event.""" log_styles = { logging.NOTSET: term.StyleDefault, logging.DEBUG: term.StyleDebug, logging.INFO: term.StyleInfo, logging.WARNING: term.StyleWarning, logging.CRITICAL: term.StyleError, logging.ERROR: term.StyleError, logging.FATAL: term.StyleError, } try: return log_styles[event_type] except (AttributeError, KeyError, IndexError): return term.StyleDefault class Formatter(logger.JSONFormatter): """Default log formatter.""" def format(self, record: logger.LogRecord): """Format.""" super().format(record) message = { field_name: logger.formatter_value(record, field_value) for field_name, field_value in self.fields.items() } if isinstance(record.msg, Mapping): message.update(record.msg) else: message[self.message_field_name] = super().formatMessage(record) message.update(logger.log_extra_attrs(record)) message['msg'] = record.msg or record.message if record.name == 'sqlalchemy.engine.Engine': _no_sql = ('\n', '\t') for _spec in _no_sql: message['msg'] = str(message['msg']).replace(_spec, ' ') caller = Path(record.pathname) message['path'] = '../{parent}/{name}/{script}'.format( parent=caller.parent.parent.name, name=caller.parent.name, script=caller.name, ) _debug = 'true' in str(os.environ.get('DEBUG', '')).lower() if record.exc_info: message['exception'] = { 'type': record.exc_info[0].__name__, 'msg': str(record.exc_info[1]), } if _debug: message['exception'].update({ 'stack': logger.Traceback(record.exc_info[2]).as_dict(), }) if len(message) == 1 and self.message_field_name in message: return super().formatMessage(record) _fmt_style = formatter_from_event_type(record.levelno) return term.highlight( json.dumps( message, default=logger.default_converter, indent=4, ensure_ascii=False, ), lexer=term.JsonLexer(), formatter=term.Terminal256Formatter(style=_fmt_style), ) class RPCFormatter(Formatter): """RPC Log formatter.""" pass __all__ = ('Formatter', 'RPCFormatter')
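# --- Illustrative usage sketch (appended; not part of the original module) ---
# The Formatter above emits one highlighted JSON document per log record.
# Wiring a formatter into the stdlib logging machinery follows the standard
# pattern below; a plain logging.Formatter stands in here, since `Formatter`
# inherits its constructor arguments from the package-local logger.JSONFormatter:
import logging
import sys

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("%(levelname)s %(name)s %(message)s"))
demo_log = logging.getLogger("service.demo")
demo_log.addHandler(handler)
demo_log.setLevel(logging.INFO)
demo_log.info("structured message")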
PypiClean
/skytime-0.16.1-py3-none-any.whl/build/lib/build/lib/sktime/_contrib/distance_based/_proximity_forest.py
# linkedin.com/goastler; github.com/goastler # github.com/moradisten __author__ = ["goastler", "Morad A. Azaz"] __all__ = ["ProximityForest", "_CachedTransformer", "ProximityStump", "ProximityTree"] from warnings import warn import numpy as np import pandas as pd from joblib import Parallel, delayed from scipy import stats from sklearn.preprocessing import LabelEncoder, normalize from sklearn.utils import check_random_state from sktime.classification.base import BaseClassifier from sktime.datatypes._panel._convert import ( from_nested_to_2d_array, from_nested_to_3d_numpy, ) from sktime.distances import ( # twe_distance, dtw_distance, erp_distance, lcss_distance, msm_distance, wdtw_distance, ) from sktime.transformations.base import _PanelToPanelTransformer from sktime.transformations.panel.summarize import DerivativeSlopeTransformer from sktime.utils.validation.panel import check_X, check_X_y # todo unit tests / sort out current unit tests # todo logging package rather than print to screen # todo get params avoid func pointer - use name # todo set params use func name or func pointer # todo constructor accept str name func / pointer # todo duck-type functions def stdp(X): """Proximity forest util function (deprecated).""" warn( "This function has moved to classification/distance_based/_proximity_forest as " "a private function. This version will be removed in V0.10", FutureWarning, ) sum = 0 sum_sq = 0 num_instances = X.shape[0] num_dimensions = X.shape[1] num_values = 0 for instance_index in range(0, num_instances): for dimension_index in range(0, num_dimensions): instance = X.iloc[instance_index, dimension_index] for value in instance: num_values += 1 sum += value sum_sq += value**2 # todo missing values NaN messes # this up! mean = sum / num_values stdp = np.math.sqrt(sum_sq / num_values - mean**2) return stdp # indices of all minimum values in an array (a helper referenced later in this module but missing from this copy; minimal reconstruction from its usage) def arg_mins(array, getter=None): """Proximity forest util function.""" if getter is not None: array = [getter(item) for item in array] min_value = min(array) return [index for index, value in enumerate(array) if value == min_value] # find index of min value in array, randomly breaking ties def _arg_min(array, rand, getter=None): """Proximity forest util function.""" return rand.choice(arg_mins(array, getter)) def max_instance_length(X): """Proximity forest util function.""" max_length = len(X.iloc[0, 0]) # max = -1 # for dimension in range(0, instances.shape[1]): # length = max_instance_dimension_length(instances, dimension) # if length > max: # max = length return max_length class _CachedTransformer(_PanelToPanelTransformer): """Transformer that transforms data and adds the transformed version to a cache. If the transformation is called again on already seen data the data is fetched from the cache rather than performing the expensive transformation. Parameters ---------- transformer : the transformer to transform uncached data Attributes ---------- cache : location to store transforms seen before for fast look up """ _required_parameters = ["transformer"] def __init__(self, transformer): self.cache = {} self.transformer = transformer super(_CachedTransformer, self).__init__() def clear(self): """Clear the cache.""" self.cache = {} def transform(self, X, y=None): """ Fit transformer, creating a cache for transformation. Parameters ---------- X : pandas DataFrame of shape [n_instances, n_features] Input data y : pandas Series, shape (n_instances), optional Targets for supervised learning. Returns ------- cached_instances.
""" # for each instance, get transformed instance from cache or # transform and add to cache cached_instances = {} uncached_indices = [] for index in X.index.values: try: cached_instances[index] = self.cache[index] except Exception: uncached_indices.append(index) if len(uncached_indices) > 0: uncached_instances = X.loc[uncached_indices, :] transformed_uncached_instances = self.transformer.fit_transform( uncached_instances ) transformed_uncached_instances.index = uncached_instances.index transformed_uncached_instances = transformed_uncached_instances.to_dict( "index" ) self.cache.update(transformed_uncached_instances) cached_instances.update(transformed_uncached_instances) cached_instances = pd.DataFrame.from_dict(cached_instances, orient="index") return cached_instances def __str__(self): """Transform string.""" return self.transformer.__str__() def _derivative_distance(distance_measure, transformer): """Take derivative before conducting distance measure. :param distance_measure: the distance measure to use :param transformer: the transformer to use :return: a distance measure function with built in transformation """ def distance(instance_a, instance_b, **params): df = pd.DataFrame([instance_a, instance_b]) df = transformer.transform(X=df) instance_a = df.iloc[0, :] instance_b = df.iloc[1, :] return distance_measure(instance_a, instance_b, **params) return distance def distance_predefined_params(distance_measure, **params): """Conduct distance measurement with a predefined set of parameters. :param distance_measure: the distance measure to use :param params: the parameters to use in the distance measure :return: a distance measure with no parameters """ def distance(instance_a, instance_b): return distance_measure(instance_a, instance_b, **params) return distance def numba_wrapper(distance_measure): """Wrap a distance measure in cython conversion. (to 1 column per dimension format) :param distance_measure: distance measure to wrap :return: a distance measure which automatically formats data for cython distance measures """ def distance(instance_a, instance_b, **params): # find distance instance_a = from_nested_to_2d_array( instance_a, return_numpy=True ) # todo use specific # dimension rather than whole # thing? instance_b = from_nested_to_2d_array( instance_b, return_numpy=True ) # todo use specific # dimension rather than whole thing? instance_a = np.transpose(instance_a) instance_b = np.transpose(instance_b) return distance_measure(instance_a, instance_b, **params) return distance def pure(y): """Test whether a set of class labels are pure (i.e. all the same). Parameters ---------- y : 1d array like array of class labels Returns ------- result : boolean whether the set of class labels is pure """ # get unique class labels unique_class_labels = np.unique(np.array(y)) # if more than 1 unique then not pure return len(unique_class_labels) <= 1 def gini_gain(y, y_subs): """Get gini score of a split, i.e. the gain from parent to children. Parameters ---------- y : 1d array like array of class labels at parent y_subs : list of 1d array like list of array of class labels, one array per child Returns ------- score : float gini score of the split from parent class labels to children. Note a higher score means better gain, i.e. 
a better split """ y = np.array(y) # find number of instances overall parent_n_instances = y.shape[0] # if parent has no instances then is pure if parent_n_instances == 0: for child in y_subs: if len(child) > 0: raise ValueError("children populated but parent empty") return 0.5 # find gini for parent node score = gini(y) # sum the children's gini scores for index in range(len(y_subs)): child_class_labels = y_subs[index] # ignore empty children if len(child_class_labels) > 0: # find gini score for this child child_score = gini(child_class_labels) # weight score by proportion of instances at child compared to # parent child_size = len(child_class_labels) child_score *= child_size / parent_n_instances # add to cumulative sum score -= child_score return score def gini(y): """Get gini score at a specific node. Parameters ---------- y : 1d numpy array array of class labels Returns ------- score : float gini score for the set of class labels (i.e. how pure they are). A larger score means more impurity. Zero means pure. """ y = np.array(y) # get number instances at node n_instances = y.shape[0] if n_instances > 0: # count each class unique_class_labels, class_counts = np.unique(y, return_counts=True) # subtract class entropy from current score for each class class_counts = np.divide(class_counts, n_instances) class_counts = np.power(class_counts, 2) class_counts_sum = np.sum(class_counts) return 1 - class_counts_sum else: # y is empty, therefore considered pure raise ValueError(" y empty") def get_one_exemplar_per_class_proximity(proximity): """Unpack proximity object into X, y and random_state for picking exemplars. Parameters ---------- proximity : Proximity object Proximity like object containing the X, y and random_state variables required for picking exemplars. Returns ------- result : function function choosing one exemplar per class """ return get_one_exemplar_per_class(proximity.X, proximity.y, proximity.random_state) def get_one_exemplar_per_class(X, y, random_state): """Pick one exemplar instance per class in the dataset. Parameters ---------- X : array-like or sparse matrix of shape = [n_instances, n_columns] The training input samples. If a Pandas data frame is passed, the column _dim_to_use is extracted y : array-like, shape = [n_samples] or [n_samples, n_outputs] The class labels. random_state : numpy RandomState a random state for sampling random numbers Returns ------- chosen_instances : list list of the chosen exemplar instances. chosen_class_labels : array list of corresponding class labels for each of the chosen exemplar instances. """ # find unique class labels unique_class_labels = np.unique(y) n_unique_class_labels = len(unique_class_labels) chosen_instances = [None] * n_unique_class_labels # for each class randomly choose and instance for class_label_index in range(n_unique_class_labels): class_label = unique_class_labels[class_label_index] # filter class labels for desired class and get indices indices = np.argwhere(y == class_label) # flatten numpy output indices = np.ravel(indices) # random choice index = random_state.choice(indices) # record exemplar instance and class label instance = X.iloc[index, :] chosen_instances[class_label_index] = instance # convert lists to numpy arrays return chosen_instances, unique_class_labels def dtw_distance_measure_getter(X): """Generate the dtw distance measure. 
:param X: dataset to derive parameter ranges from :return: distance measure and parameter range dictionary """ return { "distance_measure": [numba_wrapper(dtw_distance)], "w": stats.uniform(0, 0.25), } def msm_distance_measure_getter(X): """Generate the msm distance measure. :param X: dataset to derive parameter ranges from :return: distance measure and parameter range dictionary """ n_dimensions = 1 # todo use other dimensions return { "distance_measure": [numba_wrapper(msm_distance)], "dim_to_use": stats.randint(low=0, high=n_dimensions), "c": [ 0.01, 0.01375, 0.0175, 0.02125, 0.025, 0.02875, 0.0325, 0.03625, 0.04, 0.04375, 0.0475, 0.05125, 0.055, 0.05875, 0.0625, 0.06625, 0.07, 0.07375, 0.0775, 0.08125, 0.085, 0.08875, 0.0925, 0.09625, 0.1, 0.136, 0.172, 0.208, 0.244, 0.28, 0.316, 0.352, 0.388, 0.424, 0.46, 0.496, 0.532, 0.568, 0.604, 0.64, 0.676, 0.712, 0.748, 0.784, 0.82, 0.856, 0.892, 0.928, 0.964, 1, 1.36, 1.72, 2.08, 2.44, 2.8, 3.16, 3.52, 3.88, 4.24, 4.6, 4.96, 5.32, 5.68, 6.04, 6.4, 6.76, 7.12, 7.48, 7.84, 8.2, 8.56, 8.92, 9.28, 9.64, 10, 13.6, 17.2, 20.8, 24.4, 28, 31.6, 35.2, 38.8, 42.4, 46, 49.6, 53.2, 56.8, 60.4, 64, 67.6, 71.2, 74.8, 78.4, 82, 85.6, 89.2, 92.8, 96.4, 100, ], } def erp_distance_measure_getter(X): """Generate the erp distance measure. :param X: dataset to derive parameter ranges from :return: distance measure and parameter range dictionary """ dataset_stdp = stdp(X) instance_length = max_instance_length(X) # todo should this use the max instance # length for unequal length dataset instances? max_raw_warping_window = np.floor((instance_length + 1) / 4) n_dimensions = 1 # todo use other dimensions return { "distance_measure": [numba_wrapper(erp_distance)], "dim_to_use": stats.randint(low=0, high=n_dimensions), "g": stats.uniform(0.2 * dataset_stdp, 0.8 * dataset_stdp - 0.2 * dataset_stdp), "band_size": stats.randint(low=0, high=max_raw_warping_window + 1) # scipy stats randint is exclusive on the max value, hence + 1 } def lcss_distance_measure_getter(X): """Generate the lcss distance measure. :param X: dataset to derive parameter ranges from :return: distance measure and parameter range dictionary """ dataset_stdp = stdp(X) instance_length = max_instance_length(X) # todo should this use the max instance # length for unequal length dataset instances? max_raw_warping_window = np.floor((instance_length + 1) / 4) n_dimensions = 1 # todo use other dimensions return { "distance_measure": [numba_wrapper(lcss_distance)], "dim_to_use": stats.randint(low=0, high=n_dimensions), "epsilon": stats.uniform(0.2 * dataset_stdp, dataset_stdp - 0.2 * dataset_stdp), # scipy stats randint is exclusive on the max value, hence + 1 "delta": stats.randint(low=0, high=max_raw_warping_window + 1), } def twe_distance_measure_getter(X): """Generate the twe distance measure. :param X: dataset to derive parameter ranges from :return: distance measure and parameter range dictionary """ # requires the twe_distance import above, which is currently commented out return { "distance_measure": [numba_wrapper(twe_distance)], "penalty": [ 0, 0.011111111, 0.022222222, 0.033333333, 0.044444444, 0.055555556, 0.066666667, 0.077777778, 0.088888889, 0.1, ], "stiffness": [0.00001, 0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1], } def wdtw_distance_measure_getter(X): """Generate the wdtw distance measure. :param X: dataset to derive parameter ranges from :return: distance measure and parameter range dictionary """ return { "distance_measure": [numba_wrapper(wdtw_distance)], "g": stats.uniform(0, 1), } def euclidean_distance_measure_getter(X): """Generate the ed distance measure.
:param X: dataset to derive parameter ranges from :return: distance measure and parameter range dictionary """ # Euclidean distance is equivalent to DTW with a zero-width warping window return {"distance_measure": [numba_wrapper(dtw_distance)], "w": [0]} def setup_wddtw_distance_measure_getter(transformer): """Generate the wddtw distance measure by baking the derivative transformer. into the wdtw distance measure :param transformer: the transformer to use :return: a getter to produce the distance measure """ def getter(X): return { "distance_measure": [ _derivative_distance(numba_wrapper(wdtw_distance), transformer) ], "g": stats.uniform(0, 1), } return getter def setup_ddtw_distance_measure_getter(transformer): """Generate the ddtw distance measure by baking the derivative transformer. into the dtw distance measure :param transformer: the transformer to use :return: a getter to produce the distance measure """ def getter(X): return { "distance_measure": [ _derivative_distance(numba_wrapper(dtw_distance), transformer) ], "w": stats.uniform(0, 0.25), } return getter def setup_all_distance_measure_getter(proximity): """Set all distance measure getter functions from a proximity object. :param proximity: a PT / PF / PS :return: a list of distance measure getters """ transformer = _CachedTransformer(DerivativeSlopeTransformer()) distance_measure_getters = [ euclidean_distance_measure_getter, dtw_distance_measure_getter, setup_ddtw_distance_measure_getter(transformer), wdtw_distance_measure_getter, setup_wddtw_distance_measure_getter(transformer), msm_distance_measure_getter, lcss_distance_measure_getter, erp_distance_measure_getter, # twe_distance_measure_getter, ] def pick_rand_distance_measure(proximity): """Generate a distance measure from a range of parameters. :param proximity: proximity object containing distance measures, ranges and dataset :return: a distance measure with no parameters """ random_state = proximity.random_state X = proximity.X distance_measure_getter = random_state.choice(distance_measure_getters) distance_measure_perm = distance_measure_getter(X) param_perm = pick_rand_param_perm_from_dict(distance_measure_perm, random_state) distance_measure = param_perm["distance_measure"] del param_perm["distance_measure"] return distance_predefined_params(distance_measure, **param_perm) return pick_rand_distance_measure def pick_rand_param_perm_from_dict(param_pool, random_state): """Pick a parameter permutation. Given a list of dictionaries containing potential values OR a list of values OR a distribution of values (a distribution must have the .rvs() function to sample values) param_pool : list of dicts OR list OR distribution parameters in the same format as GridSearchCV from scikit-learn.
example: param_grid = [ {'C': [1, 10, 100, 1000], 'kernel': ['linear']}, {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']}, ] Returns ------- param_perm : dict distance measure and corresponding parameters in dictionary format """ # construct empty permutation param_perm = {} # for each parameter for param_name, param_values in param_pool.items(): # if it is a list if isinstance(param_values, list): # randomly pick a value param_value = param_values[random_state.randint(len(param_values))] # if the value is another dict then get a random parameter # permutation from that dict (recursive over # 2 funcs) # if isinstance(param_value, dict): # no longer require # recursive param perms # param_value = _pick_param_permutation(param_value, # random_state) # else if parameter is a distribution elif hasattr(param_values, "rvs"): # sample from the distribution param_value = param_values.rvs(random_state=random_state) else: # otherwise we don't know how to obtain a value from the parameter raise Exception("unknown type of parameter pool") # add parameter name and value to permutation param_perm[param_name] = param_value return param_perm def pick_rand_param_perm_from_list(params, random_state): """Get a random parameter permutation. Providing a distance measure and corresponding parameters params : list of dicts parameters in the same format as GridSearchCV from scikit-learn. example: param_grid = [ {'C': [1, 10, 100, 1000], 'kernel': ['linear']}, {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']}, ] Returns ------- permutation : dict distance measure and corresponding parameters in dictionary format """ param_pool = random_state.choice(params) permutation = pick_rand_param_perm_from_dict(param_pool, random_state) return permutation def best_of_n_stumps(n): """Generate the function to pick the best of n stump evaluations. Parameters ---------- n : int the number of stumps to evaluate before picking the best. Must be 1 or more. Returns ------- find_best_stump : func function to find the best of n stumps. """ if n < 1: raise ValueError("n cannot be less than 1") def find_best_stump(proximity): """Pick the best of n stump evaluations. Parameters ---------- proximity : Proximity like object the proximity object to split data from. Returns ------- stump : ProximityStump the best stump / split of data of the n attempts.
""" stumps = [] # for n stumps for _ in range(n): # duplicate tree configuration stump = ProximityStump( random_state=proximity.random_state, get_exemplars=proximity.get_exemplars, distance_measure=proximity.distance_measure, setup_distance_measure=proximity.setup_distance_measure, get_distance_measure=proximity.get_distance_measure, get_gain=proximity.get_gain, verbosity=proximity.verbosity, n_jobs=proximity.n_jobs, ) # grow the stump stump.fit(proximity.X, proximity.y) stump.grow() stumps.append(stump) # pick the best stump based upon gain stump = _max(stumps, proximity.random_state, lambda stump: stump.entropy) return stump return find_best_stump class ProximityStump(BaseClassifier): """Proximity Stump.""" np.random.seed(1234) def __init__( self, random_state=0, setup_distance_measure=setup_all_distance_measure_getter, get_distance_measure=None, distance_measure=dtw_distance, X=None, y=None, label=None, verbosity=0, n_stumps=5, n_jobs=1, ): self.setup_distance_measure = setup_distance_measure self.random_state = random_state self.n_stumps = n_stumps self.get_distance_measure = get_distance_measure self.distance_measure = distance_measure self.verbosity = verbosity self.n_jobs = n_jobs # set in fit self.num_children = None self.label_encoder = None # exemplars self.y_exemplar = None self.X_exemplar = None # temp_exemplars self.temp_exemplar = dict() # best_splits self.X_best_splits = None self.y_best_splits = None # Datasets self.X = X self.y = y # splits self.children = list() self.is_leaf = False self.classes_ = dict() self.label = label self.entropy = None super(ProximityStump, self).__init__() def set_X(self, X): """Set X.""" self.X = X def set_y(self, y): """Set y.""" self.y = y @staticmethod def gini_gain(y, y_subs): """Get gini score of a split, i.e. the gain from parent to children. Parameters ---------- y : 1d array like array of class labels at parent y_subs : list of 1d array like list of array of class labels, one array per child Returns ------- score : float gini score of the split from parent class labels to children. Note a higher score means better gain, i.e. a better split """ y = np.array(y) # find number of instances overall parent_n_instances = y.shape[0] # if parent has no instances then is pure if parent_n_instances == 0: for child in y_subs: if len(child) > 0: raise ValueError("children populated but parent empty") return 0.5 # find gini for parent node score = ProximityStump.gini(y) # sum the children's gini scores for index in range(len(y_subs)): child_class_labels = y_subs[index] # ignore empty children if len(child_class_labels) > 0: # find gini score for this child child_score = ProximityStump.gini(child_class_labels) # weight score by proportion of instances at child compared to # parent child_size = len(child_class_labels) child_score *= child_size / parent_n_instances # add to cumulative sum score -= child_score return score @staticmethod def gini(y): """Get gini score at a specific node. Parameters ---------- y : 1d numpy array array of class labels Returns ------- score : float gini score for the set of class labels (i.e. how pure they are). A larger score means more impurity. Zero means pure. 
""" y = np.array(y) # get number instances at node try: n_instances = y.shape[0] except exception: n_instances = 0 if n_instances > 0: # count each class unique_class_labels, class_counts = np.unique(y, return_counts=True) # subtract class entropy from current score for each class class_counts = np.divide(class_counts, n_instances) class_counts = np.power(class_counts, 2) sum = np.sum(class_counts) return 1 - sum else: # y is empty, therefore considered pure return 0 # raise ValueError(' y empty') @staticmethod def split_X_per_class(X, y): """Split by class. :param X: Array-like containing instances :param y: Array-like containing class labels :return: Returns a dictionary {Label: [sub_X]} in which sub_X contains all instances that match that label """ split_class_x = dict() y_size = len(y) for index in range(y_size): label = y[index] if not split_class_x.keys().__contains__(label): split_class_x[label] = list() if X.shape == 3: split_class_x[label].append(X[index][0]) else: split_class_x[label].append(X[index]) return split_class_x @staticmethod def calculate_dist_to_exemplars_inst(exemplars, instance, distance_measure): """Calculate distance to exemplars.""" distances = list() indices = list() if len(exemplars) == 0: return None for index in range(len(exemplars)): exemplar = exemplars[index] try: distance = distance_measure(instance, exemplar) except exception: distance = np.inf distances.append(distance) indices.append(index) return distances, indices @staticmethod def find_closest_distances_inst(exemplars, instance, distance_measure): """Find closest distance instance.""" distances = list() indices = list() min_distance = np.math.inf if (exemplars is None) or len(exemplars) == 0: return None, None for index in range(len(exemplars)): exemplar = exemplars[index][0] try: distance = distance_measure(instance, exemplar) except Exception: distance = np.inf if len(indices) == 0: min_distance = distance distances.append(distance) indices.append(index) else: if distance < min_distance: min_distance = distance distances.clear() distances.append(distance) indices.clear() indices.append(index) elif distance == min_distance: distances.append(distance) indices.append(index) return distances, indices @staticmethod def find_closest_distance(exemplars, instance, distance_measure): """Find closest distance.""" distance, indices = ProximityStump.find_closest_distances_inst( exemplars, instance, distance_measure ) if distance is None: return -1, -1 elif len(distance) == 1: return distance[0], indices[0] else: r = np.random.randint(0, len(distance)) return distance[r], indices[r] def find_closest_distance_(self, instance, distance_measure): """Find closest distance.""" return ProximityStump.find_closest_distance( self.X_exemplar, instance, distance_measure ) def find_closest_exemplar_indices(self, X): """Find closes exemplar indices.""" check_X(X) # todo make checks optional and propogate from forest downwards n_instances = X.shape[0] distances = self.distance_to_exemplars(X) indices = np.empty(X.shape[0], dtype=int) for index in range(n_instances): exemplar_distances = distances[index] closest_exemplar_index = arg_mins(exemplar_distances, self.random_state) indices[index] = closest_exemplar_index[0] return indices def split_stump(self, X, y, dataset_per_class): """Split stump.""" splits_x = dict() # {index: x_list} splits_y = dict() # {index: y_list} label_branch = 0 for label in dataset_per_class.keys(): if len(dataset_per_class[label]) == 0: continue else: sub_X = dataset_per_class[ label ] # sub_X is a 
list of series/arrays that belong to a label r = np.random.randint(0, len(sub_X)) # select a random element in sub_X splits_x[label_branch] = list() splits_y[label_branch] = list() self.temp_exemplar[label_branch] = sub_X[r] # sub_X[r] is a series self.classes_[label_branch] = label label_branch = label_branch + 1 for j in range(X.shape[0]): instance = self.X[j][0] closest_distance, index = ProximityStump.find_closest_distance( self.temp_exemplar, instance, self.distance_measure ) if closest_distance == -1: return splits_x, splits_y splits_x[index].append(X[j][0]) splits_y[index].append(y[j]) return splits_x, splits_y # <index, list_x>, <index, list_y> def find_best_stumps(self, X, y): """Find best stumps.""" x_per_label = self.split_X_per_class(X, y) best_weighted_gini = np.inf x_size = len(X) for _ in range(self.n_stumps): splits_x, splits_y = self.split_stump(X, y, x_per_label) if len(splits_x) == 0: return self.X_best_splits, self.y_best_splits weighted_gini = self.weighted_gini(x_size, splits_x, splits_y) if weighted_gini < best_weighted_gini: best_weighted_gini = weighted_gini self.X_best_splits = splits_x self.y_best_splits = splits_y self.X_exemplar = self.temp_exemplar self.y_exemplar = self.temp_exemplar.keys() self.num_children = len(self.X_best_splits) return self.X_best_splits, self.y_best_splits @staticmethod def weighted_gini(x_size, splits_x, splits_y): """Find weighted Gini.""" wgini = 0.0 for index in range(len(splits_x)): spt_x = splits_x[index] spt_y = splits_y[index] wgini = wgini + (len(spt_x) / x_size) * ProximityStump.gini(spt_y) return wgini def calculate_distance_to_exemplars(self, X): """Find distance to exemplars. :param X: the dataset containing a list of instances :return: 2d numpy array of distances from each instance to each exemplar (instance by exemplar) """ check_X(X) if self.n_jobs > 1 or self.n_jobs < 0: parallel = Parallel(self.n_jobs) distances = parallel( delayed(self.calculate_dist_to_exemplars_inst)( self.X_exemplar, X[index, :][0], self.distance_measure ) for index in range(X.shape[0]) ) else: distances = [ self.calculate_dist_to_exemplars_inst( self.X_exemplar, X[index, :][0], self.distance_measure ) for index in range(X.shape[0]) ] return distances def distance_to_exemplars(self, X): """Distance to exemplars.""" distances = self.calculate_distance_to_exemplars(X) distances = [x[0] for x in distances] distances = np.vstack(np.array(distances)) return distances def distance_to_exemplars_indices(self, X): """Distance to exemplars indices.""" distances_indices = self.calculate_distance_to_exemplars(X) indices = [x[1][0] for x in distances_indices] return indices def fit(self, X=None, y=None): """Fit.""" if X is None: X = self.X if y is None: y = self.y if len(y) == 0: return gini = ProximityStump.gini(y) if gini == 0: self.label = int(y[0]) self.is_leaf = True return if len(X.shape) == 2: X = X.reshape((X.shape[0], 1, X.shape[1])) self.find_best_stumps(X, y) if len(self.X_best_splits) > 0: for i in range(0, len(self.X_best_splits.values())): self.children.append( ProximityStump( X=X, y=y, label=y[i], distance_measure=self.distance_measure ) ) counter = 0 for index in self.X_best_splits.keys(): x_branches = self.X_best_splits[index] y_branches = self.y_best_splits[index] try: splits_x = np.array(x_branches) splits_y = np.array(y_branches) if len(splits_x.shape) == 2: splits_x = splits_x.reshape( (splits_x.shape[0], 1, splits_x.shape[1]) ) self.children[counter].fit(splits_x, splits_y) except RecursionError: return counter = counter + 1 def
predict_proba(self, X): """Find probability estimates for each class for all cases in X. Parameters ---------- X : array-like or sparse matrix of shape = [n_instances, n_columns] The training input samples. If a Pandas data frame is passed (sktime format) If a Pandas data frame is passed, a check is performed that it only has one column. If not, an exception is thrown, since this classifier does not yet have multivariate capability. Returns ------- output : array of shape = [n_instances, n_classes] of probabilities """ X = check_X(X, enforce_univariate=True) distances = self.distance_to_exemplars(X) ones = np.ones(distances.shape) distances = np.add(distances, ones) distributions = np.divide(ones, distances) normalize(distributions, copy=False, norm="l1") return distributions class ProximityTree(BaseClassifier): """Proximity Tree class to model a distance based decision tree. Attributes ---------- label_encoder: label encoder to change string labels to numeric indices classes_: unique list of classes random_state: the random state get_exemplars: function to extract exemplars from a dataframe and class value list setup_distance_measure: function to setup the distance measure getters from dataframe and class value list get_distance_measure: distance measure getters distance_measure: distance measures get_gain: function to score the quality of a split verbosity: logging verbosity n_jobs: number of jobs to run in parallel *across threads" find_stump: function to find the best split of data max_depth: max tree depth depth: current depth of tree, as each node is a tree itself, therefore can have a depth of >=0 X: train data y: train data labels stump: the stump used to split data at this node branches: the partitions of data driven by the stump """ def __init__( self, # note: any changes of these params must be reflected in # the fit method for building trees / clones random_state=0, get_exemplars=get_one_exemplar_per_class_proximity, distance_measure=dtw_distance, get_distance_measure=None, setup_distance_measure=setup_all_distance_measure_getter, get_gain=gini_gain, max_depth=np.math.inf, is_leaf=pure, verbosity=0, n_jobs=1, n_stump_evaluations=5, find_stump=None, ): """Build a Proximity Tree object. 
:param random_state: the random state :param get_exemplars: get the exemplars from a given dataframe and list of class labels :param distance_measure: distance measure to use :param get_distance_measure: method to get the distance measure if no already set :param setup_distance_measure: method to setup the distance measures based upon the dataset given :param get_gain: method to find the gain of a data split :param max_depth: maximum depth of the tree :param is_leaf: function to decide when to mark a node as a leaf node :param verbosity: number reflecting the verbosity of logging :param n_jobs: number of parallel threads to use while building :param find_stump: method to find the best split of data / stump at a node :param n_stump_evaluations: number of stump evaluations to do if find_stump method is None """ super().__init__() self.verbosity = verbosity self.n_stump_evaluations = n_stump_evaluations self.find_stump = find_stump self.max_depth = max_depth self.get_distance_measure = distance_measure self.random_state = check_random_state(random_state) self.get_distance_measure = get_distance_measure self.setup_distance_measure = setup_all_distance_measure_getter self.get_gain = get_gain self.n_jobs = n_jobs self.depth = 0 # below set in fit method self.label_encoder = None self.distance_measure = distance_measure self.root_stump = None self.branches = None self.X = None self.y = None self._is_fitted = False self.classes_ = None def fit(self, X, y, random_state=0): """Fit.""" self.classes_ = np.unique(y) self.root_stump = ProximityStump( X=X, y=y, n_stumps=self.n_stump_evaluations, distance_measure=self.distance_measure, random_state=self.random_state, ) self.root_stump.fit() self._is_fitted = True def predict_class_label(self, query): """Predict the label of a query. :param query: :return: """ stump = self.root_stump while not stump.is_leaf: child_index = stump.find_closest_distance_(query, self.distance_measure)[1] if child_index == -1: stump.is_leaf = True continue stump = stump.children[child_index] return stump.label def predict_proba(self, X): """Predict proba.""" n_instances = X.shape[0] predictions = np.zeros(n_instances, dtype=int) for index in range(n_instances): query = X[index][0] class_label = self.predict_class_label(query) predictions[index] = class_label return predictions class ProximityForest(BaseClassifier): """Proximity Forest class to model a decision tree forest. Which uses distance measures to partition data. @article{lucas19proximity, title={Proximity Forest: an effective and scalable distance-based classifier for time series}, author={B. Lucas and A. Shifaz and C. Pelletier and L. O’Neill and N. Zaidi and B. Goethals and F. Petitjean and G. 
Webb}, journal={Data Mining and Knowledge Discovery}, volume={33}, number={3}, pages={607--635}, year={2019} } Attributes ---------- label_encoder: label encoder to change string labels to numeric indices classes_: unique list of classes random_state: the random state get_exemplars: function to extract exemplars from a dataframe and class value list setup_distance_measure_getter: function to setup the distance measure getters from dataframe and class value list get_distance_measure: distance measure getters distance_measure: distance measures get_gain: function to score the quality of a split verbosity: logging verbosity n_jobs: number of jobs to run in parallel *across threads" find_stump: function to find the best split of data max_depth: max tree depth X: train data y: train data labels trees: list of trees in the forest """ def __init__( self, random_state=0, n_estimators=100, distance_measure=dtw_distance, get_distance_measure=None, get_exemplars=get_one_exemplar_per_class_proximity, get_gain=gini_gain, verbosity=0, max_depth=np.math.inf, is_leaf=pure, n_jobs=1, n_stump_evaluations=5, find_stump=None, setup_distance_measure_getter=setup_all_distance_measure_getter, ): """Build a Proximity Forest object. :param random_state: the random state :param get_exemplars: get the exemplars from a given dataframe and list of class labels :param distance_measure: distance measure to use :param get_distance_measure: method to get the distance measure if no already set :param setup_distance_measure_getter: method to setup the distance measures based upon the dataset given :param get_gain: method to find the gain of a data split :param max_depth: maximum depth of the tree :param is_leaf: function to decide when to mark a node as a leaf node :param verbosity: number reflecting the verbosity of logging :param n_jobs: number of parallel threads to use while building :param find_stump: method to find the best split of data / stump at a node :param n_stump_evaluations: number of stump evaluations to do if find_stump method is None :param n_estimators: number of trees to construct """ self.verbosity = verbosity self.max_depth = max_depth self.get_exemplars = get_exemplars self.get_gain = get_gain self.random_state = random_state self.n_estimators = n_estimators self.n_jobs = n_jobs self.n_stump_evaluations = n_stump_evaluations self.get_distance_measure = get_distance_measure self.setup_distance_measure_getter = setup_distance_measure_getter self.distance_measure = distance_measure self.find_stump = find_stump # set in fit method self.label_encoder = None self.X = None self.y = None self.classes_ = None self.trees = list() self.num_classes_predicted = dict() for _ in range(self.n_estimators): self.trees.append( ProximityTree( n_stump_evaluations=self.n_stump_evaluations, distance_measure=self.distance_measure, ) ) super(ProximityForest, self).__init__() def _fit_tree(self, X, y, index, random_state=0): self.trees[index].fit(X, y, random_state) return self.trees[index] @staticmethod def _predict_proba_tree(X, tree): return tree.predict_proba(X) def fit_tree(self, X, y, index, random_state): """Build the classifier on the training set (X, y). X : array-like or sparse matrix of shape = [n_instances,n_columns] The training input samples. If a Pandas data frame is passed, column 0 is extracted. y : array-like, shape = [n_instances] The class labels. 
index : index of the tree to be constructed random_state: random_state to send to the tree to be constructed Returns ------- self : object """ if self.verbosity > 0: print("tree " + str(index) + " building") # noqa tree = ProximityTree( random_state=random_state, verbosity=self.verbosity, distance_measure=self.distance_measure, get_distance_measure=self.get_distance_measure, max_depth=self.max_depth, n_jobs=1, find_stump=self.find_stump, n_stump_evaluations=self.n_stump_evaluations, ) tree.fit(X, y) return tree def fit(self, X, y): """Fit.""" X, y = check_X_y(X, y, enforce_univariate=True) X = from_nested_to_3d_numpy(X) self.random_state = check_random_state(self.random_state) # setup label encoding if self.label_encoder is None: self.label_encoder = LabelEncoder() y = self.label_encoder.fit_transform(y) self.y = y self.classes_ = self.label_encoder.classes_ if self.distance_measure is None: if self.get_distance_measure is None: self.get_distance_measure = self.setup_distance_measure_getter(self) self.distance_measure = self.get_distance_measure(self) if self.n_jobs > 1 or self.n_jobs < 0: parallel = Parallel(self.n_jobs) self.trees = parallel( delayed(self._fit_tree)( X, y, index, self.random_state.randint(0, self.n_estimators) ) for index in range(self.n_estimators) ) else: self.trees = [ self._fit_tree( X, y, index, self.random_state.randint(0, self.n_estimators) ) for index in range(self.n_estimators) ] self._is_fitted = True return self def predict_proba(self, X): """Find probability estimates for each class for all cases in X. Parameters ---------- X : array-like or sparse matrix of shape = [n_instances, n_columns] The training input samples. If a Pandas data frame is passed (sktime format) If a Pandas data frame is passed, a check is performed that it only has one column. If not, an exception is thrown, since this classifier does not yet have multivariate capability. Returns ------- output : array of shape = [n_instances, n_classes] of probabilities """ X = from_nested_to_3d_numpy(X) X = check_X(X, enforce_univariate=True) if self.n_jobs > 1 or self.n_jobs < 0: parallel = Parallel(self.n_jobs) predictions_per_tree = parallel( delayed(self._predict_proba_tree)(X, tree) for tree in self.trees ) else: predictions_per_tree = [ self._predict_proba_tree(X, tree) for tree in self.trees ] distributions = self.calculate_distributions(predictions_per_tree, X.shape[0]) distributions = np.array(distributions) normalize(distributions, copy=False, norm="l1") return distributions def calculate_prediction_counts(self, predictions): """Pick an array of labels predicted by the trees. Reorganize it into a dictionary {label: times predicted} :param predictions: Array-like which contains the label predicted by each tree :return: """ arr = np.array(predictions) count_arr = np.bincount(arr) prediction_counts = dict() for label in np.sort(np.unique(self.y)): try: prediction_counts[label] = count_arr[label] except IndexError: prediction_counts[label] = 0 return prediction_counts def calculate_distributions(self, predictions_per_tree, size): """Find probability estimates for each class for all cases in X. :param predictions_per_tree: Array-like of shape [n_instances,[n_tree_estimators,labels]] which contains an array of labels predicted by each tree for each instance. 
:param size: Size of X dataset containing the instances to predict :return: """ distributions = np.zeros((size, len(np.unique(self.classes_)))) for index in range(size): predicted_classes = list() for predict_tree in predictions_per_tree: predicted_classes.append(predict_tree[index]) prediction_counts = self.calculate_prediction_counts(predicted_classes) prediction_counts = list(prediction_counts.items()) prediction_counts_to_array = np.array(prediction_counts) sub_distribution = prediction_counts_to_array[ np.argsort(prediction_counts_to_array[:, 0]) ][:, 1] np.add.at(distributions, index, sub_distribution) return distributions
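# --- Illustrative sketch (appended; not part of the original module) ---
# calculate_distributions above turns per-tree label votes into per-class
# probabilities. The same idea with plain numpy, for 3 trees, 2 instances,
# and 3 classes:
import numpy as np

votes = np.array([[0, 1], [0, 2], [0, 1]])  # rows: trees, columns: instances
dist = np.zeros((votes.shape[1], 3))
for tree_votes in votes:
    for instance, label in enumerate(tree_votes):
        dist[instance, label] += 1
dist /= dist.sum(axis=1, keepdims=True)
# instance 0 -> [1.0, 0.0, 0.0]; instance 1 -> [0.0, 0.667, 0.333]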
PypiClean
/alipay-sdk-python-pycryptodome-3.3.202.tar.gz/alipay-sdk-python-pycryptodome-3.3.202/alipay/aop/api/request/ZhimaCreditEpSceneRatingInitializeRequest.py
import json from alipay.aop.api.FileItem import FileItem from alipay.aop.api.constant.ParamConstants import * from alipay.aop.api.domain.ZhimaCreditEpSceneRatingInitializeModel import ZhimaCreditEpSceneRatingInitializeModel class ZhimaCreditEpSceneRatingInitializeRequest(object): def __init__(self, biz_model=None): self._biz_model = biz_model self._biz_content = None self._version = "1.0" self._terminal_type = None self._terminal_info = None self._prod_code = None self._notify_url = None self._return_url = None self._udf_params = None self._need_encrypt = False @property def biz_model(self): return self._biz_model @biz_model.setter def biz_model(self, value): self._biz_model = value @property def biz_content(self): return self._biz_content @biz_content.setter def biz_content(self, value): if isinstance(value, ZhimaCreditEpSceneRatingInitializeModel): self._biz_content = value else: self._biz_content = ZhimaCreditEpSceneRatingInitializeModel.from_alipay_dict(value) @property def version(self): return self._version @version.setter def version(self, value): self._version = value @property def terminal_type(self): return self._terminal_type @terminal_type.setter def terminal_type(self, value): self._terminal_type = value @property def terminal_info(self): return self._terminal_info @terminal_info.setter def terminal_info(self, value): self._terminal_info = value @property def prod_code(self): return self._prod_code @prod_code.setter def prod_code(self, value): self._prod_code = value @property def notify_url(self): return self._notify_url @notify_url.setter def notify_url(self, value): self._notify_url = value @property def return_url(self): return self._return_url @return_url.setter def return_url(self, value): self._return_url = value @property def udf_params(self): return self._udf_params @udf_params.setter def udf_params(self, value): if not isinstance(value, dict): return self._udf_params = value @property def need_encrypt(self): return self._need_encrypt @need_encrypt.setter def need_encrypt(self, value): self._need_encrypt = value def add_other_text_param(self, key, value): if not self.udf_params: self.udf_params = dict() self.udf_params[key] = value def get_params(self): params = dict() params[P_METHOD] = 'zhima.credit.ep.scene.rating.initialize' params[P_VERSION] = self.version if self.biz_model: params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':')) if self.biz_content: if hasattr(self.biz_content, 'to_alipay_dict'): params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':')) else: params['biz_content'] = self.biz_content if self.terminal_type: params['terminal_type'] = self.terminal_type if self.terminal_info: params['terminal_info'] = self.terminal_info if self.prod_code: params['prod_code'] = self.prod_code if self.notify_url: params['notify_url'] = self.notify_url if self.return_url: params['return_url'] = self.return_url if self.udf_params: params.update(self.udf_params) return params def get_multipart_params(self): multipart_params = dict() return multipart_params
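# --- Illustrative usage sketch (appended; not part of the original module) ---
# A request of this family is populated with a payload and then flattened into
# the parameter dict that is signed and sent; the notify URL below is a
# placeholder, and real calls set biz_content to a
# ZhimaCreditEpSceneRatingInitializeModel (or an equivalent dict):
if __name__ == "__main__":
    request = ZhimaCreditEpSceneRatingInitializeRequest()
    request.notify_url = "https://example.com/notify"  # placeholder URL
    params = request.get_params()  # includes the fixed method name and version
    print(params)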
PypiClean
/pydda-1.4.0.tar.gz/pydda-1.4.0/doc/source/user_guide/dealiasing_velocities.rst
.. _dealiasing-velocities: Radar Data Quality Control - Dealiasing ======================================= In this notebook, we will showcase how to perform quality control of your radar files, specifically dealiasing velocities. By doing this we can provide PyDDA with quality controlled doppler velocities for dual doppler analysis. ------------- Read the Data ------------- For this example, we use test data found in Py-ART for two X-Band Scanning ARM Precipitation Radars (X-SAPR) found at the Atmospheric Radiation Measurement (ARM) Southern Great Plains (SGP) site in Oklahoma. For more information on reading the radar data, consult :ref:`reading-radar-data`. Get test data:: https://arm-doe.github.io/pyart/API/generated/pyart.testing.get_test_data.html Reading CF-Radial:: https://arm-doe.github.io/pyart/API/generated/pyart.io.read_cfradial.html .. code-block:: python # read in the data from both XSAPR radars xsapr_sw_file = get_test_data("swx_20120520_0641.nc") xsapr_se_file = get_test_data("sex_20120520_0641.nc") radar_sw = pyart.io.read_cfradial(xsapr_sw_file) radar_se = pyart.io.read_cfradial(xsapr_se_file) ++++++++++++++++++++++++++++ Plot Velocity of Both Radars ++++++++++++++++++++++++++++ .. code-block:: python fig = plt.figure(figsize=(16, 6)) ax = plt.subplot(121, projection=ccrs.PlateCarree()) # Plot the southwestern radar disp1 = pyart.graph.RadarMapDisplay(radar_sw) disp1.plot_ppi_map( "mean_doppler_velocity", sweep=1, ax=ax, vmin=-32, vmax=32, min_lat=36, max_lat=37, min_lon=-98, max_lon=-97, lat_lines=np.arange(36, 37.25, 0.25), lon_lines=np.arange(-98, -96.75, 0.25), cmap='twilight_shifted' ) # Plot the southeastern radar ax2 = plt.subplot(122, projection=ccrs.PlateCarree()) disp2 = pyart.graph.RadarMapDisplay(radar_se) disp2.plot_ppi_map( "mean_doppler_velocity", sweep=1, ax=ax2, vmin=-32, vmax=32, min_lat=36, max_lat=37, min_lon=-98, max_lon=-97, lat_lines=np.arange(36, 37.25, 0.25), lon_lines=np.arange(-98, -96.75, 0.25), cmap='twilight_shifted' ) .. plot:: import warnings import cartopy.crs as ccrs import matplotlib.pyplot as plt import numpy as np import pyart from pyart.testing import get_test_data warnings.filterwarnings("ignore") # read in the data from both XSAPR radars xsapr_sw_file = get_test_data("swx_20120520_0641.nc") xsapr_se_file = get_test_data("sex_20120520_0641.nc") radar_sw = pyart.io.read_cfradial(xsapr_sw_file) radar_se = pyart.io.read_cfradial(xsapr_se_file) fig = plt.figure(figsize=(16, 6)) ax = plt.subplot(121, projection=ccrs.PlateCarree()) # Plot the southwestern radar disp1 = pyart.graph.RadarMapDisplay(radar_sw) disp1.plot_ppi_map( "mean_doppler_velocity", sweep=1, ax=ax, vmin=-32, vmax=32, min_lat=36, max_lat=37, min_lon=-98, max_lon=-97, lat_lines=np.arange(36, 37.25, 0.25), lon_lines=np.arange(-98, -96.75, 0.25), cmap='twilight_shifted' ) # Plot the southeastern radar ax2 = plt.subplot(122, projection=ccrs.PlateCarree()) disp2 = pyart.graph.RadarMapDisplay(radar_se) disp2.plot_ppi_map( "mean_doppler_velocity", sweep=1, ax=ax2, vmin=-32, vmax=32, min_lat=36, max_lat=37, min_lon=-98, max_lon=-97, lat_lines=np.arange(36, 37.25, 0.25), lon_lines=np.arange(-98, -96.75, 0.25), cmap='twilight_shifted' ) ----------------------------------------------- Determining Artifacts within Doppler Velocities ----------------------------------------------- Before dealiasing the radar velocities, we need to remove noise and clutter from the radar objects.
Utilizing Py-ART, we will accomplish this by calculating the velocity texture, or the standard deviation of velocity surrounding a gate. Py-ART's calculate_velocity_texture function:: https://arm-doe.github.io/pyart/API/generated/pyart.retrieve.calculate_velocity_texture.html .. code-block:: python # Calculate the Velocity Texture and apply the PyART GateFilter Utility vel_tex_sw = pyart.retrieve.calculate_velocity_texture(radar_sw, vel_field='mean_doppler_velocity', nyq=19 ) vel_tex_se = pyart.retrieve.calculate_velocity_texture(radar_se, vel_field='mean_doppler_velocity', nyq=19 ) ## Add velocity texture to the radar objects radar_sw.add_field('velocity_texture', vel_tex_sw, replace_existing=True) radar_se.add_field('velocity_texture', vel_tex_se, replace_existing=True) +++++++++++++++++++++++++ Velocity Texture Displays +++++++++++++++++++++++++ Let's see what this velocity texture looks like. Additionally, a histogram of velocity texture values will allow for the determination of a threshold to distinguish the hydrometeor signal from artifacts. .. code-block:: python # Display the calculated velocity texture fig = plt.figure(figsize=[8, 6]) display = pyart.graph.RadarDisplay(radar_sw) display.plot_ppi('velocity_texture', sweep=0, vmin=0, vmax=10, cmap=plt.get_cmap('twilight_shifted') ) # Plot a histogram of the velocity textures fig = plt.figure(figsize=[8, 8]) hist, bins = np.histogram(radar_sw.fields['velocity_texture']['data'], bins=150) bins = (bins[1:]+bins[:-1])/2.0 plt.plot(bins, hist, label='Velocity Texture Frequency' ) plt.axvline(3, color='r', label='Proposed Velocity Texture Threshold' ) plt.xlabel('Velocity texture') plt.ylabel('Count') plt.legend() .. plot:: import warnings import cartopy.crs as ccrs import matplotlib.pyplot as plt import numpy as np import pyart from pyart.testing import get_test_data warnings.filterwarnings("ignore") # read in the data from both XSAPR radars xsapr_sw_file = get_test_data("swx_20120520_0641.nc") xsapr_se_file = get_test_data("sex_20120520_0641.nc") radar_sw = pyart.io.read_cfradial(xsapr_sw_file) radar_se = pyart.io.read_cfradial(xsapr_se_file) # Calculate the Velocity Texture and apply the PyART GateFilter Utility vel_tex_sw = pyart.retrieve.calculate_velocity_texture(radar_sw, vel_field='mean_doppler_velocity', nyq=19 ) vel_tex_se = pyart.retrieve.calculate_velocity_texture(radar_se, vel_field='mean_doppler_velocity', nyq=19 ) ## Add velocity texture to the radar objects radar_sw.add_field('velocity_texture', vel_tex_sw, replace_existing=True) radar_se.add_field('velocity_texture', vel_tex_se, replace_existing=True) fig = plt.figure(figsize=(8, 6)) display = pyart.graph.RadarDisplay(radar_sw) display.plot_ppi('velocity_texture', sweep=0, vmin=0, vmax=10, cmap=plt.get_cmap('twilight_shifted') ) # Plot a histogram of the velocity textures fig = plt.figure(figsize=[8, 8]) hist, bins = np.histogram(radar_sw.fields['velocity_texture']['data'], bins=150) bins = (bins[1:]+bins[:-1])/2.0 plt.plot(bins, hist, label='Velocity Texture Frequency' ) plt.axvline(3, color='r', label='Proposed Velocity Texture Threshold' ) plt.xlabel('Velocity texture') plt.ylabel('Count') plt.legend() +++++++++++++++++++++++++++++++++ Filter Doppler Velocity Artifacts +++++++++++++++++++++++++++++++++ Now that we have determined which velocity texture values correspond to artifacts within the doppler velocity data, we utilize Py-ART's GateFilter to filter out these artifacts. Py-ART's GateFilter function::
https://arm-doe.github.io/pyart/API/generated/pyart.filters.GateFilter.html .. code-block:: python # Apply a GateFilter gatefilter_sw = pyart.filters.GateFilter(radar_sw) gatefilter_sw.exclude_above('velocity_texture', 3) gatefilter_se = pyart.filters.GateFilter(radar_se) gatefilter_se.exclude_above('velocity_texture', 3) ---------------- Apply Dealiasing ---------------- Now that we have removed artifacts, we can proceed with dealiasing the doppler velocity data with Py-ART's Region-Based Dealiasing Algorithm. The region-based dealiasing algorithm finds regions of similar velocities and unfolds and merges these pairs of regions until all data are unfolded. Py-ART's Region Based Dealiasing Correction:: https://arm-doe.github.io/pyart/API/generated/pyart.correct.dealias_region_based.html .. code-block:: python # Apply Region Based Dealiasing Utility vel_dealias_sw = pyart.correct.dealias_region_based(radar_sw, vel_field='mean_doppler_velocity', nyquist_vel=19, centered=True, gatefilter=gatefilter_sw ) # Apply Region Based Dealiasing Utility vel_dealias_se = pyart.correct.dealias_region_based(radar_se, vel_field='mean_doppler_velocity', nyquist_vel=19, centered=True, gatefilter=gatefilter_se ) # Add our data dictionary to the radar object radar_se.add_field('corrected_velocity', vel_dealias_se, replace_existing=True) radar_sw.add_field('corrected_velocity', vel_dealias_sw, replace_existing=True) +++++++++++++++++++++++++++++++++ Display Corrected Velocity Fields +++++++++++++++++++++++++++++++++ Let's check on our corrected velocity fields! .. code-block:: python fig = plt.figure(figsize=(16, 6)) # Plot the southwestern radar ax = plt.subplot(121, projection=ccrs.PlateCarree()) disp1 = pyart.graph.RadarMapDisplay(radar_sw) disp1.plot_ppi_map("corrected_velocity", sweep=1, ax=ax, vmin=-35, vmax=35, min_lat=36, max_lat=37, min_lon=-98, max_lon=-97, lat_lines=np.arange(36, 37.25, 0.25), lon_lines=np.arange(-98, -96.75, 0.25), cmap=plt.get_cmap('twilight_shifted') ) # Plot the southeastern radar ax2 = plt.subplot(122, projection=ccrs.PlateCarree()) disp2 = pyart.graph.RadarMapDisplay(radar_se) disp2.plot_ppi_map("corrected_velocity", sweep=1, ax=ax2, vmin=-35, vmax=35, min_lat=36, max_lat=37, min_lon=-98, max_lon=-97, lat_lines=np.arange(36, 37.25, 0.25), lon_lines=np.arange(-98, -96.75, 0.25), cmap=plt.get_cmap('twilight_shifted') ) ..
plot:: import warnings import cartopy.crs as ccrs import matplotlib.pyplot as plt import numpy as np import pyart from pyart.testing import get_test_data warnings.filterwarnings("ignore") # read in the data from both XSAPR radars xsapr_sw_file = get_test_data("swx_20120520_0641.nc") xsapr_se_file = get_test_data("sex_20120520_0641.nc") radar_sw = pyart.io.read_cfradial(xsapr_sw_file) radar_se = pyart.io.read_cfradial(xsapr_se_file) # Calculate the Velocity Texture and apply the PyART GateFilter Utility vel_tex_sw = pyart.retrieve.calculate_velocity_texture(radar_sw, vel_field='mean_doppler_velocity', nyq=19 ) vel_tex_se = pyart.retrieve.calculate_velocity_texture(radar_se, vel_field='mean_doppler_velocity', nyq=19 ) ## Add velocity texture to the radar objects radar_sw.add_field('velocity_texture', vel_tex_sw, replace_existing=True) radar_se.add_field('velocity_texture', vel_tex_se, replace_existing=True) # Apply a GateFilter gatefilter_sw = pyart.filters.GateFilter(radar_sw) gatefilter_sw.exclude_above('velocity_texture', 3) gatefilter_se = pyart.filters.GateFilter(radar_se) gatefilter_se.exclude_above('velocity_texture', 3) # Apply Region Based Dealiasing Utility vel_dealias_sw = pyart.correct.dealias_region_based(radar_sw, vel_field='mean_doppler_velocity', nyquist_vel=19, centered=True, gatefilter=gatefilter_sw ) # Apply Region Based Dealiasing Utility vel_dealias_se = pyart.correct.dealias_region_based(radar_se, vel_field='mean_doppler_velocity', nyquist_vel=19, centered=True, gatefilter=gatefilter_se ) # Add our data dictionary to the radar object radar_se.add_field('corrected_velocity', vel_dealias_se, replace_existing=True) radar_sw.add_field('corrected_velocity', vel_dealias_sw, replace_existing=True) fig = plt.figure(figsize=(16, 6)) # Plot the southwestern radar ax = plt.subplot(121, projection=ccrs.PlateCarree()) disp1 = pyart.graph.RadarMapDisplay(radar_sw) disp1.plot_ppi_map("corrected_velocity", sweep=1, ax=ax, vmin=-35, vmax=35, min_lat=36, max_lat=37, min_lon=-98, max_lon=-97, lat_lines=np.arange(36, 37.25, 0.25), lon_lines=np.arange(-98, -96.75, 0.25), cmap=plt.get_cmap('twilight_shifted') ) # Plot the southeastern radar ax2 = plt.subplot(122, projection=ccrs.PlateCarree()) disp2 = pyart.graph.RadarMapDisplay(radar_se) disp2.plot_ppi_map("corrected_velocity", sweep=1, ax=ax2, vmin=-35, vmax=35, min_lat=36, max_lat=37, min_lon=-98, max_lon=-97, lat_lines=np.arange(36, 37.25, 0.25), lon_lines=np.arange(-98, -96.75, 0.25), cmap=plt.get_cmap('twilight_shifted') ) ------- Summary ------- Utilizing Py-ART, we read in two radar files within close proximity to each other. We then corrected the radar doppler velocities to remove artifacts and clutter. Finally, utilizing Py-ART, we applied a region-based dealiasing algorithm to unfold the doppler velocities. Now that we have corrected velocities, incorporating these radars into PyDDA will be shown in the next notebook.
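As an optional last step (a minimal sketch, not part of the original notebook), the corrected radar objects can be written back to CF/Radial files with Py-ART, so the dealiased velocities do not have to be recomputed before running the dual doppler analysis:

.. code-block:: python

    # Save the quality-controlled radars for reuse in PyDDA
    pyart.io.write_cfradial("corrected_xsapr_sw.nc", radar_sw)
    pyart.io.write_cfradial("corrected_xsapr_se.nc", radar_se)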
PypiClean
/FiPy-3.4.4.tar.gz/FiPy-3.4.4/fipy/variables/faceGradVariable.py
from __future__ import division from __future__ import unicode_literals __docformat__ = 'restructuredtext' __all__ = [] from fipy.variables.faceVariable import FaceVariable from fipy.tools import numerix from fipy.tools import inline class _FaceGradVariable(FaceVariable): """ Test case for a vector cell variable >>> from fipy import * >>> m = Grid2D(nx=3, ny=3) >>> x, y = m.cellCenters >>> v = CellVariable(mesh=m, elementshape=(3,)) >>> v[0] = x >>> v[1] = y >>> v[2] = x**2 >>> v0 = CellVariable(mesh=m, value=x) >>> v1 = CellVariable(mesh=m, value=y) >>> v2 = CellVariable(mesh=m, value=x**2) >>> numerix.allequal(v.faceGrad.globalValue.shape, (2, 3, 24)) True >>> print(v0.faceGrad.allclose([[ 0.5, 1., 0.5, 0.5, 1., 0.5, 0.5, 1., 0.5, 0.5, 1., 0.5, 0., 1., 1., ... 0., 0., 1., 1., 0., 0., 1., 1., 0. ], ... [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., ... 0., 0., 0., 0., 0., 0., 0., 0., 0. ]])) True >>> print((v0.faceGrad.globalValue == v.faceGrad.globalValue[:, 0]).all()) True >>> print((v1.faceGrad.globalValue == v.faceGrad.globalValue[:, 1]).all()) True >>> print((v2.faceGrad.globalValue == v.faceGrad.globalValue[:, 2]).all()) True """ def __init__(self, var): FaceVariable.__init__(self, mesh=var.mesh, elementshape=(var.mesh.dim,) + var.shape[:-1]) self.var = self._requires(var) def _calcValue(self): if inline.doInline and self.var.rank == 0: return self._calcValueInline() else: return self._calcValueNoInline() def _calcValueInline(self): id1, id2 = self.mesh._adjacentCellIDs tangents1 = self.mesh._faceTangents1 tangents2 = self.mesh._faceTangents2 val = self._array.copy() faceNormals = self.mesh._orientedFaceNormals if numerix.MA.isMaskedArray(faceNormals): faceNormals = faceNormals.filled() inline._runIterateElementInline(""" int j; double t1grad1, t1grad2, t2grad1, t2grad2, N, N2; int ID1 = ITEM(id1, i, NULL); int ID2 = ITEM(id2, i, NULL); if (ITEM(exteriorFaces, i, NULL)) { N2 = ITEM(facevar, i, NULL); } else { N2 = ITEM(var, ID2, NULL); } N = (N2 - ITEM(var, ID1, NULL)) / ITEM(dAP, i, NULL); t1grad1 = t1grad2 = t2grad1 = t2grad2 = 0.; t1grad1 += ITEM(tangents1, i, vec) * ITEM(cellGrad, ID1, vec); t1grad2 += ITEM(tangents1, i, vec) * ITEM(cellGrad, ID2, vec); t2grad1 += ITEM(tangents2, i, vec) * ITEM(cellGrad, ID1, vec); t2grad2 += ITEM(tangents2, i, vec) * ITEM(cellGrad, ID2, vec); ITEM(val, i, vec) = ITEM(normals, i, vec) * N; ITEM(val, i, vec) += ITEM(tangents1, i, vec) * (t1grad1 + t1grad2) / 2.; ITEM(val, i, vec) += ITEM(tangents2, i, vec) * (t2grad1 + t2grad2) / 2.; """, tangents1 = tangents1, tangents2 = tangents2, cellGrad = self.var.grad.numericValue, normals = faceNormals, id1 = id1, id2 = id2, dAP = numerix.array(self.mesh._cellDistances), var = self.var.numericValue, facevar = self.var.faceValue.numericValue, exteriorFaces = self.mesh.exteriorFaces.numericValue, val = val, ni = tangents1.shape[1], shape=numerix.array(numerix.shape(tangents1))) return self._makeValue(value = val) def _calcValueNoInline(self): dAP = self.mesh._cellDistances id1, id2 = self.mesh._adjacentCellIDs N2 = numerix.take(self.var.value, id2, axis=-1) faceMask = numerix.array(self.mesh.exteriorFaces) ## The following conditional is required because empty ## indexing is not altogether functional. This ## numpy.empty((0,))[[]] and this numpy.empty((0,))[...,[]] ## both work, but this numpy.empty((3, 0))[...,[]] is ## broken.
if self.var.faceValue.shape[-1] != 0: s = (Ellipsis, faceMask) else: s = (faceMask,) N2[s] = self.var.faceValue[s] N = (N2 - numerix.take(self.var, id1, axis=-1)) / dAP normals = self.mesh._orientedFaceNormals tangents1 = self.mesh._faceTangents1 tangents2 = self.mesh._faceTangents2 cellGrad = self.var.grad.numericValue grad1 = numerix.take(cellGrad, id1, axis=-1) grad2 = numerix.take(cellGrad, id2, axis=-1) s = (slice(0, None, None),) + (numerix.newaxis,) * (len(grad1.shape) - 2) + (slice(0, None, None),) t1grad1 = numerix.sum(tangents1[s] * grad1, 0) t1grad2 = numerix.sum(tangents1[s] * grad2, 0) t2grad1 = numerix.sum(tangents2[s] * grad1, 0) t2grad2 = numerix.sum(tangents2[s] * grad2, 0) T1 = (t1grad1 + t1grad2) / 2. T2 = (t2grad1 + t2grad2) / 2. return normals[s] * N[numerix.newaxis] + tangents1[s] * T1[numerix.newaxis] + tangents2[s] * T2[numerix.newaxis] def _test(): import fipy.tests.doctestPlus return fipy.tests.doctestPlus.testmod() if __name__ == "__main__": _test()
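# --- Illustrative sketch (not part of FiPy) --------------------------------
# Both ``_calcValueInline`` and ``_calcValueNoInline`` above implement the
# same formula: the face gradient is the finite difference of the two
# adjacent cell values along the oriented face normal, plus the average of
# the two cell-centered gradients projected onto the face tangents. The
# function below spells that out for a single 2D interior face with
# hypothetical numbers; it is defined for reading, not called anywhere.
def _face_grad_sketch():
    import numpy as np
    n = np.array([1., 0.])       # oriented face normal
    t1 = np.array([0., 1.])      # face tangent (one tangent in 2D)
    phi1, phi2 = 2., 5.          # values in the two adjacent cells
    dAP = 1.                     # distance between the cell centers
    grad1 = np.array([3., 1.])   # cell-centered gradient in cell 1
    grad2 = np.array([3., -1.])  # cell-centered gradient in cell 2
    N = (phi2 - phi1) / dAP                # normal finite difference
    T1 = (t1 @ grad1 + t1 @ grad2) / 2.    # averaged tangential component
    return n * N + t1 * T1                 # -> array([3., 0.])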
PypiClean
/js.ace-1.4.11.tar.gz/js.ace-1.4.11/js/ace/resources/mode-puppet.js
ace.define("ace/mode/puppet_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text_highlight_rules").TextHighlightRules,s=function(){this.$rules={start:[{token:["keyword.type.puppet","constant.class.puppet","keyword.inherits.puppet","constant.class.puppet"],regex:'^\\s*(class)(\\s+(?:[-_A-Za-z0-9".]+::)*[-_A-Za-z0-9".]+\\s*)(?:(inherits\\s*)(\\s+(?:[-_A-Za-z0-9".]+::)*[-_A-Za-z0-9".]+\\s*))?'},{token:["storage.function.puppet","name.function.puppet","punctuation.lpar"],regex:"(^\\s*define)(\\s+[a-zA-Z0-9_:]+\\s*)(\\()",push:[{token:"punctuation.rpar.puppet",regex:"\\)",next:"pop"},{include:"constants"},{include:"variable"},{include:"strings"},{include:"operators"},{defaultToken:"string"}]},{token:["language.support.class","keyword.operator"],regex:"\\b([a-zA-Z_]+)(\\s+=>)"},{token:["exported.resource.puppet","keyword.name.resource.puppet","paren.lparen"],regex:"(\\@\\@)?(\\s*[a-zA-Z_]*)(\\s*\\{)"},{token:"qualified.variable.puppet",regex:"(\\$([a-z][a-z0-9_]*)?(::[a-z][a-z0-9_]*)*::[a-z0-9_][a-zA-Z0-9_]*)"},{token:"singleline.comment.puppet",regex:"#(.)*$"},{token:"multiline.comment.begin.puppet",regex:"^\\s*\\/\\*\\s*$",push:"blockComment"},{token:"keyword.control.puppet",regex:"\\b(case|if|unless|else|elsif|in|default:|and|or)\\s+(?!::)"},{token:"keyword.control.puppet",regex:"\\b(import|default|inherits|include|require|contain|node|application|consumes|environment|site|function|produces)\\b"},{token:"support.function.puppet",regex:"\\b(lest|str2bool|escape|gsub|Timestamp|Timespan|with|alert|crit|debug|notice|sprintf|split|step|strftime|slice|shellquote|type|sha1|defined|scanf|reverse_each|regsubst|return|emerg|reduce|err|failed|fail|versioncmp|file|generate|then|info|realize|search|tag|tagged|template|epp|warning|hiera_include|each|assert_type|binary_file|create_resources|dig|digest|filter|lookup|find_file|fqdn_rand|hiera_array|hiera_hash|inline_epp|inline_template|map|match|md5|new|next)\\b"},{token:"constant.types.puppet",regex:"\\b(String|File|Package|Service|Class|Integer|Array|Catalogentry|Variant|Boolean|Undef|Number|Hash|Float|Numeric|NotUndef|Callable|Optional|Any|Regexp|Sensitive|Sensitive.new|Type|Resource|Default|Enum|Scalar|Collection|Data|Pattern|Tuple|Struct)\\b"},{token:"paren.lparen",regex:"[[({]"},{token:"paren.rparen",regex:"[\\])}]"},{include:"variable"},{include:"constants"},{include:"strings"},{include:"operators"},{token:"regexp.begin.string.puppet",regex:"\\s*(\\/(\\S)+)\\/"}],blockComment:[{regex:"^\\s*\\/\\*\\s*$",token:"multiline.comment.begin.puppet",push:"blockComment"},{regex:"^\\s*\\*\\/\\s*$",token:"multiline.comment.end.puppet",next:"pop"},{defaultToken:"comment"}],constants:[{token:"constant.language.puppet",regex:"\\b(false|true|running|stopped|installed|purged|latest|file|directory|held|undef|present|absent|link|mounted|unmounted)\\b"}],variable:[{token:"variable.puppet",regex:"(\\$[a-z0-9_{][a-zA-Z0-9_]*)"}],strings:[{token:"punctuation.quote.puppet",regex:"'",push:[{token:"punctuation.quote.puppet",regex:"'",next:"pop"},{include:"escaped_chars"},{defaultToken:"string"}]},{token:"punctuation.quote.puppet",regex:'"',push:[{token:"punctuation.quote.puppet",regex:'"',next:"pop"},{include:"escaped_chars"},{include:"variable"},{defaultToken:"string"}]}],escaped_chars:[{token:"constant.escaped_char.puppet",regex:"\\\\."}],operators:[{token:"keyword.operator",regex:"\\+\\.|\\-\\.|\\*\\.|\\/\\.|#|;;|\\+|\\-|\\*|\\*\\*\\/|\\/\\/|%|<<|>>|&|\\||\\^|~|<|>|<=|=>|==
|!=|<>|<-|=|::|,"}]},this.normalizeRules()};r.inherits(s,i),t.PuppetHighlightRules=s}),ace.define("ace/mode/folding/cstyle",["require","exports","module","ace/lib/oop","ace/range","ace/mode/folding/fold_mode"],function(e,t,n){"use strict";var r=e("../../lib/oop"),i=e("../../range").Range,s=e("./fold_mode").FoldMode,o=t.FoldMode=function(e){e&&(this.foldingStartMarker=new RegExp(this.foldingStartMarker.source.replace(/\|[^|]*?$/,"|"+e.start)),this.foldingStopMarker=new RegExp(this.foldingStopMarker.source.replace(/\|[^|]*?$/,"|"+e.end)))};r.inherits(o,s),function(){this.foldingStartMarker=/([\{\[\(])[^\}\]\)]*$|^\s*(\/\*)/,this.foldingStopMarker=/^[^\[\{\(]*([\}\]\)])|^[\s\*]*(\*\/)/,this.singleLineBlockCommentRe=/^\s*(\/\*).*\*\/\s*$/,this.tripleStarBlockCommentRe=/^\s*(\/\*\*\*).*\*\/\s*$/,this.startRegionRe=/^\s*(\/\*|\/\/)#?region\b/,this._getFoldWidgetBase=this.getFoldWidget,this.getFoldWidget=function(e,t,n){var r=e.getLine(n);if(this.singleLineBlockCommentRe.test(r)&&!this.startRegionRe.test(r)&&!this.tripleStarBlockCommentRe.test(r))return"";var i=this._getFoldWidgetBase(e,t,n);return!i&&this.startRegionRe.test(r)?"start":i},this.getFoldWidgetRange=function(e,t,n,r){var i=e.getLine(n);if(this.startRegionRe.test(i))return this.getCommentRegionBlock(e,i,n);var s=i.match(this.foldingStartMarker);if(s){var o=s.index;if(s[1])return this.openingBracketBlock(e,s[1],n,o);var u=e.getCommentFoldRange(n,o+s[0].length,1);return u&&!u.isMultiLine()&&(r?u=this.getSectionRange(e,n):t!="all"&&(u=null)),u}if(t==="markbegin")return;var s=i.match(this.foldingStopMarker);if(s){var o=s.index+s[0].length;return s[1]?this.closingBracketBlock(e,s[1],n,o):e.getCommentFoldRange(n,o,-1)}},this.getSectionRange=function(e,t){var n=e.getLine(t),r=n.search(/\S/),s=t,o=n.length;t+=1;var u=t,a=e.getLength();while(++t<a){n=e.getLine(t);var f=n.search(/\S/);if(f===-1)continue;if(r>f)break;var l=this.getFoldWidgetRange(e,"all",t);if(l){if(l.start.row<=s)break;if(l.isMultiLine())t=l.end.row;else if(r==f)break}u=t}return new i(s,o,u,e.getLine(u).length)},this.getCommentRegionBlock=function(e,t,n){var r=t.search(/\s*$/),s=e.getLength(),o=n,u=/^\s*(?:\/\*|\/\/|--)#?(end)?region\b/,a=1;while(++n<s){t=e.getLine(n);var f=u.exec(t);if(!f)continue;f[1]?a--:a++;if(!a)break}var l=n;if(l>o)return new i(o,r,l,t.length)}}.call(o.prototype)}),ace.define("ace/mode/matching_brace_outdent",["require","exports","module","ace/range"],function(e,t,n){"use strict";var r=e("../range").Range,i=function(){};(function(){this.checkOutdent=function(e,t){return/^\s+$/.test(e)?/^\s*\}/.test(t):!1},this.autoOutdent=function(e,t){var n=e.getLine(t),i=n.match(/^(\s*\})/);if(!i)return 0;var s=i[1].length,o=e.findMatchingBracket({row:t,column:s});if(!o||o.row==t)return 0;var u=this.$getIndent(e.getLine(o.row));e.replace(new r(t,0,t,s-1),u)},this.$getIndent=function(e){return e.match(/^\s*/)[0]}}).call(i.prototype),t.MatchingBraceOutdent=i}),ace.define("ace/mode/puppet",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/puppet_highlight_rules","ace/mode/behaviour/cstyle","ace/mode/folding/cstyle","ace/mode/matching_brace_outdent"],function(e,t,n){"use strict";var r=e("../lib/oop"),i=e("./text").Mode,s=e("./puppet_highlight_rules").PuppetHighlightRules,o=e("./behaviour/cstyle").CstyleBehaviour,u=e("./folding/cstyle").FoldMode,a=e("./matching_brace_outdent").MatchingBraceOutdent,f=function(){i.call(this),this.HighlightRules=s,this.$outdent=new a,this.$behaviour=new o,this.foldingRules=new 
u};r.inherits(f,i),function(){this.$id="ace/mode/puppet"}.call(f.prototype),t.Mode=f}); (function() { ace.require(["ace/mode/puppet"], function(m) { if (typeof module == "object" && typeof exports == "object" && module) { module.exports = m; } }); })();
PypiClean
/google-dataproc-templates-0.4.0b0.tar.gz/google-dataproc-templates-0.4.0b0/dataproc_templates/gcs/text_to_bigquery.py
from typing import Dict, Sequence, Optional, Any from logging import Logger import argparse import pprint from pyspark.sql import SparkSession from dataproc_templates import BaseTemplate from dataproc_templates.util.argument_parsing import add_spark_options from dataproc_templates.util.dataframe_reader_wrappers import ingest_dataframe_from_cloud_storage import dataproc_templates.util.template_constants as constants __all__ = ['TextToBigQueryTemplate'] class TextToBigQueryTemplate(BaseTemplate): """ Dataproc template implementing text file loads from GCS into BigQuery """ @staticmethod def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]: parser: argparse.ArgumentParser = argparse.ArgumentParser() parser.add_argument( f'--{constants.TEXT_BQ_INPUT_LOCATION}', dest=constants.TEXT_BQ_INPUT_LOCATION, required=True, help='Cloud Storage location of the input text files' ) add_spark_options(parser, constants.get_csv_input_spark_options("text.bigquery.input.")) parser.add_argument( f'--{constants.TEXT_BQ_OUTPUT_DATASET}', dest=constants.TEXT_BQ_OUTPUT_DATASET, required=True, help='BigQuery dataset for the output table' ) parser.add_argument( f'--{constants.TEXT_BQ_OUTPUT_TABLE}', dest=constants.TEXT_BQ_OUTPUT_TABLE, required=True, help='BigQuery output table name' ) parser.add_argument( f'--{constants.TEXT_BQ_LD_TEMP_BUCKET_NAME}', dest=constants.TEXT_BQ_LD_TEMP_BUCKET_NAME, required=True, help='Spark BigQuery connector temporary bucket' ) parser.add_argument( f'--{constants.TEXT_BQ_OUTPUT_MODE}', dest=constants.TEXT_BQ_OUTPUT_MODE, required=False, default=constants.OUTPUT_MODE_APPEND, help=( 'Output write mode ' '(one of: append,overwrite,ignore,errorifexists) ' '(Defaults to append)' ), choices=[ constants.OUTPUT_MODE_OVERWRITE, constants.OUTPUT_MODE_APPEND, constants.OUTPUT_MODE_IGNORE, constants.OUTPUT_MODE_ERRORIFEXISTS ] ) parser.add_argument( f'--{constants.TEXT_INPUT_COMPRESSION}', dest=constants.TEXT_INPUT_COMPRESSION, required=True, help='Input file compression format (one of: bzip2,deflate,lz4,gzip,None)', default=None, choices=[ constants.COMPRESSION_BZIP2, constants.COMPRESSION_GZIP, constants.COMPRESSION_DEFLATE, constants.COMPRESSION_LZ4, constants.COMPRESSION_NONE ] ) parser.add_argument( f'--{constants.TEXT_INPUT_DELIMITER}', dest=constants.TEXT_INPUT_DELIMITER, required=False, help=( 'Input column delimiter ' '(example: ",", ";", "|", "/", "\\") ' ) ) known_args: argparse.Namespace known_args, _ = parser.parse_known_args(args) return vars(known_args) def run(self, spark: SparkSession, args: Dict[str, Any]) -> None: logger: Logger = self.get_logger(spark=spark) # Arguments input_location: str = args[constants.TEXT_BQ_INPUT_LOCATION] big_query_dataset: str = args[constants.TEXT_BQ_OUTPUT_DATASET] big_query_table: str = args[constants.TEXT_BQ_OUTPUT_TABLE] bq_temp_bucket: str = args[constants.TEXT_BQ_LD_TEMP_BUCKET_NAME] output_mode: str = args[constants.TEXT_BQ_OUTPUT_MODE] # These options are redundant but left in place until this template is removed # via issue https://github.com/GoogleCloudPlatform/dataproc-templates/issues/721 # input_delimiter: str = args[constants.TEXT_INPUT_DELIMITER] # input_file_codec_format: str = args[constants.TEXT_INPUT_COMPRESSION] logger.info( "Starting GCS to BigQuery Spark job with parameters:\n" f"{pprint.pformat(args)}" ) # Read input_data = ingest_dataframe_from_cloud_storage( spark, args, input_location, constants.FORMAT_CSV, "text.bigquery.input.", ) # Write input_data.write \ .format(constants.FORMAT_BIGQUERY) \
.option(constants.TABLE, big_query_dataset + "." + big_query_table) \ .option(constants.TEXT_BQ_TEMP_BUCKET, bq_temp_bucket) \ .mode(output_mode) \ .save()
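# --- Usage sketch (illustrative, not part of the template) -----------------
# ``parse_args`` accepts an explicit argv-style list, so the template can be
# exercised without a Dataproc runtime. The flag names come from the
# ``constants`` module at runtime; the bucket, dataset, and table names below
# are hypothetical, and this assumes none of the CSV Spark options added by
# ``add_spark_options`` are themselves required.
def _example_args():
    return TextToBigQueryTemplate.parse_args([
        f'--{constants.TEXT_BQ_INPUT_LOCATION}', 'gs://example-bucket/input/',
        f'--{constants.TEXT_BQ_OUTPUT_DATASET}', 'example_dataset',
        f'--{constants.TEXT_BQ_OUTPUT_TABLE}', 'example_table',
        f'--{constants.TEXT_BQ_LD_TEMP_BUCKET_NAME}', 'example-temp-bucket',
        f'--{constants.TEXT_INPUT_COMPRESSION}', constants.COMPRESSION_NONE,
    ])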
PypiClean
/shanhe-sdk-1.0.0.tar.gz/shanhe-sdk-1.0.0/shanhe/iaas/actions/volume.py
from shanhe.iaas import constants as const from shanhe.misc.utils import filter_out_none class VolumeAction(object): def __init__(self, conn): self.conn = conn def describe_volumes(self, volumes=None, volume_type=None, instance_id=None, status=None, owner=None, search_word=None, verbose=0, offset=None, limit=None, tags=None, **ignore): """ Describe volumes filtered by conditions. @param volumes : the array of IDs of volumes. @param volume_type : the type of volume, 0 is high performance, 1 is high capacity. @param instance_id: ID of the instance that the volume is currently attached to, if any. @param status: pending, available, in-use, deleted. @param owner: the ID of the owner of the volumes. @param search_word: the keyword for a combined search across columns. @param verbose: the verbosity level; the larger the number, the more detailed the returned information. @param offset: the starting offset of the returned results. @param limit: the maximum number of returned results. @param tags : the array of IDs of tags. """ valid_keys = ['volumes', 'instance_id', 'status', 'search_word', 'volume_type', 'verbose', 'offset', 'limit', 'tags', 'owner'] body = filter_out_none(locals(), valid_keys) if not self.conn.req_checker.check_params(body, required_params=[], integer_params=[ 'offset', 'limit', 'verbose'], list_params=[ 'volumes', 'status', 'tags'] ): return None return self.conn.send_request(const.ACTION_DESCRIBE_VOLUMES, body) def create_volumes(self, size, volume_name="", volume_type=0, count=1, target_user=None, **ignore): """ Create one or more volumes. @param size : the size of each volume. Unit is GB. @param volume_name : the short name of the volume. @param volume_type : the type of volume, 0 is high performance, 1 is high capacity. @param count : the number of volumes to create. @param target_user: ID of the user who will own this resource; should be one of your sub-accounts. """ action = const.ACTION_CREATE_VOLUMES valid_keys = ['size', 'volume_name', 'volume_type', 'count', 'target_user'] body = filter_out_none(locals(), valid_keys) if not self.conn.req_checker.check_params(body, required_params=['size'], integer_params=['size', 'count'], list_params=[] ): return None return self.conn.send_request(action, body) def delete_volumes(self, volumes, **ignore): """ Delete one or more volumes. @param volumes : An array including IDs of the volumes you want to delete. """ action = const.ACTION_DELETE_VOLUMES body = {'volumes': volumes} if not self.conn.req_checker.check_params(body, required_params=['volumes'], integer_params=[], list_params=['volumes'] ): return None return self.conn.send_request(action, body) def attach_volumes(self, volumes, instance, **ignore): """ Attach one or more volumes to the same instance. @param volumes : an array including IDs of the volumes you want to attach. @param instance : the ID of the instance the volumes will be attached to. """ action = const.ACTION_ATTACH_VOLUMES valid_keys = ['volumes', 'instance'] body = filter_out_none(locals(), valid_keys) if not self.conn.req_checker.check_params(body, required_params=[ 'volumes', 'instance'], integer_params=[], list_params=['volumes'] ): return None return self.conn.send_request(action, body) def detach_volumes(self, volumes, instance, **ignore): """ Detach one or more volumes from the same instance. @param volumes : An array including IDs of the volumes you want to detach. @param instance : the ID of the instance the volumes will be detached from.
""" action = const.ACTION_DETACH_VOLUMES valid_keys = ['volumes', 'instance'] body = filter_out_none(locals(), valid_keys) if not self.conn.req_checker.check_params(body, required_params=[ 'volumes', 'instance'], integer_params=[], list_params=['volumes'] ): return None return self.conn.send_request(action, body) def resize_volumes(self, volumes, size, **ignore): """ Extend one or more volumes' size. @param volumes: The IDs of the volumes you want to resize. @param size : The new larger size of the volumes, unit is GB """ action = const.ACTION_RESIZE_VOLUMES valid_keys = ['volumes', 'size'] body = filter_out_none(locals(), valid_keys) if not self.conn.req_checker.check_params(body, required_params=[ 'volumes', 'size'], integer_params=['size'], list_params=['volumes'] ): return None return self.conn.send_request(action, body) def modify_volume_attributes(self, volume, volume_name=None, description=None, **ignore): """ Modify volume attributes. @param volume: the ID of volume whose attributes you want to modify. @param volume_name: Name of the volume. It's a short name for the volume that more meaningful than volume id. @param description: The detailed description of the resource. """ action = const.ACTION_MODIFY_VOLUME_ATTRIBUTES valid_keys = ['volume', 'volume_name', 'description'] body = filter_out_none(locals(), valid_keys) if not self.conn.req_checker.check_params(body, required_params=['volume'], integer_params=[], list_params=[] ): return None return self.conn.send_request(action, body) def clone_volumes(self, zone, volume, volume_name="", volume_type=None, count=1, **ignore): """ Clone an existed volume to one or more new volumes. @param zone: the ID of zone for new volume. @param volume: the ID of volume you want to clone. @param volume_name: name of the volume. It's a short name for the volume that more meaningful than volume id. @param volume_type: type of the volume. @param count: how many volumes will be created. """ action = const.ACTION_CLONE_VOLUMES valid_keys = ['zone', 'volume', 'volume_name', 'volume_type', 'count'] body = filter_out_none(locals(), valid_keys) if not self.conn.req_checker.check_params(body, required_params=['zone', 'volume'], integer_params=['count'], list_params=[] ): return None return self.conn.send_request(action, body)
PypiClean
/heppyfwk-3.0.1.tar.gz/heppyfwk-3.0.1/heppy/particles/isolation.py
from heppy.utils.deltar import deltaR2 class Area(object): '''Base Area interface.''' def is_inside(self, *args): '''returns True if *args describes a particle inside the area. *args may be the particle itself, assuming it has eta() and phi() methods, or eta, phi. ''' pass class EtaPhiCircle(Area): '''Circle in (eta, phi) space. When running on a lepton collider, eta is replaced by theta. ''' def __init__(self, R): '''Create a circle of radius R''' self.R = R self._R2 = R**2 def is_inside(self, *args): dR2 = deltaR2(*args) return dR2 < self._R2 class IsolationInfo(object): '''Holds the results of an isolation calculation.''' def __init__(self, label, lepton): '''Create an IsolationInfo. Attributes: lepton = the lepton particles = list of particles around the lepton used in the calculation. the following quantities are computed for these particles sumpt = total pT for the particles sume = total energy for the particles num = total number of particles ''' self.particles = [] self.label = label self.lepton = lepton self.sumpt = 0 self.sume = 0 self.num = 0 def add_particle(self, ptc): '''Add a new particle and update counters.''' self.particles.append(ptc) self.sumpt += ptc.pt() self.sume += ptc.e() self.num += 1 def rm_particle(self, ptc): '''Remove a particle and update counters (e.g. for FSR recovery)''' self.particles.remove(ptc) self.sumpt -= ptc.pt() self.sume -= ptc.e() self.num -= 1 def __iadd__(self, other): self.particles.extend(other.particles) self.sumpt += other.sumpt self.sume += other.sume self.num += other.num return self def __str__(self): return 'iso {label:>3}: sumpt = {sumpt:5.2f}, sume = {sume:5.2f}, num = {num}'.format( label = self.label, sumpt = self.sumpt, sume = self.sume, num = self.num ) class IsolationComputer(object): '''Computes isolation for a given lepton.''' def __init__(self, on_areas, off_areas=None, pt_thresh=0, e_thresh=0, label=''): '''Creates the isolation computer. Particles around the lepton are considered in the isolation if: - they pass both thresholds: pt_thresh : pt threshold e_thresh : energy threshold - they are inside an active area around the lepton. on_areas and off_areas are lists of areas in which particles around the lepton should be considered or ignored, respectively. ''' self.on_areas = on_areas if off_areas is None: off_areas = [] self.off_areas = off_areas self.pt_thresh = pt_thresh self.e_thresh = e_thresh self.label = label def compute(self, lepton, particles): '''Compute the isolation for lepton, using particles. returns an IsolationInfo. ''' isolation = IsolationInfo(self.label, lepton) for ptc in particles: if ptc is lepton: continue if ptc.e()<self.e_thresh or \ ptc.pt()<self.pt_thresh: continue is_on = False for area in self.on_areas: if area.is_inside(lepton, ptc): is_on = True break if not is_on: continue for area in self.off_areas: if area.is_inside(lepton, ptc): is_on = False break if is_on: isolation.add_particle(ptc) return isolation
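# --- Usage sketch (illustrative, not part of heppy) -------------------------
# Per the Area docstring, any object with eta(), phi(), pt() and e() methods
# can be fed to the computer; the toy particle below stands in for heppy's
# real particle classes, and the kinematic values are made up.
class _ToyParticle(object):
    def __init__(self, eta, phi, pt, e):
        self._eta, self._phi, self._pt, self._e = eta, phi, pt, e
    def eta(self): return self._eta
    def phi(self): return self._phi
    def pt(self): return self._pt
    def e(self): return self._e

def _isolation_example():
    lepton = _ToyParticle(eta=0., phi=0., pt=30., e=30.)
    particles = [
        lepton,                                       # skipped: it is the lepton
        _ToyParticle(eta=0.1, phi=0.1, pt=5., e=5.),  # inside the R=0.4 cone
        _ToyParticle(eta=1.0, phi=1.0, pt=8., e=8.),  # outside the cone
    ]
    computer = IsolationComputer(on_areas=[EtaPhiCircle(0.4)], label='toy')
    iso = computer.compute(lepton, particles)
    return iso.sumpt  # 5.0: only the nearby particle contributes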
PypiClean
/practice_problems_builder_ktraff-0.0.14-py3-none-any.whl/ppb/workers.py
# Copyright (c) [year] [fullname] # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import logging import re import os import subprocess from pkg_resources import resource_filename from typing import List import yaml from jinja2 import Environment, PackageLoader from ppb.exceptions import NotImplementedException, DependenciesNotMetException from ppb.logging import setup_logger, set_log_level _log = setup_logger(__name__) _j2 = Environment( loader=PackageLoader('ppb', 'templates'), ) def _check_if_executables_exist(*executables): for executable in executables: _log.info(f'Checking whether {executable} is installed.') output = subprocess.run(['which', executable], stdout=subprocess.PIPE).stdout.decode('utf-8').strip() _log.debug(output) if executable not in output: raise DependenciesNotMetException(f'No suitable "{executable}" found. Please install and try again.') def _check_python_dependencies(python_version): _check_if_executables_exist(python_version, 'virtualenv', 'make') def _check_rust_dependencies(): _check_if_executables_exist('rustup', 'rustc', 'cargo') version_info = subprocess.run(['rustc', '--version'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip() _log.debug(version_info) if not version_info: raise DependenciesNotMetException('rustc appears to be installed incorrectly.') def _check_golang_dependencies(): _check_if_executables_exist('go') def _setup_python(problem_path: str): _log.info('Creating virtual environment for python') _log.debug(subprocess.run( ['make', 'venv_create'], stdout=subprocess.PIPE, cwd=os.path.join(problem_path, 'python') ).stdout.decode('utf-8')) _log.info('Building practice problems') _log.debug(subprocess.run( ['make', 'build'], stdout=subprocess.PIPE, cwd=os.path.join(problem_path, 'python') ).stdout.decode('utf-8')) def _setup_rust(): pass def _setup_golang(): pass class Worker: def run(self): raise NotImplementedException() def _get_config(self, path: str): with open(os.path.join(path, 'config.yml'), 'r') as infile: return yaml.safe_load(infile.read()) class TemplateGenerator: """Helps workers to generate jinja templates. This class is intended to be used as a mixin for worker classes. """ def _include_templates(self, path: str): """By default, every template is included. Override this in your class to modify its behavior.""" return True def _generate_templates( self, template_args: dict, template_path: str, destination_path: str, ) -> None: """Walks a directory containing templates and renders them to a destination path.
Args: - template_args: a dict of variables to be passed into jinja templates during rendering. - template_path: the path to the root folder where the templates exist, relative to the root path of the `_j2` template loader. - destination_path: where to save the rendered templates. """ full_template_path = resource_filename('ppb', f'templates/{template_path}') for subdir, dirs, files in os.walk(full_template_path): relative_path = re.sub(re.compile(f'{full_template_path}/?'), '', subdir) # Substitute any variable names in paths interpolated_path = relative_path.format(**template_args) if self._include_templates(relative_path): for a_dir in dirs: if self._include_templates(os.path.join(relative_path, a_dir)): dir_path = os.path.join(destination_path, interpolated_path, a_dir.format(**template_args)) _log.debug(f'Creating directory {dir_path}') os.makedirs(dir_path) for a_file in files: template = _j2.get_template(os.path.join(template_path, relative_path, a_file)) # Remove the .j2 extension when saving the template to the destination dest_file = re.sub(r'.j2$', '', a_file) dest_path = os.path.join(destination_path, interpolated_path, dest_file) with open(dest_path, 'w') as outfile: _log.debug(f'Writing template {dest_path}') outfile.write(template.render(**template_args)) class LogWorker(Worker): def __init__(self, debug=False): super().__init__() self.debug = debug def run(self): if self.debug: set_log_level(logging.DEBUG) else: set_log_level(logging.INFO) _log.debug(f'Logging initialized for level {logging.getLevelName(_log.getEffectiveLevel())}') class LanguageSetupMixin: """Used to set up dependencies for workers that use multiple languages.""" def _check_dependencies(self): if 'python' in self.languages: _check_python_dependencies(self.python_version) if 'rust' in self.languages: _check_rust_dependencies() if 'golang' in self.languages: _check_golang_dependencies() def _setup_languages(self): """Configures each programming language with the necessary tools and environment""" if 'python' in self.languages: _setup_python(self.problem_path) if 'rust' in self.languages: _setup_rust() if 'golang' in self.languages: _setup_golang() class NewProblemWorker(Worker, TemplateGenerator, LanguageSetupMixin): """Creates a workspace for a new practice problem""" def __init__( self, problem_name: str, languages: List[str], target: str, title: str, description: str, difficulty: int, tags: List[str], resources: List[str], python_version: str, author_name: str, author_email: str, rust_compiler_edition: str, github_username: str ): super().__init__() self.problem_name = problem_name self.languages = languages self.target = target self.title = title self.description = description self.difficulty = difficulty self.tags = tags self.resources = resources self.python_version = python_version self.author_name = author_name self.author_email = author_email self.rust_compiler_edition = rust_compiler_edition self.github_username = github_username @property def problem_path(self): return f'{os.path.join(self.target, self.problem_name)}' def _include_templates(self, relative_path: str): """Returns True if the path to the provided template should be included when generating a new practice problem. Templates for languages that have not been included will be skipped. Arguments: relative_path (str): A path to a template directory, relative to the root of the template folder. This path shouldn't have any variable interpolation done on it, i.e.
variable names in the path like "{problem_name}" should be passed without any substitutions made on them. """ match = re.match(r'\{problem_name\}\/(python|golang|rust)', relative_path) if match: # We are parsing a template folder for a specific language if match.group(1) in self.languages: return True return False return True def _create_workspace(self): """The main function for creating a new practice problem in the target destination""" template_args = { 'problem_name': self.problem_name, 'title': self.title, 'description': self.description, 'difficulty': self.difficulty, 'tags': self.tags, 'resources': self.resources, 'python_version': self.python_version, 'author_name': self.author_name, 'author_email': self.author_email, 'github_username': self.github_username, 'rust_compiler_edition': self.rust_compiler_edition, 'target': self.target, } self._generate_templates(template_args, 'create', self.target) def run(self): _log.info( f'Creating a new problem "{self.problem_name}" with {", ".join(self.languages)} ' f'into {self.problem_path}' ) self._check_dependencies() self._create_workspace() self._setup_languages() class AddProblemWorker(Worker, TemplateGenerator, LanguageSetupMixin): """Generates language template(s) for an existing practice problem.""" def __init__( self, languages: List[str], target: str, python_version: str, rust_compiler_edition: str, github_username: str ): super().__init__() self.languages = languages self.target = target self.python_version = python_version self.rust_compiler_edition = rust_compiler_edition self.github_username = github_username def _create_workspace(self): template_args = self._get_config(self.target) template_args['python_version'] = self.python_version template_args['rust_compiler_edition'] = self.rust_compiler_edition template_args['github_username'] = self.github_username template_args['target'] = self.target for language in self.languages: dest_path = os.path.join(self.target, language) os.makedirs(dest_path) self._generate_templates(template_args, f'create/{{problem_name}}/{language}', dest_path) def run(self): _log.info(f'Adding {", ".join(self.languages)} to {self.target}') self._check_dependencies() self._create_workspace() self._setup_languages()
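# --- Illustrative sketch -----------------------------------------------------
# The interplay between the language filter and the path interpolation in
# ``_generate_templates`` is easiest to see on a concrete raw template path
# (the problem name below is hypothetical): the filter regex sees the *raw*
# path, while files are written under the *interpolated* one.
def _template_path_example():
    template_args = {'problem_name': 'two_sum'}
    raw = '{problem_name}/python'
    match = re.match(r'\{problem_name\}\/(python|golang|rust)', raw)
    assert match and match.group(1) == 'python'  # the language filter matches
    return raw.format(**template_args)           # -> 'two_sum/python'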
PypiClean
/Mopidy-Spotmop-2.10.1.tar.gz/Mopidy-Spotmop-2.10.1/mopidy_spotmop/static/vendor/jquery.event.drop.js
;(function($){ // secure $ jQuery alias // Events: drop, dropstart, dropend // add the jquery instance method $.fn.drop = function( str, arg, opts ){ // figure out the event type var type = typeof str == "string" ? str : "", // figure out the event handler... fn = $.isFunction( str ) ? str : $.isFunction( arg ) ? arg : null; // fix the event type if ( type.indexOf("drop") !== 0 ) type = "drop"+ type; // were options passed opts = ( str == fn ? arg : opts ) || {}; // trigger or bind event handler return fn ? this.bind( type, opts, fn ) : this.trigger( type ); }; // DROP MANAGEMENT UTILITY // returns filtered drop target elements, caches their positions $.drop = function( opts ){ opts = opts || {}; // safely set new options... drop.multi = opts.multi === true ? Infinity : opts.multi === false ? 1 : !isNaN( opts.multi ) ? opts.multi : drop.multi; drop.delay = opts.delay || drop.delay; drop.tolerance = $.isFunction( opts.tolerance ) ? opts.tolerance : opts.tolerance === null ? null : drop.tolerance; drop.mode = opts.mode || drop.mode || 'intersect'; }; // local refs (increase compression) var $event = $.event, $special = $event.special, // configure the drop special event drop = $.event.special.drop = { // these are the default settings multi: 1, // allow multiple drop winners per dragged element delay: 20, // async timeout delay mode: 'overlap', // drop tolerance mode // internal cache targets: [], // the key name for stored drop data datakey: "dropdata", // prevent bubbling for better performance noBubble: true, // count bound related events add: function( obj ){ // read the interaction data var data = $.data( this, drop.datakey ); // count another related event data.related += 1; }, // forget unbound related events remove: function(){ $.data( this, drop.datakey ).related -= 1; }, // configure the interactions setup: function(){ // check for related events if ( $.data( this, drop.datakey ) ) return; // initialize the drop element data var data = { related: 0, active: [], anyactive: 0, winner: 0, location: {} }; // store the drop data on the element $.data( this, drop.datakey, data ); // store the drop target in internal cache drop.targets.push( this ); }, // destroy the configured interaction teardown: function(){ var data = $.data( this, drop.datakey ) || {}; // check for related events if ( data.related ) return; // remove the stored data $.removeData( this, drop.datakey ); // reference the targeted element var element = this; // remove from the internal cache drop.targets = $.grep( drop.targets, function( target ){ return ( target !== element ); }); }, // shared event handler handler: function( event, dd ){ // local vars var results, $targets; // make sure the right data is available if ( !dd ) return; // handle various events switch ( event.type ){ // draginit, from $.event.special.drag case 'mousedown': // DROPINIT >> case 'touchstart': // DROPINIT >> // collect and assign the drop targets $targets = $( drop.targets ); if ( typeof dd.drop == "string" ) $targets = $targets.filter( dd.drop ); // reset drop data winner properties $targets.each(function(){ var data = $.data( this, drop.datakey ); data.active = []; data.anyactive = 0; data.winner = 0; }); // set available target elements dd.droppable = $targets; // activate drop targets for the initial element being dragged $special.drag.hijack( event, "dropinit", dd ); break; // drag, from $.event.special.drag case 'mousemove': // TOLERATE >> case 'touchmove': // TOLERATE >> drop.event = event; // store the mousemove event if ( !drop.timer )
// monitor drop targets drop.tolerate( dd ); break; // dragend, from $.event.special.drag case 'mouseup': // DROP >> DROPEND >> case 'touchend': // DROP >> DROPEND >> drop.timer = clearTimeout( drop.timer ); // delete timer if ( dd.propagates ){ $special.drag.hijack( event, "drop", dd ); $special.drag.hijack( event, "dropend", dd ); } break; } }, // returns the location positions of an element locate: function( elem, index ){ var data = $.data( elem, drop.datakey ), $elem = $( elem ), posi = $elem.offset() || {}, height = $elem.outerHeight(), width = $elem.outerWidth(), location = { elem: elem, width: width, height: height, top: posi.top, left: posi.left, right: posi.left + width, bottom: posi.top + height }; // drag elements might not have dropdata if ( data ){ data.location = location; data.index = index; data.elem = elem; } return location; }, // test the location positions of an element against another OR an X,Y coord contains: function( target, test ){ // target { location } contains test [x,y] or { location } return ( ( test[0] || test.left ) >= target.left && ( test[0] || test.right ) <= target.right && ( test[1] || test.top ) >= target.top && ( test[1] || test.bottom ) <= target.bottom ); }, // stored tolerance modes modes: { // fn scope: "$.event.special.drop" object // target with mouse wins, else target with most overlap wins 'intersect': function( event, proxy, target ){ return this.contains( target, [ event.pageX, event.pageY ] ) ? // check cursor 1e9 : this.modes.overlap.apply( this, arguments ); // check overlap }, // target with most overlap wins 'overlap': function( event, proxy, target ){ // calculate the area of overlap... return Math.max( 0, Math.min( target.bottom, proxy.bottom ) - Math.max( target.top, proxy.top ) ) * Math.max( 0, Math.min( target.right, proxy.right ) - Math.max( target.left, proxy.left ) ); }, // proxy is completely contained within target bounds 'fit': function( event, proxy, target ){ return this.contains( target, proxy ) ? 1 : 0; }, // center of the proxy is contained within target bounds 'middle': function( event, proxy, target ){ return this.contains( target, [ proxy.left + proxy.width * .5, proxy.top + proxy.height * .5 ] ) ? 1 : 0; } }, // sort drop target cache by winner (desc), then index (asc) sort: function( a, b ){ return ( b.winner - a.winner ) || ( a.index - b.index ); }, // async, recursive tolerance execution tolerate: function( dd ){ // declare local refs var i, drp, drg, data, arr, len, elem, // interaction iteration variables x = 0, ia, end = dd.interactions.length, // determine the mouse coords xy = [ drop.event.pageX, drop.event.pageY ], // custom or stored tolerance fn tolerance = drop.tolerance || drop.modes[ drop.mode ]; // go through each passed interaction... do if ( ia = dd.interactions[x] ){ // check valid interaction if ( !ia ) return; // initialize or clear the drop data ia.drop = []; // holds the drop elements arr = []; len = ia.droppable.length; // determine the proxy location, if needed if ( tolerance ) drg = drop.locate( ia.proxy ); // reset the loop i = 0; // loop each stored drop target do if ( elem = ia.droppable[i] ){ data = $.data( elem, drop.datakey ); drp = data.location; if ( !drp ) continue; // find a winner: tolerance function is defined, call it data.winner = tolerance ? tolerance.call( drop, drop.event, drg, drp ) // mouse position is always the fallback : drop.contains( drp, xy ) ?
1 : 0; arr.push( data ); } while ( ++i < len ); // loop // sort the drop targets arr.sort( drop.sort ); // reset the loop i = 0; // loop through all of the targets again do if ( data = arr[ i ] ){ // winners... if ( data.winner && ia.drop.length < drop.multi ){ // new winner... dropstart if ( !data.active[x] && !data.anyactive ){ // check to make sure that this is not prevented if ( $special.drag.hijack( drop.event, "dropstart", dd, x, data.elem )[0] !== false ){ data.active[x] = 1; data.anyactive += 1; } // if false, it is not a winner else data.winner = 0; } // if it is still a winner if ( data.winner ) ia.drop.push( data.elem ); } // losers... else if ( data.active[x] && data.anyactive == 1 ){ // former winner... dropend $special.drag.hijack( drop.event, "dropend", dd, x, data.elem ); data.active[x] = 0; data.anyactive -= 1; } } while ( ++i < len ); // loop } while ( ++x < end ) // loop // check if the mouse is still moving or is idle if ( drop.last && xy[0] == drop.last.pageX && xy[1] == drop.last.pageY ) delete drop.timer; // idle, don't recurse else // recurse drop.timer = setTimeout(function(){ drop.tolerate( dd ); }, drop.delay ); // remember event, to compare idleness drop.last = drop.event; } }; // share the same special event configuration with related events... $special.dropinit = $special.dropstart = $special.dropend = drop; })(jQuery); // confine scope
PypiClean
/zephyrus-sc2-parser-0.3.8.tar.gz/zephyrus-sc2-parser-0.3.8/zephyrus_sc2_parser/s2protocol_fixed/versions/protocol16755.py
from zephyrus_sc2_parser.s2protocol_fixed.decoders import * # Decoding instructions for each protocol type. typeinfos = [ ('_int',[(0,7)]), #0 ('_int',[(0,4)]), #1 ('_int',[(0,6)]), #2 ('_int',[(0,14)]), #3 ('_int',[(0,22)]), #4 ('_int',[(0,32)]), #5 ('_choice',[(0,2),{0:('m_uint6',2),1:('m_uint14',3),2:('m_uint22',4),3:('m_uint32',5)}]), #6 ('_blob',[(0,8)]), #7 ('_int',[(0,8)]), #8 ('_struct',[[('m_flags',8,0),('m_major',8,1),('m_minor',8,2),('m_revision',8,3),('m_build',5,4),('m_baseBuild',5,5)]]), #9 ('_int',[(0,3)]), #10 ('_struct',[[('m_signature',7,0),('m_version',9,1),('m_type',10,2),('m_elapsedGameLoops',5,3)]]), #11 ('_fourcc',[]), #12 ('_blob',[(0,7)]), #13 ('_int',[(0,64)]), #14 ('_struct',[[('m_region',8,0),('m_programId',12,1),('m_realm',5,2),('m_name',13,3),('m_id',14,4)]]), #15 ('_struct',[[('m_a',8,0),('m_r',8,1),('m_g',8,2),('m_b',8,3)]]), #16 ('_int',[(0,2)]), #17 ('_struct',[[('m_name',7,0),('m_toon',15,1),('m_race',7,2),('m_color',16,3),('m_control',8,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',17,7),('m_result',17,8)]]), #18 ('_array',[(0,5),18]), #19 ('_optional',[19]), #20 ('_blob',[(0,10)]), #21 ('_blob',[(0,11)]), #22 ('_struct',[[('m_file',22,0)]]), #23 ('_bool',[]), #24 ('_int',[(-9223372036854775808,64)]), #25 ('_blob',[(0,12)]), #26 ('_blob',[(40,0)]), #27 ('_array',[(0,4),27]), #28 ('_optional',[28]), #29 ('_struct',[[('m_playerList',20,0),('m_title',21,1),('m_difficulty',7,2),('m_thumbnail',23,3),('m_isBlizzardMap',24,4),('m_timeUTC',25,5),('m_timeLocalOffset',25,6),('m_description',26,7),('m_imageFilePath',22,8),('m_mapFileName',22,9),('m_cacheHandles',29,10),('m_miniSave',24,11),('m_gameSpeed',10,12),('m_defaultDifficulty',2,13)]]), #30 ('_optional',[8]), #31 ('_struct',[[('m_race',31,-1)]]), #32 ('_struct',[[('m_team',31,-1)]]), #33 ('_struct',[[('m_name',7,-7),('m_randomSeed',5,-6),('m_racePreference',32,-5),('m_teamPreference',33,-4),('m_testMap',24,-3),('m_testAuto',24,-2),('m_observe',17,-1)]]), #34 ('_array',[(0,5),34]), #35 ('_struct',[[('m_lockTeams',24,-11),('m_teamsTogether',24,-10),('m_advancedSharedControl',24,-9),('m_randomRaces',24,-8),('m_battleNet',24,-7),('m_amm',24,-6),('m_ranked',24,-5),('m_noVictoryOrDefeat',24,-4),('m_fog',17,-3),('m_observers',17,-2),('m_userDifficulty',17,-1)]]), #36 ('_int',[(0,5)]), #37 ('_int',[(1,4)]), #38 ('_int',[(1,5)]), #39 ('_int',[(1,8)]), #40 ('_bitarray',[(0,6)]), #41 ('_bitarray',[(0,8)]), #42 ('_bitarray',[(0,2)]), #43 ('_struct',[[('m_allowedColors',41,-5),('m_allowedRaces',42,-4),('m_allowedDifficulty',41,-3),('m_allowedControls',42,-2),('m_allowedObserveTypes',43,-1)]]), #44 ('_array',[(0,5),44]), #45 ('_struct',[[('m_randomValue',5,-23),('m_gameCacheName',21,-22),('m_gameOptions',36,-21),('m_gameSpeed',10,-20),('m_gameType',10,-19),('m_maxUsers',37,-18),('m_maxObservers',37,-17),('m_maxPlayers',37,-16),('m_maxTeams',38,-15),('m_maxColors',39,-14),('m_maxRaces',40,-13),('m_maxControls',40,-12),('m_mapSizeX',8,-11),('m_mapSizeY',8,-10),('m_mapFileSyncChecksum',5,-9),('m_mapFileName',22,-8),('m_mapAuthorName',7,-7),('m_modFileSyncChecksum',5,-6),('m_slotDescriptions',45,-5),('m_defaultDifficulty',2,-4),('m_cacheHandles',28,-3),('m_isBlizzardMap',24,-2),('m_isPremadeFFA',24,-1)]]), #46 ('_optional',[1]), #47 ('_optional',[37]), #48 ('_struct',[[('m_color',48,-1)]]), #49 ('_array',[(0,5),5]), #50 ('_struct',[[('m_control',8,-9),('m_userId',47,-8),('m_teamId',1,-7),('m_colorPref',49,-6),('m_racePref',32,-5),('m_difficulty',2,-4),('m_handicap',0,-3),('m_observe',17,-2),('m_rewards',50,-1)]]), #51 
('_array',[(0,5),51]), #52 ('_struct',[[('m_phase',10,-9),('m_maxUsers',37,-8),('m_maxObservers',37,-7),('m_slots',52,-6),('m_randomSeed',5,-5),('m_hostUserId',47,-4),('m_isSinglePlayer',24,-3),('m_gameDuration',5,-2),('m_defaultDifficulty',2,-1)]]), #53 ('_struct',[[('m_userInitialData',35,-3),('m_gameDescription',46,-2),('m_lobbyState',53,-1)]]), #54 ('_struct',[[('m_syncLobbyState',54,-1)]]), #55 ('_struct',[[('m_name',13,-5)]]), #56 ('_blob',[(0,6)]), #57 ('_struct',[[('m_name',57,-5)]]), #58 ('_struct',[[('m_name',57,-7),('m_type',5,-6),('m_data',13,-5)]]), #59 ('_struct',[[('m_type',5,-7),('m_name',57,-6),('m_data',26,-5)]]), #60 ('_struct',[[('m_developmentCheatsEnabled',24,-8),('m_multiplayerCheatsEnabled',24,-7),('m_syncChecksummingEnabled',24,-6),('m_isMapToMapTransition',24,-5)]]), #61 ('_struct',[[]]), #62 ('_struct',[[('m_fileName',22,-9),('m_automatic',24,-8),('m_overwrite',24,-7),('m_name',7,-6),('m_description',21,-5)]]), #63 ('_int',[(-2147483648,32)]), #64 ('_struct',[[('x',64,-2),('y',64,-1)]]), #65 ('_struct',[[('m_point',65,-4),('m_time',64,-3),('m_verb',21,-2),('m_arguments',21,-1)]]), #66 ('_struct',[[('m_data',66,-5)]]), #67 ('_int',[(0,17)]), #68 ('_int',[(0,16)]), #69 ('_struct',[[('m_abilLink',69,-3),('m_abilCmdIndex',37,-2),('m_abilCmdData',31,-1)]]), #70 ('_optional',[70]), #71 ('_null',[]), #72 ('_int',[(0,20)]), #73 ('_struct',[[('x',73,-3),('y',73,-2),('z',64,-1)]]), #74 ('_struct',[[('m_targetUnitFlags',8,-6),('m_timer',8,-5),('m_tag',5,-4),('m_snapshotUnitLink',69,-3),('m_snapshotPlayerId',47,-2),('m_snapshotPoint',74,-1)]]), #75 ('_choice',[(0,2),{0:('None',72),1:('TargetPoint',74),2:('TargetUnit',75),3:('Data',5)}]), #76 ('_optional',[5]), #77 ('_struct',[[('m_cmdFlags',68,-8),('m_abil',71,-7),('m_data',76,-6),('m_otherUnit',77,-5)]]), #78 ('_array',[(0,8),8]), #79 ('_choice',[(0,2),{0:('None',72),1:('Mask',42),2:('OneIndices',79),3:('ZeroIndices',79)}]), #80 ('_struct',[[('m_unitLink',69,-3),('m_intraSubgroupPriority',8,-2),('m_count',8,-1)]]), #81 ('_array',[(0,8),81]), #82 ('_array',[(0,8),5]), #83 ('_struct',[[('m_subgroupIndex',8,-4),('m_removeMask',80,-3),('m_addSubgroups',82,-2),('m_addUnitTags',83,-1)]]), #84 ('_struct',[[('m_controlGroupId',1,-6),('m_delta',84,-5)]]), #85 ('_struct',[[('m_controlGroupIndex',1,-7),('m_controlGroupUpdate',17,-6),('m_mask',80,-5)]]), #86 ('_struct',[[('m_count',8,-6),('m_subgroupCount',8,-5),('m_activeSubgroupIndex',8,-4),('m_unitTagsChecksum',5,-3),('m_subgroupIndicesChecksum',5,-2),('m_subgroupsChecksum',5,-1)]]), #87 ('_struct',[[('m_controlGroupId',1,-6),('m_selectionSyncData',87,-5)]]), #88 ('_array',[(0,3),64]), #89 ('_struct',[[('m_recipientId',1,-6),('m_resources',89,-5)]]), #90 ('_struct',[[('m_chatMessage',21,-5)]]), #91 ('_int',[(-128,8)]), #92 ('_struct',[[('x',64,-3),('y',64,-2),('z',64,-1)]]), #93 ('_struct',[[('m_beacon',92,-11),('m_ally',92,-10),('m_autocast',92,-9),('m_targetUnitTag',5,-8),('m_targetUnitSnapshotUnitLink',69,-7),('m_targetUnitSnapshotPlayerId',47,-6),('m_targetPoint',93,-5)]]), #94 ('_struct',[[('m_speed',10,-5)]]), #95 ('_struct',[[('m_delta',92,-5)]]), #96 ('_struct',[[('m_verb',21,-6),('m_arguments',21,-5)]]), #97 ('_struct',[[('m_alliance',5,-6),('m_control',5,-5)]]), #98 ('_struct',[[('m_unitTag',5,-5)]]), #99 ('_struct',[[('m_unitTag',5,-6),('m_flags',8,-5)]]), #100 ('_struct',[[('m_conversationId',64,-6),('m_replyId',64,-5)]]), #101 ('_struct',[[('m_purchaseItemId',64,-5)]]), #102 ('_struct',[[('m_difficultyLevel',64,-5)]]), #103 
('_choice',[(0,3),{0:('None',72),1:('Checked',24),2:('ValueChanged',5),3:('SelectionChanged',64),4:('TextChanged',22)}]), #104 ('_struct',[[('m_controlId',64,-7),('m_eventType',64,-6),('m_eventData',104,-5)]]), #105 ('_struct',[[('m_soundHash',5,-6),('m_length',5,-5)]]), #106 ('_struct',[[('m_soundHash',83,-2),('m_length',83,-1)]]), #107 ('_struct',[[('m_syncInfo',107,-5)]]), #108 ('_struct',[[('m_sound',5,-5)]]), #109 ('_struct',[[('m_transmissionId',64,-5)]]), #110 ('_struct',[[('x',69,-2),('y',69,-1)]]), #111 ('_optional',[69]), #112 ('_struct',[[('m_target',111,-8),('m_distance',112,-7),('m_pitch',112,-6),('m_yaw',112,-5)]]), #113 ('_int',[(0,1)]), #114 ('_struct',[[('m_skipType',114,-5)]]), #115 ('_struct',[[('m_button',5,-11),('m_down',24,-10),('m_posXUI',5,-9),('m_posYUI',5,-8),('m_posXWorld',64,-7),('m_posYWorld',64,-6),('m_posZWorld',64,-5)]]), #116 ('_struct',[[('m_soundtrack',5,-5)]]), #117 ('_struct',[[('m_planetId',64,-5)]]), #118 ('_struct',[[('m_key',92,-6),('m_flags',92,-5)]]), #119 ('_struct',[[('m_resources',89,-5)]]), #120 ('_struct',[[('m_fulfillRequestId',64,-5)]]), #121 ('_struct',[[('m_cancelRequestId',64,-5)]]), #122 ('_struct',[[('m_researchItemId',64,-5)]]), #123 ('_struct',[[('m_laggingPlayerId',1,-5)]]), #124 ('_struct',[[('m_mercenaryId',64,-5)]]), #125 ('_struct',[[('m_battleReportId',64,-6),('m_difficultyLevel',64,-5)]]), #126 ('_struct',[[('m_battleReportId',64,-5)]]), #127 ('_int',[(0,19)]), #128 ('_struct',[[('m_decrementMs',128,-5)]]), #129 ('_struct',[[('m_portraitId',64,-5)]]), #130 ('_struct',[[('m_functionName',13,-5)]]), #131 ('_struct',[[('m_result',64,-5)]]), #132 ('_struct',[[('m_gameMenuItemIndex',64,-5)]]), #133 ('_struct',[[('m_reason',92,-5)]]), #134 ('_struct',[[('m_purchaseCategoryId',64,-5)]]), #135 ('_struct',[[('m_button',69,-5)]]), #136 ('_struct',[[('m_recipient',17,-3),('m_string',22,-2)]]), #137 ('_struct',[[('m_recipient',17,-3),('m_point',65,-2)]]), #138 ('_struct',[[('m_progress',64,-2)]]), #139 ] # Map from protocol NNet.Game.*Event eventid to (typeid, name) game_event_types = { 5: (62, 'NNet.Game.SUserFinishedLoadingSyncEvent'), 7: (56, 'NNet.Game.SBankFileEvent'), 8: (58, 'NNet.Game.SBankSectionEvent'), 9: (59, 'NNet.Game.SBankKeyEvent'), 10: (60, 'NNet.Game.SBankValueEvent'), 11: (61, 'NNet.Game.SUserOptionsEvent'), 22: (63, 'NNet.Game.SSaveGameEvent'), 23: (62, 'NNet.Game.SSaveGameDoneEvent'), 25: (62, 'NNet.Game.SPlayerLeaveEvent'), 26: (67, 'NNet.Game.SGameCheatEvent'), 27: (78, 'NNet.Game.SCmdEvent'), 28: (85, 'NNet.Game.SSelectionDeltaEvent'), 29: (86, 'NNet.Game.SControlGroupUpdateEvent'), 30: (88, 'NNet.Game.SSelectionSyncCheckEvent'), 31: (90, 'NNet.Game.SResourceTradeEvent'), 32: (91, 'NNet.Game.STriggerChatMessageEvent'), 33: (94, 'NNet.Game.SAICommunicateEvent'), 34: (95, 'NNet.Game.SSetAbsoluteGameSpeedEvent'), 35: (96, 'NNet.Game.SAddAbsoluteGameSpeedEvent'), 37: (97, 'NNet.Game.SBroadcastCheatEvent'), 38: (98, 'NNet.Game.SAllianceEvent'), 39: (99, 'NNet.Game.SUnitClickEvent'), 40: (100, 'NNet.Game.SUnitHighlightEvent'), 41: (101, 'NNet.Game.STriggerReplySelectedEvent'), 44: (62, 'NNet.Game.STriggerSkippedEvent'), 45: (106, 'NNet.Game.STriggerSoundLengthQueryEvent'), 46: (109, 'NNet.Game.STriggerSoundOffsetEvent'), 47: (110, 'NNet.Game.STriggerTransmissionOffsetEvent'), 48: (110, 'NNet.Game.STriggerTransmissionCompleteEvent'), 49: (113, 'NNet.Game.SCameraUpdateEvent'), 50: (62, 'NNet.Game.STriggerAbortMissionEvent'), 51: (102, 'NNet.Game.STriggerPurchaseMadeEvent'), 52: (62, 'NNet.Game.STriggerPurchaseExitEvent'), 
53: (103, 'NNet.Game.STriggerPlanetMissionLaunchedEvent'), 54: (62, 'NNet.Game.STriggerPlanetPanelCanceledEvent'), 55: (105, 'NNet.Game.STriggerDialogControlEvent'), 56: (108, 'NNet.Game.STriggerSoundLengthSyncEvent'), 57: (115, 'NNet.Game.STriggerConversationSkippedEvent'), 58: (116, 'NNet.Game.STriggerMouseClickedEvent'), 63: (62, 'NNet.Game.STriggerPlanetPanelReplayEvent'), 64: (117, 'NNet.Game.STriggerSoundtrackDoneEvent'), 65: (118, 'NNet.Game.STriggerPlanetMissionSelectedEvent'), 66: (119, 'NNet.Game.STriggerKeyPressedEvent'), 67: (131, 'NNet.Game.STriggerMovieFunctionEvent'), 68: (62, 'NNet.Game.STriggerPlanetPanelBirthCompleteEvent'), 69: (62, 'NNet.Game.STriggerPlanetPanelDeathCompleteEvent'), 70: (120, 'NNet.Game.SResourceRequestEvent'), 71: (121, 'NNet.Game.SResourceRequestFulfillEvent'), 72: (122, 'NNet.Game.SResourceRequestCancelEvent'), 73: (62, 'NNet.Game.STriggerResearchPanelExitEvent'), 74: (62, 'NNet.Game.STriggerResearchPanelPurchaseEvent'), 75: (123, 'NNet.Game.STriggerResearchPanelSelectionChangedEvent'), 76: (124, 'NNet.Game.SLagMessageEvent'), 77: (62, 'NNet.Game.STriggerMercenaryPanelExitEvent'), 78: (62, 'NNet.Game.STriggerMercenaryPanelPurchaseEvent'), 79: (125, 'NNet.Game.STriggerMercenaryPanelSelectionChangedEvent'), 80: (62, 'NNet.Game.STriggerVictoryPanelExitEvent'), 81: (62, 'NNet.Game.STriggerBattleReportPanelExitEvent'), 82: (126, 'NNet.Game.STriggerBattleReportPanelPlayMissionEvent'), 83: (127, 'NNet.Game.STriggerBattleReportPanelPlaySceneEvent'), 84: (127, 'NNet.Game.STriggerBattleReportPanelSelectionChangedEvent'), 85: (103, 'NNet.Game.STriggerVictoryPanelPlayMissionAgainEvent'), 86: (62, 'NNet.Game.STriggerMovieStartedEvent'), 87: (62, 'NNet.Game.STriggerMovieFinishedEvent'), 88: (129, 'NNet.Game.SDecrementGameTimeRemainingEvent'), 89: (130, 'NNet.Game.STriggerPortraitLoadedEvent'), 90: (132, 'NNet.Game.STriggerCustomDialogDismissedEvent'), 91: (133, 'NNet.Game.STriggerGameMenuItemSelectedEvent'), 92: (134, 'NNet.Game.STriggerCameraMoveEvent'), 93: (102, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseItemChangedEvent'), 94: (135, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseCategoryChangedEvent'), 95: (136, 'NNet.Game.STriggerButtonPressedEvent'), 96: (62, 'NNet.Game.STriggerGameCreditsFinishedEvent'), } # The typeid of the NNet.Game.EEventId enum. game_eventid_typeid = 0 # Map from protocol NNet.Game.*Message eventid to (typeid, name) message_event_types = { 0: (137, 'NNet.Game.SChatMessage'), 1: (138, 'NNet.Game.SPingMessage'), 2: (139, 'NNet.Game.SLoadingProgressMessage'), 3: (62, 'NNet.Game.SServerPingMessage'), } # The typeid of the NNet.Game.EMessageId enum. message_eventid_typeid = 1 # Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name) tracker_event_types = { } # NOTE: older builds may not support some types and the generated methods # may fail to function properly. If specific backwards compatibility is # needed, these values should be tested against None. # The typeid of the NNet.Replay.Tracker.EEventId enum. tracker_eventid_typeid = None # The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas). svaruint32_typeid = 6 # The typeid of NNet.Replay.SGameUserId (the type used to encode player ids). replay_userid_typeid = None # The typeid of NNet.Replay.SHeader (the type used to store replay game version and length). replay_header_typeid = 11 # The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 30 # The typeid of NNet.Replay.SInitData (the type used to store the initial lobby). replay_initdata_typeid = 55 def _varuint32_value(value): # Returns the numeric value from a SVarUint32 instance. for v in value.values(): return v return 0 def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id): # Decodes events prefixed with a gameloop and possibly userid gameloop = 0 while not decoder.done(): start_bits = decoder.used_bits() # decode the gameloop delta before each event delta = _varuint32_value(decoder.instance(svaruint32_typeid)) gameloop += delta # decode the userid before each event if decode_user_id: userid = decoder.instance(replay_userid_typeid) # decode the event id eventid = decoder.instance(eventid_typeid) typeid, typename = event_types.get(eventid, (None, None)) if typeid is None: raise CorruptedError('eventid({}) at {}'.format(eventid, decoder)) # decode the event struct instance event = decoder.instance(typeid) event['_event'] = typename event['_eventid'] = eventid # insert gameloop and userid event['_gameloop'] = gameloop if decode_user_id: event['_userid'] = userid # the next event is byte aligned decoder.byte_align() # insert bits used in stream event['_bits'] = decoder.used_bits() - start_bits yield event def decode_replay_game_events(contents): """Decodes and yields each game event from the contents byte string.""" decoder = BitPackedDecoder(contents, typeinfos) for event in _decode_event_stream(decoder, game_eventid_typeid, game_event_types, decode_user_id=True): yield event def decode_replay_message_events(contents): """Decodes and yields each message event from the contents byte string.""" decoder = BitPackedDecoder(contents, typeinfos) for event in _decode_event_stream(decoder, message_eventid_typeid, message_event_types, decode_user_id=True): yield event def decode_replay_tracker_events(contents): """Decodes and yields each tracker event from the contents byte string.""" decoder = VersionedDecoder(contents, typeinfos) for event in _decode_event_stream(decoder, tracker_eventid_typeid, tracker_event_types, decode_user_id=False): yield event def decode_replay_header(contents): """Decodes and returns the replay header from the contents byte string.""" decoder = VersionedDecoder(contents, typeinfos) return decoder.instance(replay_header_typeid) def decode_replay_details(contents): """Decodes and returns the game details from the contents byte string.""" decoder = VersionedDecoder(contents, typeinfos) return decoder.instance(game_details_typeid) def decode_replay_initdata(contents): """Decodes and returns the replay init data from the contents byte string.""" decoder = BitPackedDecoder(contents, typeinfos) return decoder.instance(replay_initdata_typeid) def decode_replay_attributes_events(contents): """Decodes and returns the attributes from the contents byte string.""" buffer = BitPackedBuffer(contents, 'little') attributes = {} if not buffer.done(): attributes['source'] = buffer.read_bits(8) attributes['mapNamespace'] = buffer.read_bits(32) count = buffer.read_bits(32) attributes['scopes'] = {} while not buffer.done(): value = {} value['namespace'] = buffer.read_bits(32) value['attrid'] = attrid = buffer.read_bits(32) scope = buffer.read_bits(8) value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\x00') if scope not in attributes['scopes']: attributes['scopes'][scope] = {} if attrid not in attributes['scopes'][scope]: attributes['scopes'][scope][attrid] = [] attributes['scopes'][scope][attrid].append(value) return
attributes def unit_tag(unitTagIndex, unitTagRecycle): return (unitTagIndex << 18) + unitTagRecycle def unit_tag_index(unitTag): return (unitTag >> 18) & 0x00003fff def unit_tag_recycle(unitTag): return (unitTag) & 0x0003ffff
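The three helpers above pack a unit's index into the bits above position 18 of the tag and its recycle counter into the low 18 bits. A minimal round-trip sanity check (plain Python; the index and recycle values are arbitrary examples):

```
index, recycle = 123, 7
tag = unit_tag(index, recycle)          # (123 << 18) + 7 == 32243719
assert unit_tag_index(tag) == index     # recovered from the high bits
assert unit_tag_recycle(tag) == recycle # recovered from the low 18 bits
```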
PypiClean
/visual-automata-demo-0.0.4.tar.gz/visual-automata-demo-0.0.4/visual_automata/fa/nfa.py
import copy import random import sys from typing import Generator, Union import numpy as np import pandas as pd from automata.fa.nfa import NFA from colormath.color_objects import sRGBColor from forbiddenfruit import curse from graphviz import Digraph from IPython.display import display from pandas import DataFrame from visual_automata.colors import ( create_palette, hex_to_rgb_color, list_cycler, ) sys.setrecursionlimit(10 ** 6) def deepcopy(self) -> dict: return copy.deepcopy(self) curse(dict, "deepcopy", deepcopy) class VisualNFA: """A wrapper for an automata-lib non-deterministic finite automaton.""" def __init__( self, nfa: NFA = None, *, states: set = None, input_symbols: set = None, transitions: dict = None, initial_state: str = None, final_states: set = None, ): if nfa: self.nfa = nfa.copy() else: if not states: states = {*transitions.keys()} if not input_symbols: input_symbols = set() for v in transitions.values(): symbols = [*v.keys()] for symbol in symbols: if symbol != "": input_symbols.add(symbol) self.nfa = NFA( states=states.copy(), input_symbols=input_symbols.copy(), transitions=transitions.deepcopy(), initial_state=initial_state, final_states=final_states.copy(), ) self.nfa.validate() # ------------------------------------------------------------------------- # Mimic behavior of automata-lib NFA. @property def states(self) -> set: """Pass on .states from the NFA""" return self.nfa.states @states.setter def states(self, states: set): """Set .states on the NFA""" self.nfa.states = states @property def input_symbols(self) -> set: """Pass on .input_symbols from the NFA""" return self.nfa.input_symbols @input_symbols.setter def input_symbols(self, input_symbols: set): """Set .input_symbols on the NFA""" self.nfa.input_symbols = input_symbols @property def transitions(self) -> dict: """Pass on .transitions from the NFA""" return self.nfa.transitions @transitions.setter def transitions(self, transitions: dict): """Set .transitions on the NFA""" self.nfa.transitions = transitions @property def initial_state(self) -> str: """Pass on .initial_state from the NFA""" return self.nfa.initial_state @initial_state.setter def initial_state(self, initial_state: str): """Set .initial_state on the NFA""" self.nfa.initial_state = initial_state @property def final_states(self) -> set: """Pass on .final_states from the NFA""" return self.nfa.final_states @final_states.setter def final_states(self, final_states: set): """Set .final_states on the NFA""" self.nfa.final_states = final_states def copy(self): """Create a deep copy of the automaton.""" return self.__class__(**vars(self)) def validate(self) -> bool: """Return True if this NFA is internally consistent.""" return self.nfa.validate() def accepts_input(self, input_str: str) -> bool: """Return True if this automaton accepts the given input.""" return self.nfa.accepts_input(input_str=input_str) def read_input(self, input_str: str) -> set: """ Check if the given string is accepted by this automaton. Return the automaton's final configuration if this string is valid. """ return self.nfa.read_input(input_str=input_str) def read_input_stepwise(self, input_str: str) -> Generator: """ Check if the given string is accepted by this automaton. Return the automaton's final configuration if this string is valid. """ return self.nfa.read_input_stepwise(input_str=input_str) def _get_lambda_closure(self, start_state: str) -> set: """ Return the lambda closure for the given state. 
The lambda closure of a state q is the set containing q, along with every state that can be reached from q by following only lambda transitions. """ return self.nfa._get_lambda_closure(start_state=start_state) def _get_next_current_states( self, current_states: set, input_symbol: str ) -> set: """Return the next set of current states given the current set.""" return self.nfa._get_next_current_states(current_states, input_symbol) # ------------------------------------------------------------------------- # Define new attributes and their helper methods. @property def table(self) -> DataFrame: """ Generates a transition table of the given VisualNFA. Returns: DataFrame: A transition table of the VisualNFA. """ final_states = "".join(self.nfa.final_states) transitions = self._add_lambda( all_transitions=self.nfa.transitions, input_symbols=self.nfa.input_symbols, ) table: dict = {} for state, transition in sorted(transitions.items()): if state == self.nfa.initial_state and state in final_states: state = "→*" + state elif state == self.nfa.initial_state: state = "→" + state elif state in final_states: state = "*" + state row: dict = {} for input_symbol, next_states in transition.items(): cell: list = [] for next_state in sorted(next_states): if next_state in final_states: cell.append("*" + next_state) else: cell.append(next_state) if not cell and len(cell) == 1: cell = np.nan elif len(cell) == 1: cell = cell.pop() else: cell = "{" + ",".join(cell) + "}" row[input_symbol] = cell table[state] = row table = pd.DataFrame.from_dict(table).fillna("∅").T table = table.reindex(sorted(table.columns), axis=1) return table @staticmethod def _add_lambda(all_transitions: dict, input_symbols: str) -> dict: """ Replacing '' key name for empty string (lambda/epsilon) transitions. Args: all_transitions (dict): The NFA's transitions with '' for lambda transitions. input_symbols (str): The NFA's input symbols/alphabet. Returns: dict: Transitions with λ for lambda transitions """ all_transitions = all_transitions.deepcopy() input_symbols = input_symbols.copy() # Replacing '' key name for empty string (lambda/epsilon) transitions. for transitions in all_transitions.values(): for state, transition in list(transitions.items()): if state == "": transitions["λ"] = transition del transitions[""] input_symbols.add("λ") return all_transitions # ------------------------------------------------------------------------- # Define new class methods and their helper methods. @property def _lambda_transition_exists(self) -> bool: """ Checks if the nfa has lambda transitions. Returns: bool: If the nfa has lambda transitions, returns True; else False. """ status = False for transitions in self.nfa.transitions.values(): if "" in transitions: return True return status @classmethod def eliminate_lambda(cls, nfa): """ Eliminates lambda transitions, and returns a new nfa. Args: nfa (VisualNFA): A VisualNFA object. Returns: VisualNFA: A VisualNFA object without lambda transitions. """ if nfa._lambda_transition_exists: nfa_lambda_eliminated = nfa.copy() for state in sorted(nfa_lambda_eliminated.transitions): # Find lambda closure for the state. 
closures = nfa_lambda_eliminated._get_lambda_closure(state) if nfa_lambda_eliminated.initial_state == state: if closures.difference(state).issubset( nfa_lambda_eliminated.final_states ): [ nfa_lambda_eliminated.final_states.add(state) for state in closures.intersection(state) ] for input_symbol in nfa_lambda_eliminated.input_symbols: next_states = nfa.nfa._get_next_current_states( closures, input_symbol ) # Check if a dead state was returned. if next_states != set(): # Update the transition after lambda move has been eliminated. nfa_lambda_eliminated.transitions[state][ input_symbol ] = next_states # Delete the lambda transition. if "" in nfa_lambda_eliminated.transitions[state]: del nfa_lambda_eliminated.transitions[state][""] return nfa_lambda_eliminated else: return nfa # ------------------------------------------------------------------------- # Define new methods and their helper methods. def _pathfinder( self, input_str: str, status: bool = False, counter: int = 0, main_counter: int = 0, ) -> Union[bool, list]: # pragma: no cover. Too many possibilities. """ Searches for an appropriate path to return to input_check. Args: input_str (str): Input symbols status (bool, optional): If a path is found. Defaults to False. counter (int, optional): To keep track of recursion limit in __pathsearcher. Defaults to 0. main_counter (int, optional): To keep track of recursion limit in _pathfinder. Defaults to 0. Returns: Union[bool, list]: If a path is found, and a list of transition tuples. """ counter += 1 nfa = self.copy() recursion_limit = 50 result = self.__pathsearcher(nfa, input_str, status) if result: return status, result else: main_counter += 1 if main_counter <= recursion_limit: return self._pathfinder( input_str, status, counter, main_counter=main_counter ) else: status = ( "[NO VALID PATH FOUND]\n" "Try to eliminate lambda transitions and try again.\n" "Example: nfa_lambda_removed = nfa.eliminate_lambda()" ) return status, [] @staticmethod def __pathsearcher( nfa, input_str: str, status: bool = False, counter: int = 0 ) -> list: # pragma: no cover. Too many possibilities. """ Searches for an appropriate path to return to _pathfinder. Args: nfa (VisualNFA): A VisualNFA object. input_str (str): Input symbols. status (bool, optional): If a path is found. Defaults to False. counter (int, optional): To keep track of recursion limit. Defaults to 0. Returns: list: a list of transition tuples. """ recursion_limit = 20000 counter += 1 current_state = {(nfa.initial_state)} path = [] for symbol in input_str: next_curr = nfa._get_next_current_states(current_state, symbol) if next_curr == set(): if not status: state = {} path.append(("".join(current_state), state, symbol)) return path else: break else: state = random.choice(list(next_curr)) path.append(("".join(current_state), state, symbol)) current_state = {(state)} # Accepted path obtained. if ( status and len(input_str) == (len(path)) and path[-1][1] in nfa.final_states ): return path # Rejected path obtained. elif not status and len(input_str) == (len(path)): return path # No path obtained. Try again. else: if counter <= recursion_limit: return nfa.__pathsearcher(nfa, input_str, status, counter) else: return False @staticmethod def _transition_steps( initial_state, final_states, input_str: str, transitions_taken: list, status: bool, ) -> DataFrame: # pragma: no cover. Too many possibilities. """ Generates a table of taken transitions based on the input string and its result. Args: initial_state (str): The NFA's initial state.
final_states (set): The NFA's final states. input_str (str): The input string to run on the NFA. transitions_taken (list): Transitions taken from the input string. status (bool): The result of the input string. Returns: DataFrame: Table of taken transitions based on the input string and it's result. """ current_states = transitions_taken.copy() for i, state in enumerate(current_states): if state == "" or state == {}: current_states[i] = "∅" elif state == initial_state and state in final_states: current_states[i] = "→*" + state elif state == initial_state: current_states[i] = "→" + state elif state in final_states: current_states[i] = "*" + state new_states = current_states.copy() del current_states[-1] del new_states[0] inputs = [str(x) for x in input_str] inputs = inputs[: len(current_states)] transition_steps: dict = { "Current state:": current_states, "Input symbol:": inputs, "New state:": new_states, } transition_steps = pd.DataFrame.from_dict(transition_steps) transition_steps.index += 1 transition_steps = pd.DataFrame.from_dict( transition_steps ).rename_axis("Step:", axis=1) if status: transition_steps.columns = pd.MultiIndex.from_product( [["[Accepted]"], transition_steps.columns] ) return transition_steps, inputs else: transition_steps.columns = pd.MultiIndex.from_product( [["[Rejected]"], transition_steps.columns] ) return transition_steps, inputs @staticmethod def _transitions_pairs( all_transitions: dict, ) -> list: # pragma: no cover. Too many possibilities. """ Generates a list of all possible transitions pairs for all input symbols. Args: transition_dict (dict): NFA transitions. Returns: list: All possible transitions for all the given input symbols. """ all_transitions = all_transitions.deepcopy() transition_possibilities: list = [] for state, state_transitions in all_transitions.items(): for symbol, transitions in state_transitions.items(): if len(transitions) < 2: if transitions != "" and transitions != {}: transitions = transitions.pop() transition_possibilities.append( (state, transitions, symbol) ) else: for transition in transitions: transition_possibilities.append( (state, transition, symbol) ) return transition_possibilities def input_check( self, input_str: str, return_result=False ) -> Union[ bool, list, DataFrame ]: # pragma: no cover. Too many possibilities. """ Checks if string of input symbols results in final state. Args: input_str (str): The input string to run on the NFA. return_result (bool, optional): Returns results to the show_diagram method. Defaults to False. Raises: TypeError: To let the user know a string has to be entered. Returns: Union[bool, list, list]: If the last state is the final state, transition pairs, and steps taken. """ if not isinstance(input_str, str): raise TypeError( f"input_str should be a string. " f"{input_str} is {type(input_str)}, not a string." ) # Check if input string is accepted. 
status: bool = self.nfa.accepts_input(input_str=input_str) status, taken_transitions_pairs = self._pathfinder( input_str=input_str, status=status ) if not isinstance(status, bool): if return_result: return status, [], DataFrame, input_str else: return status current_states = self.initial_state transitions_taken = [current_states] for transition in range(len(taken_transitions_pairs)): transitions_taken.append(taken_transitions_pairs[transition][1]) taken_steps, inputs = self._transition_steps( initial_state=self.nfa.initial_state, final_states=self.final_states, input_str=input_str, transitions_taken=transitions_taken, status=status, ) if return_result: return status, taken_transitions_pairs, taken_steps, inputs else: return taken_steps def show_diagram( self, input_str: str = None, filename: str = None, format_type: str = "png", path: str = None, *, view=False, cleanup: bool = True, horizontal: bool = True, reverse_orientation: bool = False, fig_size: tuple = (8, 8), font_size: float = 14.0, arrow_size: float = 0.85, state_seperation: float = 0.5, ) -> Digraph: # pragma: no cover. Too many possibilities. """ Generates the graph associated with the given NFA. Args: nfa (NFA): Deterministic Finite Automata to graph. input_str (str, optional): String list of input symbols. Defaults to None. filename (str, optional): Name of output file. Defaults to None. format_type (str, optional): File format [svg/png/...]. Defaults to "png". path (str, optional): Folder path for output file. Defaults to None. view (bool, optional): Storing and displaying the graph as a pdf. Defaults to False. cleanup (bool, optional): Garbage collection. Defaults to True. horizontal (bool, optional): Direction of node layout. Defaults to True. reverse_orientation (bool, optional): Reverse direction of node layout. Defaults to False. fig_size (tuple, optional): Figure size. Defaults to (8, 8). font_size (float, optional): Font size. Defaults to 14.0. arrow_size (float, optional): Arrow head size. Defaults to 0.85. state_seperation (float, optional): Node distance. Defaults to 0.5. Returns: Digraph: The graph in dot format. """ # Converting to graphviz preferred input type, # keeping the conventional input styles; i.e fig_size(8,8) fig_size = ", ".join(map(str, fig_size)) font_size = str(font_size) arrow_size = str(arrow_size) state_seperation = str(state_seperation) # Defining the graph. graph = Digraph(strict=False) graph.attr( size=fig_size, ranksep=state_seperation, ) if horizontal: graph.attr(rankdir="LR") if reverse_orientation: if horizontal: graph.attr(rankdir="RL") else: graph.attr(rankdir="BT") # Defining arrow to indicate the initial state. graph.node("Initial", label="", shape="point", fontsize=font_size) # Defining all states. for state in sorted(self.nfa.states): if ( state in self.nfa.initial_state and state in self.nfa.final_states ): graph.node(state, shape="doublecircle", fontsize=font_size) elif state in self.nfa.initial_state: graph.node(state, shape="circle", fontsize=font_size) elif state in self.nfa.final_states: graph.node(state, shape="doublecircle", fontsize=font_size) else: graph.node(state, shape="circle", fontsize=font_size) # Point initial arrow to the initial state. graph.edge("Initial", self.nfa.initial_state, arrowsize=arrow_size) # Define all tansitions in the finite state machine. all_transitions_pairs = self._transitions_pairs(self.nfa.transitions) # Replacing '' key name for empty string (lambda/epsilon) transitions. 
for i, pair in enumerate(all_transitions_pairs): if pair[2] == "": all_transitions_pairs[i] = (pair[0], pair[1], "λ") if input_str is None: for pair in all_transitions_pairs: graph.edge( pair[0], pair[1], label=" {} ".format(pair[2]), arrowsize=arrow_size, fontsize=font_size, ) status = None else: ( status, taken_transitions_pairs, taken_steps, inputs, ) = self.input_check(input_str=input_str, return_result=True) if not isinstance(status, bool): print(status) return remaining_transitions_pairs = [ x for x in all_transitions_pairs if x not in taken_transitions_pairs ] # Define color palette for transitions if status: start_color = hex_to_rgb_color("#FFFF00") end_color = hex_to_rgb_color("#00FF00") else: start_color = hex_to_rgb_color("#FFFF00") end_color = hex_to_rgb_color("#FF0000") number_of_colors = len(inputs) palette = create_palette( start_color, end_color, number_of_colors, sRGBColor ) color_gen = list_cycler(palette) # Define all tansitions in the finite state machine with traversal. counter = 0 for i, pair in enumerate(taken_transitions_pairs): dead_state = "\u00D8" edge_color = next(color_gen) counter += 1 if pair[1] != {}: graph.edge( pair[0], pair[1], label=" [{}]\n{} ".format(counter, pair[2]), arrowsize=arrow_size, fontsize=font_size, color=edge_color, penwidth="2.5", ) else: graph.node(dead_state, shape="circle", fontsize=font_size) graph.edge( pair[0], dead_state, label=" [{}]\n{} ".format(counter, inputs[-1]), arrowsize=arrow_size, fontsize=font_size, color=edge_color, penwidth="2.5", ) for pair in remaining_transitions_pairs: graph.edge( pair[0], pair[1], label=" {} ".format(pair[2]), arrowsize=arrow_size, fontsize=font_size, ) # Write diagram to file. PNG, SVG, etc. if filename: graph.render( filename=filename, format=format_type, directory=path, cleanup=cleanup, ) if view: graph.render(view=True) if input_str: display(taken_steps) return graph else: return graph
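A short usage sketch of the wrapper above. It assumes automata-lib and the visual_automata helpers are importable; the two-state NFA itself is a hypothetical example (it accepts strings over {a, b} ending in "a"), not one shipped with the package:

```
from visual_automata.fa.nfa import VisualNFA

# States and input symbols are derived from the transitions dict by __init__.
nfa = VisualNFA(
    transitions={
        "q0": {"a": {"q0", "q1"}, "b": {"q0"}},
        "q1": {},
    },
    initial_state="q0",
    final_states={"q1"},
)

print(nfa.accepts_input("ba"))   # True  -- ends in "a"
print(nfa.accepts_input("ab"))   # False
print(nfa.table)                 # pandas transition table, with * / → markers
# nfa.show_diagram("ba")         # renders the traversal (requires graphviz)
```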
PypiClean
/purplship_python-2021.7-py3-none-any.whl/purplship/api/webhooks_api.py
import re # noqa: F401 import sys # noqa: F401 from purplship.api_client import ApiClient, Endpoint as _Endpoint from purplship.model_utils import ( # noqa: F401 check_allowed_values, check_validations, date, datetime, file_type, none_type, validate_and_convert_types ) from purplship.model.error_response import ErrorResponse from purplship.model.operation import Operation from purplship.model.webhook import Webhook from purplship.model.webhook_data import WebhookData from purplship.model.webhook_list import WebhookList from purplship.model.webhook_test_request import WebhookTestRequest class WebhooksApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def __create( self, data, **kwargs ): """Create a webhook # noqa: E501 Create a new webhook. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create(data, async_req=True) >>> result = thread.get() Args: data (WebhookData): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: Webhook If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['data'] = \ data return self.call_with_http_info(**kwargs) self.create = _Endpoint( settings={ 'response_type': (Webhook,), 'auth': [ 'Token' ], 'endpoint_path': '/v1/webhooks', 'operation_id': 'create', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'data', ], 'required': [ 'data', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'data': (WebhookData,), }, 'attribute_map': { }, 'location_map': { 'data': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__create ) def __list( self, **kwargs ): """List all webhooks # noqa: E501 Retrieve all webhooks. # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list(async_req=True) >>> result = thread.get() Keyword Args: limit (int): Number of results to return per page.. [optional] offset (int): The initial index from which to return the results.. [optional] test_mode (bool, none_type): This flag filter out webhooks created in test or live mode. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: WebhookList If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') return self.call_with_http_info(**kwargs) self.list = _Endpoint( settings={ 'response_type': (WebhookList,), 'auth': [ 'Token' ], 'endpoint_path': '/v1/webhooks', 'operation_id': 'list', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'limit', 'offset', 'test_mode', ], 'required': [], 'nullable': [ 'test_mode', ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'limit': (int,), 'offset': (int,), 'test_mode': (bool, none_type,), }, 'attribute_map': { 'limit': 'limit', 'offset': 'offset', 'test_mode': 'test_mode', }, 'location_map': { 'limit': 'query', 'offset': 'query', 'test_mode': 'query', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__list ) def __remove( self, id, **kwargs ): """Remove a webhook # noqa: E501 Remove a webhook. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.remove(id, async_req=True) >>> result = thread.get() Args: id (str): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. 
_check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: Operation If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['id'] = \ id return self.call_with_http_info(**kwargs) self.remove = _Endpoint( settings={ 'response_type': (Operation,), 'auth': [ 'Token' ], 'endpoint_path': '/v1/webhooks/{id}', 'operation_id': 'remove', 'http_method': 'DELETE', 'servers': None, }, params_map={ 'all': [ 'id', ], 'required': [ 'id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'id': (str,), }, 'attribute_map': { 'id': 'id', }, 'location_map': { 'id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__remove ) def __retrieve( self, id, **kwargs ): """Retrieve a webhook # noqa: E501 Retrieve a webhook. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.retrieve(id, async_req=True) >>> result = thread.get() Args: id (str): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: Webhook If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['id'] = \ id return self.call_with_http_info(**kwargs) self.retrieve = _Endpoint( settings={ 'response_type': (Webhook,), 'auth': [ 'Token' ], 'endpoint_path': '/v1/webhooks/{id}', 'operation_id': 'retrieve', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'id', ], 'required': [ 'id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'id': (str,), }, 'attribute_map': { 'id': 'id', }, 'location_map': { 'id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client, callable=__retrieve ) def __test( self, id, data, **kwargs ): """Test a webhook # noqa: E501 test a webhook. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.test(id, data, async_req=True) >>> result = thread.get() Args: id (str): data (WebhookTestRequest): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: Operation If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['id'] = \ id kwargs['data'] = \ data return self.call_with_http_info(**kwargs) self.test = _Endpoint( settings={ 'response_type': (Operation,), 'auth': [ 'Token' ], 'endpoint_path': '/v1/webhooks/{id}/test', 'operation_id': 'test', 'http_method': 'POST', 'servers': None, }, params_map={ 'all': [ 'id', 'data', ], 'required': [ 'id', 'data', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'id': (str,), 'data': (WebhookTestRequest,), }, 'attribute_map': { 'id': 'id', }, 'location_map': { 'id': 'path', 'data': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__test ) def __update( self, id, data, **kwargs ): """Update a webhook # noqa: E501 update a webhook. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update(id, data, async_req=True) >>> result = thread.get() Args: id (str): data (WebhookData): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: Webhook If the method is called asynchronously, returns the request thread. 
""" kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_host_index'] = kwargs.get('_host_index') kwargs['id'] = \ id kwargs['data'] = \ data return self.call_with_http_info(**kwargs) self.update = _Endpoint( settings={ 'response_type': (Webhook,), 'auth': [ 'Token' ], 'endpoint_path': '/v1/webhooks/{id}', 'operation_id': 'update', 'http_method': 'PATCH', 'servers': None, }, params_map={ 'all': [ 'id', 'data', ], 'required': [ 'id', 'data', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'id': (str,), 'data': (WebhookData,), }, 'attribute_map': { 'id': 'id', }, 'location_map': { 'id': 'path', 'data': 'body', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [ 'application/json' ] }, api_client=api_client, callable=__update )
PypiClean
/h2o_engine_manager-0.5.3-py3-none-any.whl/h2o_engine_manager/gen/dai_engine_constraint_set_service/api/dai_engine_constraint_set_service_api.py
import re # noqa: F401 import sys # noqa: F401 from h2o_engine_manager.gen.dai_engine_constraint_set_service.api_client import ApiClient, Endpoint as _Endpoint from h2o_engine_manager.gen.dai_engine_constraint_set_service.model_utils import ( # noqa: F401 check_allowed_values, check_validations, date, datetime, file_type, none_type, validate_and_convert_types ) from h2o_engine_manager.gen.dai_engine_constraint_set_service.model.rpc_status import RpcStatus from h2o_engine_manager.gen.dai_engine_constraint_set_service.model.v1_get_dai_engine_constraint_set_response import V1GetDAIEngineConstraintSetResponse class DAIEngineConstraintSetServiceApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client self.d_ai_engine_constraint_set_service_get_dai_engine_constraint_set_endpoint = _Endpoint( settings={ 'response_type': (V1GetDAIEngineConstraintSetResponse,), 'auth': [], 'endpoint_path': '/v1/{name}', 'operation_id': 'd_ai_engine_constraint_set_service_get_dai_engine_constraint_set', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'name', ], 'required': [ 'name', ], 'nullable': [ ], 'enum': [ ], 'validation': [ 'name', ] }, root_map={ 'validations': { ('name',): { 'regex': { 'pattern': r'workspaces\/[^\/]+\/daiEngineConstraintSet', # noqa: E501 }, }, }, 'allowed_values': { }, 'openapi_types': { 'name': (str,), }, 'attribute_map': { 'name': 'name', }, 'location_map': { 'name': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'application/json' ], 'content_type': [], }, api_client=api_client ) def d_ai_engine_constraint_set_service_get_dai_engine_constraint_set( self, name, **kwargs ): """Returns the current DAIEngineConstraintSet. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.d_ai_engine_constraint_set_service_get_dai_engine_constraint_set(name, async_req=True) >>> result = thread.get() Args: name (str): DAIEngineConstraintSet resource name. Format: workspaces/{workspace}/daiEngineConstraintSet Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. 
_request_auths (list): set to override the auth_settings for an a single request; this effectively ignores the authentication in the spec for a single request. Default is None async_req (bool): execute request asynchronously Returns: V1GetDAIEngineConstraintSetResponse If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_spec_property_naming'] = kwargs.get( '_spec_property_naming', False ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['_request_auths'] = kwargs.get('_request_auths', None) kwargs['name'] = \ name return self.d_ai_engine_constraint_set_service_get_dai_engine_constraint_set_endpoint.call_with_http_info(**kwargs)
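A call sketch for the single endpoint above. The workspace name is hypothetical; the resource name must match the `workspaces/[^/]+/daiEngineConstraintSet` pattern that the endpoint's validation enforces before any request is sent:

```
from h2o_engine_manager.gen.dai_engine_constraint_set_service.api_client import ApiClient
from h2o_engine_manager.gen.dai_engine_constraint_set_service.api.dai_engine_constraint_set_service_api import (
    DAIEngineConstraintSetServiceApi,
)

api = DAIEngineConstraintSetServiceApi(ApiClient())  # host/auth configuration omitted

# "default" is a placeholder workspace; any name failing the regex is rejected client-side.
resp = api.d_ai_engine_constraint_set_service_get_dai_engine_constraint_set(
    name="workspaces/default/daiEngineConstraintSet"
)
```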
PypiClean
/deepNets-0.1.8.tar.gz/deepNets-0.1.8/.eggs/pluggy-1.0.0.dev0-py3.8.egg/pluggy/hooks.py
import inspect import sys import warnings class HookspecMarker(object): """ Decorator helper class for marking functions as hook specifications. You can instantiate it with a project_name to get a decorator. Calling :py:meth:`.PluginManager.add_hookspecs` later will discover all marked functions if the :py:class:`.PluginManager` uses the same project_name. """ def __init__(self, project_name): self.project_name = project_name def __call__( self, function=None, firstresult=False, historic=False, warn_on_impl=None ): """ if passed a function, directly sets attributes on the function which will make it discoverable to :py:meth:`.PluginManager.add_hookspecs`. If passed no function, returns a decorator which can be applied to a function later using the attributes supplied. If ``firstresult`` is ``True`` the 1:N hook call (N being the number of registered hook implementation functions) will stop at I<=N when the I'th function returns a non-``None`` result. If ``historic`` is ``True`` calls to a hook will be memorized and replayed on later registered plugins. """ def setattr_hookspec_opts(func): if historic and firstresult: raise ValueError("cannot have a historic firstresult hook") setattr( func, self.project_name + "_spec", dict( firstresult=firstresult, historic=historic, warn_on_impl=warn_on_impl, ), ) return func if function is not None: return setattr_hookspec_opts(function) else: return setattr_hookspec_opts class HookimplMarker(object): """ Decorator helper class for marking functions as hook implementations. You can instantiate with a ``project_name`` to get a decorator. Calling :py:meth:`.PluginManager.register` later will discover all marked functions if the :py:class:`.PluginManager` uses the same project_name. """ def __init__(self, project_name): self.project_name = project_name def __call__( self, function=None, hookwrapper=False, optionalhook=False, tryfirst=False, trylast=False, specname=None, ): """ if passed a function, directly sets attributes on the function which will make it discoverable to :py:meth:`.PluginManager.register`. If passed no function, returns a decorator which can be applied to a function later using the attributes supplied. If ``optionalhook`` is ``True`` a missing matching hook specification will not result in an error (by default it is an error if no matching spec is found). If ``tryfirst`` is ``True`` this hook implementation will run as early as possible in the chain of N hook implementations for a specification. If ``trylast`` is ``True`` this hook implementation will run as late as possible in the chain of N hook implementations. If ``hookwrapper`` is ``True`` the hook implementations needs to execute exactly one ``yield``. The code before the ``yield`` is run early before any non-hookwrapper function is run. The code after the ``yield`` is run after all non-hookwrapper function have run. The ``yield`` receives a :py:class:`.callers._Result` object representing the exception or result outcome of the inner calls (including other hookwrapper calls). If ``specname`` is provided, it will be used instead of the function name when matching this hook implementation to a hook specification during registration. 
""" def setattr_hookimpl_opts(func): setattr( func, self.project_name + "_impl", dict( hookwrapper=hookwrapper, optionalhook=optionalhook, tryfirst=tryfirst, trylast=trylast, specname=specname, ), ) return func if function is None: return setattr_hookimpl_opts else: return setattr_hookimpl_opts(function) def normalize_hookimpl_opts(opts): opts.setdefault("tryfirst", False) opts.setdefault("trylast", False) opts.setdefault("hookwrapper", False) opts.setdefault("optionalhook", False) opts.setdefault("specname", None) if hasattr(inspect, "getfullargspec"): def _getargspec(func): return inspect.getfullargspec(func) else: def _getargspec(func): return inspect.getargspec(func) _PYPY3 = hasattr(sys, "pypy_version_info") and sys.version_info.major == 3 def varnames(func): """Return tuple of positional and keywrord argument names for a function, method, class or callable. In case of a class, its ``__init__`` method is considered. For methods the ``self`` parameter is not included. """ cache = getattr(func, "__dict__", {}) try: return cache["_varnames"] except KeyError: pass if inspect.isclass(func): try: func = func.__init__ except AttributeError: return (), () elif not inspect.isroutine(func): # callable object? try: func = getattr(func, "__call__", func) except Exception: return (), () try: # func MUST be a function or method here or we won't parse any args spec = _getargspec(func) except TypeError: return (), () args, defaults = tuple(spec.args), spec.defaults if defaults: index = -len(defaults) args, kwargs = args[:index], tuple(args[index:]) else: kwargs = () # strip any implicit instance arg # pypy3 uses "obj" instead of "self" for default dunder methods implicit_names = ("self",) if not _PYPY3 else ("self", "obj") if args: if inspect.ismethod(func) or ( "." in getattr(func, "__qualname__", ()) and args[0] in implicit_names ): args = args[1:] try: cache["_varnames"] = args, kwargs except TypeError: pass return args, kwargs class _HookRelay(object): """ hook holder object for performing 1:N hook calls where N is the number of registered plugins. """ class _HookCaller(object): def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None): self.name = name self._wrappers = [] self._nonwrappers = [] self._hookexec = hook_execute self.argnames = None self.kwargnames = None self.spec = None if specmodule_or_class is not None: assert spec_opts is not None self.set_specification(specmodule_or_class, spec_opts) def has_spec(self): return self.spec is not None def set_specification(self, specmodule_or_class, spec_opts): assert not self.has_spec() self.spec = HookSpec(specmodule_or_class, self.name, spec_opts) if spec_opts.get("historic"): self._call_history = [] def is_historic(self): return hasattr(self, "_call_history") def _remove_plugin(self, plugin): def remove(wrappers): for i, method in enumerate(wrappers): if method.plugin == plugin: del wrappers[i] return True if remove(self._wrappers) is None: if remove(self._nonwrappers) is None: raise ValueError("plugin %r not found" % (plugin,)) def get_hookimpls(self): # Order is important for _hookexec return self._nonwrappers + self._wrappers def _add_hookimpl(self, hookimpl): """Add an implementation to the callback chain. 
""" if hookimpl.hookwrapper: methods = self._wrappers else: methods = self._nonwrappers if hookimpl.trylast: methods.insert(0, hookimpl) elif hookimpl.tryfirst: methods.append(hookimpl) else: # find last non-tryfirst method i = len(methods) - 1 while i >= 0 and methods[i].tryfirst: i -= 1 methods.insert(i + 1, hookimpl) def __repr__(self): return "<_HookCaller %r>" % (self.name,) def __call__(self, *args, **kwargs): if args: raise TypeError("hook calling supports only keyword arguments") assert not self.is_historic() if self.spec and self.spec.argnames: notincall = set(self.spec.argnames) - set(kwargs.keys()) if notincall: warnings.warn( "Argument(s) {} which are declared in the hookspec " "can not be found in this hook call".format(tuple(notincall)), stacklevel=2, ) return self._hookexec(self, self.get_hookimpls(), kwargs) def call_historic(self, result_callback=None, kwargs=None, proc=None): """Call the hook with given ``kwargs`` for all registered plugins and for all plugins which will be registered afterwards. If ``result_callback`` is not ``None`` it will be called for for each non-``None`` result obtained from a hook implementation. .. note:: The ``proc`` argument is now deprecated. """ if proc is not None: warnings.warn( "Support for `proc` argument is now deprecated and will be" "removed in an upcoming release.", DeprecationWarning, ) result_callback = proc self._call_history.append((kwargs or {}, result_callback)) # historizing hooks don't return results res = self._hookexec(self, self.get_hookimpls(), kwargs) if result_callback is None: return # XXX: remember firstresult isn't compat with historic for x in res or []: result_callback(x) def call_extra(self, methods, kwargs): """ Call the hook with some additional temporarily participating methods using the specified ``kwargs`` as call parameters. """ old = list(self._nonwrappers), list(self._wrappers) for method in methods: opts = dict(hookwrapper=False, trylast=False, tryfirst=False) hookimpl = HookImpl(None, "<temp>", method, opts) self._add_hookimpl(hookimpl) try: return self(**kwargs) finally: self._nonwrappers, self._wrappers = old def _maybe_apply_history(self, method): """Apply call history to a new hookimpl if it is marked as historic. """ if self.is_historic(): for kwargs, result_callback in self._call_history: res = self._hookexec(self, [method], kwargs) if res and result_callback is not None: result_callback(res[0]) class HookImpl(object): def __init__(self, plugin, plugin_name, function, hook_impl_opts): self.function = function self.argnames, self.kwargnames = varnames(self.function) self.plugin = plugin self.opts = hook_impl_opts self.plugin_name = plugin_name self.__dict__.update(hook_impl_opts) def __repr__(self): return "<HookImpl plugin_name=%r, plugin=%r>" % (self.plugin_name, self.plugin) class HookSpec(object): def __init__(self, namespace, name, opts): self.namespace = namespace self.function = function = getattr(namespace, name) self.name = name self.argnames, self.kwargnames = varnames(function) self.opts = opts self.warn_on_impl = opts.get("warn_on_impl")
PypiClean
/pyfloat-0.0.3.tar.gz/pyfloat-0.0.3/README.md
## PyFloat Lib

Python library for doing math operations with floats without losing precision.

## How to use

```
>>> from pyfloat import PyFloat
>>> PyFloat(2.1234e-12)
'0.0000000000021234'
>>> PyFloat(-2.1234e+12)
'-2123400000000'
>>> PyFloat(0.1452)
'0.1452'
>>> PyFloat(PyFloat(152.455))
'152.455'
>>> a = PyFloat("123451.1234551230000000004444445551122000000011")
>>> b = PyFloat(-8123994.000002234100000001323400000001232112221)
>>> a + b
'-8000542.8765471111000000008789554448890321122199'
>>> a - b
'8247445.1234573571000000017678445551134321122221'
>>> a * b
'-1002916186242.9543234169148643344158458369442274183843912848765430855693663896461975553234431'
>>> a == b
'False'
>>> a != b
'True'
>>> a > b
'True'
>>> PyFloat(0.00239419391).round(10)
'0.0023941939'
>>> PyFloat(0.00239419391).round(7)
'0.0023942'
>>> PyFloat(-0.00239419391).round(7)
'-0.0023942'
>>> PyFloat(0.00239419391).round(4)
'0.0024'
>>> PyFloat(-1234.5678).abs()
'1234.5678'
>>> PyFloat(0.0000089778).truncate(8)
'0.00000897'
>>> PyFloat(-0.0000089778).truncate(8)
'-0.00000897'
```

## Test

```
python pytfloat_test.py
```
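For comparison, builtin binary floats accumulate representation error on even simple sums (standard CPython behavior); the exact many-digit results above suggest PyFloat operates on the decimal digits directly to avoid this drift:

```
>>> 0.1 + 0.2
0.30000000000000004
```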
PypiClean
/intel_tensorflow-2.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/tensorflow/python/keras/layers/convolutional_recurrent.py
"""Convolutional-recurrent layers.""" import numpy as np from tensorflow.python.keras import activations from tensorflow.python.keras import backend from tensorflow.python.keras import constraints from tensorflow.python.keras import initializers from tensorflow.python.keras import regularizers from tensorflow.python.keras.engine.base_layer import Layer from tensorflow.python.keras.engine.input_spec import InputSpec from tensorflow.python.keras.layers.recurrent import DropoutRNNCellMixin from tensorflow.python.keras.layers.recurrent import RNN from tensorflow.python.keras.utils import conv_utils from tensorflow.python.keras.utils import generic_utils from tensorflow.python.keras.utils import tf_utils from tensorflow.python.ops import array_ops from tensorflow.python.util.tf_export import keras_export class ConvRNN2D(RNN): """Base class for convolutional-recurrent layers. Args: cell: A RNN cell instance. A RNN cell is a class that has: - a `call(input_at_t, states_at_t)` method, returning `(output_at_t, states_at_t_plus_1)`. The call method of the cell can also take the optional argument `constants`, see section "Note on passing external constants" below. - a `state_size` attribute. This can be a single integer (single state) in which case it is the number of channels of the recurrent state (which should be the same as the number of channels of the cell output). This can also be a list/tuple of integers (one size per state). In this case, the first entry (`state_size[0]`) should be the same as the size of the cell output. return_sequences: Boolean. Whether to return the last output. in the output sequence, or the full sequence. return_state: Boolean. Whether to return the last state in addition to the output. go_backwards: Boolean (default False). If True, process the input sequence backwards and return the reversed sequence. stateful: Boolean (default False). If True, the last state for each sample at index i in a batch will be used as initial state for the sample of index i in the following batch. input_shape: Use this argument to specify the shape of the input when this layer is the first one in a model. Call arguments: inputs: A 5D tensor. mask: Binary tensor of shape `(samples, timesteps)` indicating whether a given timestep should be masked. training: Python boolean indicating whether the layer should behave in training mode or in inference mode. This argument is passed to the cell when calling it. This is for use with cells that use dropout. initial_state: List of initial state tensors to be passed to the first call of the cell. constants: List of constant tensors to be passed to the cell at each timestep. Input shape: 5D tensor with shape: `(samples, timesteps, channels, rows, cols)` if data_format='channels_first' or 5D tensor with shape: `(samples, timesteps, rows, cols, channels)` if data_format='channels_last'. Output shape: - If `return_state`: a list of tensors. The first tensor is the output. The remaining tensors are the last states, each 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if data_format='channels_last'. `rows` and `cols` values might have changed due to padding. - If `return_sequences`: 5D tensor with shape: `(samples, timesteps, filters, new_rows, new_cols)` if data_format='channels_first' or 5D tensor with shape: `(samples, timesteps, new_rows, new_cols, filters)` if data_format='channels_last'. 
  - Else, 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
    data_format='channels_first' or 4D tensor with shape: `(samples, new_rows,
    new_cols, filters)` if data_format='channels_last'.

  Masking:
    This layer supports masking for input data with a variable number
    of timesteps.

  Note on using statefulness in RNNs:
    You can set RNN layers to be 'stateful', which means that the states
    computed for the samples in one batch will be reused as initial states
    for the samples in the next batch. This assumes a one-to-one mapping
    between samples in different successive batches.

    To enable statefulness:
      - Specify `stateful=True` in the layer constructor.
      - Specify a fixed batch size for your model, by passing
        - If sequential model:
          `batch_input_shape=(...)` to the first layer in your model.
        - If functional model with 1 or more Input layers:
          `batch_shape=(...)` to all the first layers in your model.
          This is the expected shape of your inputs
          *including the batch size*.
          It should be a tuple of integers, e.g. `(32, 10, 100, 100, 32)`.
          Note that the number of rows and columns should be specified too.
      - Specify `shuffle=False` when calling fit().

    To reset the states of your model, call `.reset_states()` on either
    a specific layer, or on your entire model.

  Note on specifying the initial state of RNNs:
    You can specify the initial state of RNN layers symbolically by
    calling them with the keyword argument `initial_state`. The value of
    `initial_state` should be a tensor or list of tensors representing
    the initial state of the RNN layer.
    You can specify the initial state of RNN layers numerically by
    calling `reset_states` with the keyword argument `states`. The value of
    `states` should be a numpy array or list of numpy arrays representing
    the initial state of the RNN layer.

  Note on passing external constants to RNNs:
    You can pass "external" constants to the cell using the `constants`
    keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
    requires that the `cell.call` method accepts the same keyword argument
    `constants`. Such constants can be used to condition the cell
    transformation on additional static inputs (not changing over time),
    a.k.a. an attention mechanism.
  """

  def __init__(self,
               cell,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               unroll=False,
               **kwargs):
    if unroll:
      raise TypeError('Unrolling isn\'t possible with '
                      'convolutional RNNs.')
    if isinstance(cell, (list, tuple)):
      # The StackedConvRNN2DCells isn't implemented yet.
      raise TypeError('It is not possible at the moment to '
                      'stack convolutional cells.')
    super(ConvRNN2D, self).__init__(cell,
                                    return_sequences,
                                    return_state,
                                    go_backwards,
                                    stateful,
                                    unroll,
                                    **kwargs)
    self.input_spec = [InputSpec(ndim=5)]
    self.states = None
    self._num_constants = None

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if isinstance(input_shape, list):
      input_shape = input_shape[0]

    cell = self.cell
    if cell.data_format == 'channels_first':
      rows = input_shape[3]
      cols = input_shape[4]
    elif cell.data_format == 'channels_last':
      rows = input_shape[2]
      cols = input_shape[3]
    rows = conv_utils.conv_output_length(rows,
                                         cell.kernel_size[0],
                                         padding=cell.padding,
                                         stride=cell.strides[0],
                                         dilation=cell.dilation_rate[0])
    cols = conv_utils.conv_output_length(cols,
                                         cell.kernel_size[1],
                                         padding=cell.padding,
                                         stride=cell.strides[1],
                                         dilation=cell.dilation_rate[1])

    if cell.data_format == 'channels_first':
      output_shape = input_shape[:2] + (cell.filters, rows, cols)
    elif cell.data_format == 'channels_last':
      output_shape = input_shape[:2] + (rows, cols, cell.filters)

    if not self.return_sequences:
      output_shape = output_shape[:1] + output_shape[2:]

    if self.return_state:
      output_shape = [output_shape]
      if cell.data_format == 'channels_first':
        output_shape += [(input_shape[0], cell.filters, rows, cols)
                         for _ in range(2)]
      elif cell.data_format == 'channels_last':
        output_shape += [(input_shape[0], rows, cols, cell.filters)
                         for _ in range(2)]
    return output_shape

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Note input_shape will be list of shapes of initial states and
    # constants if these are passed in __call__.
    if self._num_constants is not None:
      constants_shape = input_shape[-self._num_constants:]  # pylint: disable=E1130
    else:
      constants_shape = None

    if isinstance(input_shape, list):
      input_shape = input_shape[0]

    batch_size = input_shape[0] if self.stateful else None
    self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:5])

    # allow cell (if layer) to build before we set or validate state_spec
    if isinstance(self.cell, Layer):
      step_input_shape = (input_shape[0],) + input_shape[2:]
      if constants_shape is not None:
        self.cell.build([step_input_shape] + constants_shape)
      else:
        self.cell.build(step_input_shape)

    # set or validate state_spec
    if hasattr(self.cell.state_size, '__len__'):
      state_size = list(self.cell.state_size)
    else:
      state_size = [self.cell.state_size]

    if self.state_spec is not None:
      # initial_state was passed in call, check compatibility
      if self.cell.data_format == 'channels_first':
        ch_dim = 1
      elif self.cell.data_format == 'channels_last':
        ch_dim = 3
      if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:
        raise ValueError(
            'An initial_state was passed that is not compatible with '
            '`cell.state_size`. Received `state_spec`={}; '
            'However `cell.state_size` is '
            '{}'.format([spec.shape for spec in self.state_spec],
                        self.cell.state_size))
    else:
      if self.cell.data_format == 'channels_first':
        self.state_spec = [InputSpec(shape=(None, dim, None, None))
                           for dim in state_size]
      elif self.cell.data_format == 'channels_last':
        self.state_spec = [InputSpec(shape=(None, None, None, dim))
                           for dim in state_size]
    if self.stateful:
      self.reset_states()
    self.built = True

  def get_initial_state(self, inputs):
    # (samples, timesteps, rows, cols, filters)
    initial_state = backend.zeros_like(inputs)
    # (samples, rows, cols, filters)
    initial_state = backend.sum(initial_state, axis=1)
    shape = list(self.cell.kernel_shape)
    shape[-1] = self.cell.filters
    initial_state = self.cell.input_conv(initial_state,
                                         array_ops.zeros(tuple(shape),
                                                         initial_state.dtype),
                                         padding=self.cell.padding)

    if hasattr(self.cell.state_size, '__len__'):
      return [initial_state for _ in self.cell.state_size]
    else:
      return [initial_state]

  def call(self,
           inputs,
           mask=None,
           training=None,
           initial_state=None,
           constants=None):
    # note that the .build() method of subclasses MUST define
    # self.input_spec and self.state_spec with complete input shapes.
    inputs, initial_state, constants = self._process_inputs(
        inputs, initial_state, constants)

    if isinstance(mask, list):
      mask = mask[0]
    timesteps = backend.int_shape(inputs)[1]

    kwargs = {}
    if generic_utils.has_arg(self.cell.call, 'training'):
      kwargs['training'] = training

    if constants:
      if not generic_utils.has_arg(self.cell.call, 'constants'):
        raise ValueError('RNN cell does not support constants')

      def step(inputs, states):
        constants = states[-self._num_constants:]  # pylint: disable=invalid-unary-operand-type
        states = states[:-self._num_constants]  # pylint: disable=invalid-unary-operand-type
        return self.cell.call(inputs, states, constants=constants, **kwargs)
    else:
      def step(inputs, states):
        return self.cell.call(inputs, states, **kwargs)

    last_output, outputs, states = backend.rnn(step,
                                               inputs,
                                               initial_state,
                                               constants=constants,
                                               go_backwards=self.go_backwards,
                                               mask=mask,
                                               input_length=timesteps)
    if self.stateful:
      updates = [
          backend.update(self_state, state)
          for self_state, state in zip(self.states, states)
      ]
      self.add_update(updates)

    if self.return_sequences:
      output = outputs
    else:
      output = last_output

    if self.return_state:
      if not isinstance(states, (list, tuple)):
        states = [states]
      else:
        states = list(states)
      return [output] + states
    else:
      return output

  def reset_states(self, states=None):
    if not self.stateful:
      raise AttributeError('Layer must be stateful.')
    input_shape = self.input_spec[0].shape
    state_shape = self.compute_output_shape(input_shape)
    if self.return_state:
      state_shape = state_shape[0]
    if self.return_sequences:
      state_shape = state_shape[:1].concatenate(state_shape[2:])
    if None in state_shape:
      raise ValueError('If a RNN is stateful, it needs to know '
                       'its batch size. Specify the batch size '
                       'of your input tensors: \n'
                       '- If using a Sequential model, '
                       'specify the batch size by passing '
                       'a `batch_input_shape` '
                       'argument to your first layer.\n'
                       '- If using the functional API, specify '
                       'the time dimension by passing a '
                       '`batch_shape` argument to your Input layer.\n'
                       'The same thing goes for the number of rows and '
                       'columns.')

    # helper function
    def get_tuple_shape(nb_channels):
      result = list(state_shape)
      if self.cell.data_format == 'channels_first':
        result[1] = nb_channels
      elif self.cell.data_format == 'channels_last':
        result[3] = nb_channels
      else:
        raise KeyError
      return tuple(result)

    # initialize state if None
    if self.states[0] is None:
      if hasattr(self.cell.state_size, '__len__'):
        self.states = [backend.zeros(get_tuple_shape(dim))
                       for dim in self.cell.state_size]
      else:
        self.states = [backend.zeros(get_tuple_shape(self.cell.state_size))]
    elif states is None:
      if hasattr(self.cell.state_size, '__len__'):
        for state, dim in zip(self.states, self.cell.state_size):
          backend.set_value(state, np.zeros(get_tuple_shape(dim)))
      else:
        backend.set_value(self.states[0],
                          np.zeros(get_tuple_shape(self.cell.state_size)))
    else:
      if not isinstance(states, (list, tuple)):
        states = [states]
      if len(states) != len(self.states):
        raise ValueError('Layer ' + self.name + ' expects ' +
                         str(len(self.states)) + ' states, ' +
                         'but it received ' + str(len(states)) +
                         ' state values. Input received: ' + str(states))
      for index, (value, state) in enumerate(zip(states, self.states)):
        if hasattr(self.cell.state_size, '__len__'):
          dim = self.cell.state_size[index]
        else:
          dim = self.cell.state_size
        if value.shape != get_tuple_shape(dim):
          raise ValueError('State ' + str(index) +
                           ' is incompatible with layer ' +
                           self.name + ': expected shape=' +
                           str(get_tuple_shape(dim)) +
                           ', found shape=' + str(value.shape))
        # TODO(anjalisridhar): consider batch calls to `set_value`.
        backend.set_value(state, value)


class ConvLSTM2DCell(DropoutRNNCellMixin, Layer):
  """Cell class for the ConvLSTM2D layer.

  Args:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of output filters in the convolution).
    kernel_size: An integer or tuple/list of n integers, specifying the
      dimensions of the convolution window.
    strides: An integer or tuple/list of n integers, specifying the strides of
      the convolution. Specifying any stride value != 1 is incompatible with
      specifying any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means
      no padding. `"same"` results in padding evenly to the left/right or
      up/down of the input such that output has the same height/width
      dimension as the input.
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. It defaults to the `image_data_format` value found in
      your Keras config file at `~/.keras/keras.json`. If you never set it,
      then it will be "channels_last".
    dilation_rate: An integer or tuple/list of n integers, specifying the
      dilation rate to use for dilated convolution. Currently, specifying any
      `dilation_rate` value != 1 is incompatible with specifying any `strides`
      value != 1.
    activation: Activation function to use. If you don't specify anything, no
      activation is applied (ie. "linear" activation: `a(x) = x`).
    recurrent_activation: Activation function to use for the recurrent step.
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix, used for
      the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel` weights
      matrix, used for the linear transformation of the recurrent state.
    bias_initializer: Initializer for the bias vector.
    unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
      at initialization. Use in combination with `bias_initializer="zeros"`.
      This is recommended in [Jozefowicz et al., 2015](
        http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
    kernel_regularizer: Regularizer function applied to the `kernel` weights
      matrix.
    recurrent_regularizer: Regularizer function applied to the
      `recurrent_kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    kernel_constraint: Constraint function applied to the `kernel` weights
      matrix.
    recurrent_constraint: Constraint function applied to the
      `recurrent_kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.
    dropout: Float between 0 and 1. Fraction of the units to drop for the
      linear transformation of the inputs.
    recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
      for the linear transformation of the recurrent state.

  Call arguments:
    inputs: A 4D tensor.
    states: List of state tensors corresponding to the previous timestep.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. Only relevant when `dropout` or
      `recurrent_dropout` is used.
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               **kwargs):
    super(ConvLSTM2DCell, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2,
                                                  'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
                                                    'dilation_rate')
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.state_size = (self.filters, self.filters)

  def build(self, input_shape):
    if self.data_format == 'channels_first':
      channel_axis = 1
    else:
      channel_axis = -1
    if input_shape[channel_axis] is None:
      raise ValueError('The channel dimension of the inputs '
                       'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis]
    kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
    self.kernel_shape = kernel_shape
    recurrent_kernel_shape = self.kernel_size + (self.filters,
                                                 self.filters * 4)

    self.kernel = self.add_weight(shape=kernel_shape,
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    self.recurrent_kernel = self.add_weight(
        shape=recurrent_kernel_shape,
        initializer=self.recurrent_initializer,
        name='recurrent_kernel',
        regularizer=self.recurrent_regularizer,
        constraint=self.recurrent_constraint)

    if self.use_bias:
      if self.unit_forget_bias:

        def bias_initializer(_, *args, **kwargs):
          return backend.concatenate([
              self.bias_initializer((self.filters,), *args, **kwargs),
              initializers.get('ones')((self.filters,), *args, **kwargs),
              self.bias_initializer((self.filters * 2,), *args, **kwargs),
          ])
      else:
        bias_initializer = self.bias_initializer
      self.bias = self.add_weight(
          shape=(self.filters * 4,),
          name='bias',
          initializer=bias_initializer,
          regularizer=self.bias_regularizer,
          constraint=self.bias_constraint)
    else:
      self.bias = None
    self.built = True

  def call(self, inputs, states, training=None):
    h_tm1 = states[0]  # previous memory state
    c_tm1 = states[1]  # previous carry state

    # dropout matrices for input units
    dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
    # dropout matrices for recurrent units
    rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
        h_tm1, training, count=4)

    if 0 < self.dropout < 1.:
      inputs_i = inputs * dp_mask[0]
      inputs_f = inputs * dp_mask[1]
      inputs_c = inputs * dp_mask[2]
      inputs_o = inputs * dp_mask[3]
    else:
      inputs_i = inputs
      inputs_f = inputs
      inputs_c = inputs
      inputs_o = inputs

    if 0 < self.recurrent_dropout < 1.:
      h_tm1_i = h_tm1 * rec_dp_mask[0]
      h_tm1_f = h_tm1 * rec_dp_mask[1]
      h_tm1_c = h_tm1 * rec_dp_mask[2]
      h_tm1_o = h_tm1 * rec_dp_mask[3]
    else:
      h_tm1_i = h_tm1
      h_tm1_f = h_tm1
      h_tm1_c = h_tm1
      h_tm1_o = h_tm1

    (kernel_i, kernel_f,
     kernel_c, kernel_o) = array_ops.split(self.kernel, 4, axis=3)
    (recurrent_kernel_i,
     recurrent_kernel_f,
     recurrent_kernel_c,
     recurrent_kernel_o) = array_ops.split(self.recurrent_kernel, 4, axis=3)

    if self.use_bias:
      bias_i, bias_f, bias_c, bias_o = array_ops.split(self.bias, 4)
    else:
      bias_i, bias_f, bias_c, bias_o = None, None, None, None

    x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding)
    x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding)
    x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding)
    x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding)
    h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i)
    h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f)
    h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c)
    h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o)

    i = self.recurrent_activation(x_i + h_i)
    f = self.recurrent_activation(x_f + h_f)
    c = f * c_tm1 + i * self.activation(x_c + h_c)
    o = self.recurrent_activation(x_o + h_o)
    h = o * self.activation(c)
    return h, [h, c]

  def input_conv(self, x, w, b=None, padding='valid'):
    conv_out = backend.conv2d(x, w, strides=self.strides,
                              padding=padding,
                              data_format=self.data_format,
                              dilation_rate=self.dilation_rate)
    if b is not None:
      conv_out = backend.bias_add(conv_out, b,
                                  data_format=self.data_format)
    return conv_out

  def recurrent_conv(self, x, w):
    conv_out = backend.conv2d(x, w, strides=(1, 1),
                              padding='same',
                              data_format=self.data_format)
    return conv_out

  def get_config(self):
    config = {'filters': self.filters,
              'kernel_size': self.kernel_size,
              'strides': self.strides,
              'padding': self.padding,
              'data_format': self.data_format,
              'dilation_rate': self.dilation_rate,
              'activation': activations.serialize(self.activation),
              'recurrent_activation': activations.serialize(
                  self.recurrent_activation),
              'use_bias': self.use_bias,
              'kernel_initializer': initializers.serialize(
                  self.kernel_initializer),
              'recurrent_initializer': initializers.serialize(
                  self.recurrent_initializer),
              'bias_initializer': initializers.serialize(
                  self.bias_initializer),
              'unit_forget_bias': self.unit_forget_bias,
              'kernel_regularizer': regularizers.serialize(
                  self.kernel_regularizer),
              'recurrent_regularizer': regularizers.serialize(
                  self.recurrent_regularizer),
              'bias_regularizer': regularizers.serialize(
                  self.bias_regularizer),
              'kernel_constraint': constraints.serialize(
                  self.kernel_constraint),
              'recurrent_constraint': constraints.serialize(
                  self.recurrent_constraint),
              'bias_constraint': constraints.serialize(self.bias_constraint),
              'dropout': self.dropout,
              'recurrent_dropout': self.recurrent_dropout}
    base_config = super(ConvLSTM2DCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))


@keras_export('keras.layers.ConvLSTM2D')
class ConvLSTM2D(ConvRNN2D):
  """2D Convolutional LSTM layer.

  A convolutional LSTM is similar to an LSTM, but the input transformations
  and recurrent transformations are both convolutional. This layer is
  typically used to process timeseries of images (i.e. video-like data).

  It is known to perform well for weather data forecasting,
  using inputs that are timeseries of 2D grids of sensor values.
  It isn't usually applied to regular video data, due to its high
  computational cost.

  Args:
    filters: Integer, the dimensionality of the output space (i.e. the number
      of output filters in the convolution).
    kernel_size: An integer or tuple/list of n integers, specifying the
      dimensions of the convolution window.
    strides: An integer or tuple/list of n integers, specifying the strides of
      the convolution. Specifying any stride value != 1 is incompatible with
      specifying any `dilation_rate` value != 1.
    padding: One of `"valid"` or `"same"` (case-insensitive). `"valid"` means
      no padding. `"same"` results in padding evenly to the left/right or
      up/down of the input such that output has the same height/width
      dimension as the input.
    data_format: A string, one of `channels_last` (default) or
      `channels_first`. The ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape `(batch, time, ...,
      channels)` while `channels_first` corresponds to inputs with shape
      `(batch, time, channels, ...)`. It defaults to the `image_data_format`
      value found in your Keras config file at `~/.keras/keras.json`. If you
      never set it, then it will be "channels_last".
    dilation_rate: An integer or tuple/list of n integers, specifying the
      dilation rate to use for dilated convolution. Currently, specifying any
      `dilation_rate` value != 1 is incompatible with specifying any `strides`
      value != 1.
    activation: Activation function to use. By default hyperbolic tangent
      activation function is applied (`tanh(x)`).
    recurrent_activation: Activation function to use for the recurrent step.
    use_bias: Boolean, whether the layer uses a bias vector.
    kernel_initializer: Initializer for the `kernel` weights matrix, used for
      the linear transformation of the inputs.
    recurrent_initializer: Initializer for the `recurrent_kernel` weights
      matrix, used for the linear transformation of the recurrent state.
    bias_initializer: Initializer for the bias vector.
    unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate
      at initialization. Use in combination with `bias_initializer="zeros"`.
      This is recommended in [Jozefowicz et al., 2015](
        http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
    kernel_regularizer: Regularizer function applied to the `kernel` weights
      matrix.
    recurrent_regularizer: Regularizer function applied to the
      `recurrent_kernel` weights matrix.
    bias_regularizer: Regularizer function applied to the bias vector.
    activity_regularizer: Regularizer function applied to the output of the
      layer (its "activation").
    kernel_constraint: Constraint function applied to the `kernel` weights
      matrix.
    recurrent_constraint: Constraint function applied to the
      `recurrent_kernel` weights matrix.
    bias_constraint: Constraint function applied to the bias vector.
    return_sequences: Boolean. Whether to return the last output in the output
      sequence, or the full sequence. (default False)
    return_state: Boolean. Whether to return the last state in addition to the
      output. (default False)
    go_backwards: Boolean (default False). If True, process the input sequence
      backwards.
    stateful: Boolean (default False). If True, the last state for each sample
      at index i in a batch will be used as initial state for the sample of
      index i in the following batch.
    dropout: Float between 0 and 1. Fraction of the units to drop for the
      linear transformation of the inputs.
    recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
      for the linear transformation of the recurrent state.

  Call arguments:
    inputs: A 5D float tensor (see input shape description below).
    mask: Binary tensor of shape `(samples, timesteps)` indicating whether a
      given timestep should be masked.
    training: Python boolean indicating whether the layer should behave in
      training mode or in inference mode. This argument is passed to the cell
      when calling it. This is only relevant if `dropout` or
      `recurrent_dropout` are set.
    initial_state: List of initial state tensors to be passed to the first
      call of the cell.

  Input shape:
    - If data_format='channels_first'
        5D tensor with shape: `(samples, time, channels, rows, cols)`
    - If data_format='channels_last'
        5D tensor with shape: `(samples, time, rows, cols, channels)`

  Output shape:
    - If `return_state`: a list of tensors. The first tensor is the output.
      The remaining tensors are the last states, each 4D tensor with shape:
      `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
      or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if
      data_format='channels_last'. `rows` and `cols` values might have changed
      due to padding.
    - If `return_sequences`: 5D tensor with shape: `(samples, timesteps,
      filters, new_rows, new_cols)` if data_format='channels_first' or 5D
      tensor with shape: `(samples, timesteps, new_rows, new_cols, filters)`
      if data_format='channels_last'.
    - Else, 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
      data_format='channels_first' or 4D tensor with shape: `(samples,
      new_rows, new_cols, filters)` if data_format='channels_last'.

  Raises:
    ValueError: in case of invalid constructor arguments.

  References:
    - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)
    (the current implementation does not include the feedback loop on the
    cells output).

  Example:

  ```python
  steps = 10
  height = 32
  width = 32
  input_channels = 3
  output_channels = 6

  inputs = tf.keras.Input(shape=(steps, height, width, input_channels))
  layer = tf.keras.layers.ConvLSTM2D(filters=output_channels, kernel_size=3)
  outputs = layer(inputs)
  ```
  """

  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               dropout=0.,
               recurrent_dropout=0.,
               **kwargs):
    cell = ConvLSTM2DCell(filters=filters,
                          kernel_size=kernel_size,
                          strides=strides,
                          padding=padding,
                          data_format=data_format,
                          dilation_rate=dilation_rate,
                          activation=activation,
                          recurrent_activation=recurrent_activation,
                          use_bias=use_bias,
                          kernel_initializer=kernel_initializer,
                          recurrent_initializer=recurrent_initializer,
                          bias_initializer=bias_initializer,
                          unit_forget_bias=unit_forget_bias,
                          kernel_regularizer=kernel_regularizer,
                          recurrent_regularizer=recurrent_regularizer,
                          bias_regularizer=bias_regularizer,
                          kernel_constraint=kernel_constraint,
                          recurrent_constraint=recurrent_constraint,
                          bias_constraint=bias_constraint,
                          dropout=dropout,
                          recurrent_dropout=recurrent_dropout,
                          dtype=kwargs.get('dtype'))
    super(ConvLSTM2D, self).__init__(cell,
                                     return_sequences=return_sequences,
                                     return_state=return_state,
                                     go_backwards=go_backwards,
                                     stateful=stateful,
                                     **kwargs)
    self.activity_regularizer = regularizers.get(activity_regularizer)

  def call(self, inputs, mask=None, training=None, initial_state=None):
    return super(ConvLSTM2D, self).call(inputs,
                                        mask=mask,
                                        training=training,
                                        initial_state=initial_state)

  @property
  def filters(self):
    return self.cell.filters

  @property
  def kernel_size(self):
    return self.cell.kernel_size

  @property
  def strides(self):
    return self.cell.strides

  @property
  def padding(self):
    return self.cell.padding

  @property
  def data_format(self):
    return self.cell.data_format

  @property
  def dilation_rate(self):
    return self.cell.dilation_rate

  @property
  def activation(self):
    return self.cell.activation

  @property
  def recurrent_activation(self):
    return self.cell.recurrent_activation

  @property
  def use_bias(self):
    return self.cell.use_bias

  @property
  def kernel_initializer(self):
    return self.cell.kernel_initializer

  @property
  def recurrent_initializer(self):
    return self.cell.recurrent_initializer

  @property
  def bias_initializer(self):
    return self.cell.bias_initializer

  @property
  def unit_forget_bias(self):
    return self.cell.unit_forget_bias

  @property
  def kernel_regularizer(self):
    return self.cell.kernel_regularizer

  @property
  def recurrent_regularizer(self):
    return self.cell.recurrent_regularizer

  @property
  def bias_regularizer(self):
    return self.cell.bias_regularizer

  @property
  def kernel_constraint(self):
    return self.cell.kernel_constraint

  @property
  def recurrent_constraint(self):
    return self.cell.recurrent_constraint

  @property
  def bias_constraint(self):
    return self.cell.bias_constraint

  @property
  def dropout(self):
    return self.cell.dropout

  @property
  def recurrent_dropout(self):
    return self.cell.recurrent_dropout

  def get_config(self):
    config = {'filters': self.filters,
              'kernel_size': self.kernel_size,
              'strides': self.strides,
              'padding': self.padding,
              'data_format': self.data_format,
              'dilation_rate': self.dilation_rate,
              'activation': activations.serialize(self.activation),
              'recurrent_activation': activations.serialize(
                  self.recurrent_activation),
              'use_bias': self.use_bias,
              'kernel_initializer': initializers.serialize(
                  self.kernel_initializer),
              'recurrent_initializer': initializers.serialize(
                  self.recurrent_initializer),
              'bias_initializer': initializers.serialize(
                  self.bias_initializer),
              'unit_forget_bias': self.unit_forget_bias,
              'kernel_regularizer': regularizers.serialize(
                  self.kernel_regularizer),
              'recurrent_regularizer': regularizers.serialize(
                  self.recurrent_regularizer),
              'bias_regularizer': regularizers.serialize(
                  self.bias_regularizer),
              'activity_regularizer': regularizers.serialize(
                  self.activity_regularizer),
              'kernel_constraint': constraints.serialize(
                  self.kernel_constraint),
              'recurrent_constraint': constraints.serialize(
                  self.recurrent_constraint),
              'bias_constraint': constraints.serialize(self.bias_constraint),
              'dropout': self.dropout,
              'recurrent_dropout': self.recurrent_dropout}
    base_config = super(ConvLSTM2D, self).get_config()
    del base_config['cell']
    return dict(list(base_config.items()) + list(config.items()))

  @classmethod
  def from_config(cls, config):
    return cls(**config)
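The statefulness notes in the `ConvRNN2D` docstring above translate into a small amount of ceremony at the model level. Here is a minimal sketch, assuming TensorFlow 2.x; the shapes and variable names are illustrative and do not come from the file above. The key points are the fixed batch size, `shuffle=False` during training, and the explicit reset between unrelated sequences:

```python
import numpy as np
import tensorflow as tf

batch, steps, rows, cols, channels = 4, 10, 32, 32, 3
model = tf.keras.Sequential([
    tf.keras.layers.ConvLSTM2D(
        filters=8, kernel_size=3, padding='same', stateful=True,
        # A fixed batch size is mandatory for stateful RNNs.
        batch_input_shape=(batch, steps, rows, cols, channels)),
])

x = np.random.rand(batch, steps, rows, cols, channels).astype('float32')
model(x)              # states computed for this batch...
model(x)              # ...are reused as the initial states of this call
model.reset_states()  # zero the states before feeding an unrelated sequence
```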
PypiClean
/google_ads_megalista-18.0.2-py3-none-any.whl/google/ads/googleads/v13/services/services/keyword_plan_campaign_service/transports/grpc.py
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple

from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore

import grpc  # type: ignore

from google.ads.googleads.v13.services.types import (
    keyword_plan_campaign_service,
)
from .base import KeywordPlanCampaignServiceTransport, DEFAULT_CLIENT_INFO


class KeywordPlanCampaignServiceGrpcTransport(
    KeywordPlanCampaignServiceTransport
):
    """gRPC backend transport for KeywordPlanCampaignService.

    Service to manage Keyword Plan campaigns.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    _stubs: Dict[str, Callable]

    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument
                is ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS
                endpoint. If provided, it overrides the ``host`` argument and
                tries to create a mutual TLS channel with client SSL
                credentials from ``client_cert_source`` or application default
                SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes
                and private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is
                provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key
                bytes, both in PEM format. It is used to configure a mutual
                TLS channel. It is ignored if ``channel`` or
                ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT
                should be used for service account credentials.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS
                transport creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}

        if api_mtls_endpoint:
            warnings.warn(
                "api_mtls_endpoint is deprecated", DeprecationWarning
            )
        if client_cert_source:
            warnings.warn(
                "client_cert_source is deprecated", DeprecationWarning
            )

        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint

                # Create SSL credentials with client_cert_source or
                # application default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = (
                        SslCredentials().ssl_credentials
                    )
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )

        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )

        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)

    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes
                needed for this service. These are only used when credentials
                are not specified and are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to
                the channel creation.

        Returns:
            grpc.Channel: A gRPC channel object.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service."""
        return self._grpc_channel

    @property
    def mutate_keyword_plan_campaigns(
        self,
    ) -> Callable[
        [keyword_plan_campaign_service.MutateKeywordPlanCampaignsRequest],
        keyword_plan_campaign_service.MutateKeywordPlanCampaignsResponse,
    ]:
        r"""Return a callable for the mutate keyword plan campaigns method over gRPC.

        Creates, updates, or removes Keyword Plan campaigns.
        Operation statuses are returned.

        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__
        `FieldMaskError <>`__ `HeaderError <>`__ `InternalError <>`__
        `KeywordPlanCampaignError <>`__ `KeywordPlanError <>`__
        `ListOperationError <>`__ `MutateError <>`__ `QuotaError <>`__
        `RangeError <>`__ `RequestError <>`__
        `ResourceCountLimitExceededError <>`__

        Returns:
            Callable[[~.MutateKeywordPlanCampaignsRequest],
                    ~.MutateKeywordPlanCampaignsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "mutate_keyword_plan_campaigns" not in self._stubs:
            self._stubs[
                "mutate_keyword_plan_campaigns"
            ] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v13.services.KeywordPlanCampaignService/MutateKeywordPlanCampaigns",
                request_serializer=keyword_plan_campaign_service.MutateKeywordPlanCampaignsRequest.serialize,
                response_deserializer=keyword_plan_campaign_service.MutateKeywordPlanCampaignsResponse.deserialize,
            )
        return self._stubs["mutate_keyword_plan_campaigns"]

    def close(self):
        self.grpc_channel.close()


__all__ = ("KeywordPlanCampaignServiceGrpcTransport",)
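For context, a minimal sketch of how this transport is typically exercised. The customer ID and request fields are illustrative values, and in most applications you would reach this code path through the generated service client (which also attaches the required request metadata) rather than by instantiating the transport directly:

```python
from google.ads.googleads.v13.services.types import (
    keyword_plan_campaign_service,
)

# Uses the default host and Application Default Credentials.
transport = KeywordPlanCampaignServiceGrpcTransport()

request = keyword_plan_campaign_service.MutateKeywordPlanCampaignsRequest(
    customer_id="1234567890",  # illustrative value
)
# The property returns a callable that performs the unary-unary RPC.
response = transport.mutate_keyword_plan_campaigns(request)
```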
PypiClean
/pyr2-2.0.0-py3-none-macosx_10_14_x86_64.whl/r2/r_magic.py
import ctypes

from .r_libs import r_anal as _libr_anal
from .r_libs import r_asm as _libr_asm
from .r_libs import r_bin as _libr_bin
from .r_libs import r_bp as _libr_bp
from .r_libs import r_config as _libr_config
from .r_libs import r_cons as _libr_cons
from .r_libs import r_core as _libr_core
from .r_libs import r_crypto as _libr_crypto
from .r_libs import r_debug as _libr_debug
from .r_libs import r_egg as _libr_egg
from .r_libs import r_flag as _libr_flag
from .r_libs import r_fs as _libr_fs
from .r_libs import r_hash as _libr_hash
from .r_libs import r_io as _libr_io
from .r_libs import r_lang as _libr_lang
from .r_libs import r_magic as _libr_magic
from .r_libs import r_main as _libr_main
from .r_libs import r_parse as _libr_parse
from .r_libs import r_reg as _libr_reg
from .r_libs import r_search as _libr_search
from .r_libs import r_socket as _libr_socket
from .r_libs import r_syscall as _libr_syscall
from .r_libs import r_util as _libr_util

_libraries = {}


def string_cast(char_pointer, encoding='utf-8', errors='strict'):
    value = ctypes.cast(char_pointer, ctypes.c_char_p).value
    if value is not None and encoding is not None:
        value = value.decode(encoding, errors=errors)
    return value


def char_pointer_cast(string, encoding='utf-8'):
    if encoding is not None:
        try:
            string = string.encode(encoding)
        except AttributeError:
            # In Python3, bytes has no encode attribute
            pass
    string = ctypes.c_char_p(string)
    return ctypes.cast(string, ctypes.POINTER(ctypes.c_char))


class AsDictMixin:
    @classmethod
    def as_dict(cls, self):
        result = {}
        if not isinstance(self, AsDictMixin):
            # not a structure, assume it's already a python object
            return self
        if not hasattr(cls, "_fields_"):
            return result
        # sys.version_info >= (3, 5)
        # for (field, *_) in cls._fields_:  # noqa
        for field_tuple in cls._fields_:  # noqa
            field = field_tuple[0]
            if field.startswith('PADDING_'):
                continue
            value = getattr(self, field)
            type_ = type(value)
            if hasattr(value, "_length_") and hasattr(value, "_type_"):
                # array
                if not hasattr(type_, "as_dict"):
                    value = [v for v in value]
                else:
                    type_ = type_._type_
                    value = [type_.as_dict(v) for v in value]
            elif hasattr(value, "contents") and hasattr(value, "_type_"):
                # pointer
                try:
                    if not hasattr(type_, "as_dict"):
                        value = value.contents
                    else:
                        type_ = type_._type_
                        value = type_.as_dict(value.contents)
                except ValueError:
                    # nullptr
                    value = None
            elif isinstance(value, AsDictMixin):
                # other structure
                value = type_.as_dict(value)
            result[field] = value
        return result


class Structure(ctypes.Structure, AsDictMixin):

    def __init__(self, *args, **kwds):
        # We don't want to use positional arguments fill PADDING_* fields
        args = dict(zip(self.__class__._field_names_(), args))
        args.update(kwds)
        super(Structure, self).__init__(**args)

    @classmethod
    def _field_names_(cls):
        if hasattr(cls, '_fields_'):
            return (f[0] for f in cls._fields_
                    if not f[0].startswith('PADDING'))
        else:
            return ()

    @classmethod
    def get_type(cls, field):
        for f in cls._fields_:
            if f[0] == field:
                return f[1]
        return None

    @classmethod
    def bind(cls, bound_fields):
        fields = {}
        for name, type_ in cls._fields_:
            if hasattr(type_, "restype"):
                if name in bound_fields:
                    # use a closure to capture the callback from the loop scope
                    fields[name] = (
                        type_((lambda callback: lambda *args: callback(*args))(
                            bound_fields[name]))
                    )
                    del bound_fields[name]
                else:
                    # default callback implementation (does nothing)
                    try:
                        default_ = type_(0).restype().value
                    except TypeError:
                        default_ = None
                    fields[name] = type_((
                        lambda default_: lambda *args: default_)(default_))
            else:
                # not a callback function, use default initialization
                if name in bound_fields:
                    fields[name] = bound_fields[name]
                    del bound_fields[name]
                else:
                    fields[name] = type_()
        if len(bound_fields) != 0:
            raise ValueError(
                "Cannot bind the following unknown callback(s) {}.{}".format(
                    cls.__name__, bound_fields.keys()
                ))
        return cls(**fields)


class Union(ctypes.Union, AsDictMixin):
    pass


c_int128 = ctypes.c_ubyte * 16
c_uint128 = c_int128
void = None
if ctypes.sizeof(ctypes.c_longdouble) == 16:
    c_long_double_t = ctypes.c_longdouble
else:
    c_long_double_t = ctypes.c_ubyte * 16


r_magic_version = _libr_magic.r_magic_version
r_magic_version.restype = ctypes.POINTER(ctypes.c_char)
r_magic_version.argtypes = []


class union_VALUETYPE(Union):
    pass


union_VALUETYPE._pack_ = 1  # source:False
union_VALUETYPE._fields_ = [
    ('b', ctypes.c_ubyte),
    ('h', ctypes.c_uint16),
    ('l', ctypes.c_uint32),
    ('q', ctypes.c_uint64),
    ('hs', ctypes.c_ubyte * 2),
    ('hl', ctypes.c_ubyte * 4),
    ('hq', ctypes.c_ubyte * 8),
    ('s', ctypes.c_char * 32),
    ('f', ctypes.c_float),
    ('d', ctypes.c_double),
    ('PADDING_0', ctypes.c_ubyte * 24),
]


class struct_r_magic(Structure):
    pass


class union_r_magic_0(Union):
    pass


class struct_r_magic_0_0(Structure):
    pass


struct_r_magic_0_0._pack_ = 1  # source:False
struct_r_magic_0_0._fields_ = [
    ('_count', ctypes.c_uint32),
    ('_flags', ctypes.c_uint32),
]

union_r_magic_0._pack_ = 1  # source:False
union_r_magic_0._fields_ = [
    ('_mask', ctypes.c_uint64),
    ('_s', struct_r_magic_0_0),
]

struct_r_magic._pack_ = 1  # source:False
struct_r_magic._fields_ = [
    ('cont_level', ctypes.c_uint16),
    ('flag', ctypes.c_ubyte),
    ('dummy1', ctypes.c_ubyte),
    ('reln', ctypes.c_ubyte),
    ('vallen', ctypes.c_ubyte),
    ('type', ctypes.c_ubyte),
    ('in_type', ctypes.c_ubyte),
    ('in_op', ctypes.c_ubyte),
    ('mask_op', ctypes.c_ubyte),
    ('cond', ctypes.c_ubyte),
    ('dummy2', ctypes.c_ubyte),
    ('offset', ctypes.c_uint32),
    ('in_offset', ctypes.c_uint32),
    ('lineno', ctypes.c_uint32),
    ('_u', union_r_magic_0),
    ('value', union_VALUETYPE),
    ('desc', ctypes.c_char * 64),
    ('mimetype', ctypes.c_char * 64),
]


class struct_mlist(Structure):
    pass


struct_mlist._pack_ = 1  # source:False
struct_mlist._fields_ = [
    ('magic', ctypes.POINTER(struct_r_magic)),
    ('nmagic', ctypes.c_uint32),
    ('mapped', ctypes.c_int32),
    ('next', ctypes.POINTER(struct_mlist)),
    ('prev', ctypes.POINTER(struct_mlist)),
]


class struct_r_magic_set(Structure):
    pass


class struct_out(Structure):
    pass


struct_out._pack_ = 1  # source:False
struct_out._fields_ = [
    ('buf', ctypes.POINTER(ctypes.c_char)),
    ('pbuf', ctypes.POINTER(ctypes.c_char)),
]


class struct_cont(Structure):
    pass


class struct_level_info(Structure):
    pass


struct_cont._pack_ = 1  # source:False
struct_cont._fields_ = [
    ('len', ctypes.c_uint64),
    ('li', ctypes.POINTER(struct_level_info)),
]


class struct_r_magic_set_2(Structure):
    pass


struct_r_magic_set_2._pack_ = 1  # source:False
struct_r_magic_set_2._fields_ = [
    ('s', ctypes.POINTER(ctypes.c_char)),
    ('s_len', ctypes.c_uint64),
    ('offset', ctypes.c_uint64),
    ('rm_len', ctypes.c_uint64),
]

struct_r_magic_set._pack_ = 1  # source:False
struct_r_magic_set._fields_ = [
    ('mlist', ctypes.POINTER(struct_mlist)),
    ('c', struct_cont),
    ('o', struct_out),
    ('offset', ctypes.c_uint32),
    ('error', ctypes.c_int32),
    ('flags', ctypes.c_int32),
    ('haderr', ctypes.c_int32),
    ('file', ctypes.POINTER(ctypes.c_char)),
    ('line', ctypes.c_uint64),
    ('search', struct_r_magic_set_2),
    ('ms_value', union_VALUETYPE),
]

struct_level_info._pack_ = 1  # source:False
struct_level_info._fields_ = [
    ('off', ctypes.c_int32),
    ('got_match', ctypes.c_int32),
    ('last_match', ctypes.c_int32),
    ('last_cond', ctypes.c_int32),
]

RMagic = struct_r_magic_set

r_magic_new = _libr_magic.r_magic_new
r_magic_new.restype = ctypes.POINTER(struct_r_magic_set)
r_magic_new.argtypes = [ctypes.c_int32]

r_magic_free = _libr_magic.r_magic_free
r_magic_free.restype = None
r_magic_free.argtypes = [ctypes.POINTER(struct_r_magic_set)]

r_magic_file = _libr_magic.r_magic_file
r_magic_file.restype = ctypes.POINTER(ctypes.c_char)
r_magic_file.argtypes = [ctypes.POINTER(struct_r_magic_set),
                         ctypes.POINTER(ctypes.c_char)]

r_magic_descriptor = _libr_magic.r_magic_descriptor
r_magic_descriptor.restype = ctypes.POINTER(ctypes.c_char)
r_magic_descriptor.argtypes = [ctypes.POINTER(struct_r_magic_set),
                               ctypes.c_int32]

size_t = ctypes.c_uint64

r_magic_buffer = _libr_magic.r_magic_buffer
r_magic_buffer.restype = ctypes.POINTER(ctypes.c_char)
r_magic_buffer.argtypes = [ctypes.POINTER(struct_r_magic_set),
                           ctypes.POINTER(None), size_t]

r_magic_error = _libr_magic.r_magic_error
r_magic_error.restype = ctypes.POINTER(ctypes.c_char)
r_magic_error.argtypes = [ctypes.POINTER(struct_r_magic_set)]

r_magic_setflags = _libr_magic.r_magic_setflags
r_magic_setflags.restype = None
r_magic_setflags.argtypes = [ctypes.POINTER(struct_r_magic_set),
                             ctypes.c_int32]

r_magic_load = _libr_magic.r_magic_load
r_magic_load.restype = ctypes.c_bool
r_magic_load.argtypes = [ctypes.POINTER(struct_r_magic_set),
                         ctypes.POINTER(ctypes.c_char)]

r_magic_load_buffer = _libr_magic.r_magic_load_buffer
r_magic_load_buffer.restype = ctypes.c_bool
r_magic_load_buffer.argtypes = [ctypes.POINTER(struct_r_magic_set),
                                ctypes.POINTER(ctypes.c_char)]

r_magic_compile = _libr_magic.r_magic_compile
r_magic_compile.restype = ctypes.c_bool
r_magic_compile.argtypes = [ctypes.POINTER(struct_r_magic_set),
                            ctypes.POINTER(ctypes.c_char)]

r_magic_check = _libr_magic.r_magic_check
r_magic_check.restype = ctypes.c_bool
r_magic_check.argtypes = [ctypes.POINTER(struct_r_magic_set),
                          ctypes.POINTER(ctypes.c_char)]

r_magic_errno = _libr_magic.r_magic_errno
r_magic_errno.restype = ctypes.c_int32
r_magic_errno.argtypes = [ctypes.POINTER(struct_r_magic_set)]

__all__ = \
    ['RMagic', 'r_magic_buffer', 'r_magic_check', 'r_magic_compile',
     'r_magic_descriptor', 'r_magic_errno', 'r_magic_error',
     'r_magic_file', 'r_magic_free', 'r_magic_load',
     'r_magic_load_buffer', 'r_magic_new', 'r_magic_setflags',
     'r_magic_version', 'size_t', 'struct_cont', 'struct_level_info',
     'struct_mlist', 'struct_out', 'struct_r_magic',
     'struct_r_magic_0_0', 'struct_r_magic_set', 'struct_r_magic_set_2',
     'union_VALUETYPE', 'union_r_magic_0']
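A minimal usage sketch for these bindings, driving the handle through its typical lifecycle: create, load a magic rule database, classify a buffer, clean up. The magic database path is illustrative and depends on the local radare2 installation:

```python
ms = r_magic_new(0)  # 0 = no flags
try:
    # Path is illustrative; it varies per radare2 installation.
    if r_magic_load(ms, char_pointer_cast("/usr/share/radare2/magic")):
        data = b"\x7fELF\x02\x01\x01"
        buf = ctypes.create_string_buffer(data, len(data))
        desc = r_magic_buffer(ms, ctypes.cast(buf, ctypes.c_void_p), len(data))
        print(string_cast(desc))  # human-readable file-type description
    else:
        print(string_cast(r_magic_error(ms)))
finally:
    r_magic_free(ms)
```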
PypiClean
/vioneta_agro_frontend-20230809.1-py3-none-any.whl/hass_frontend/frontend_es5/76045-Swd8I3BgC9s.js
"use strict";(self.webpackChunkvioneta_agro_frontend=self.webpackChunkvioneta_agro_frontend||[]).push([[76045],{58014:function(t,e,n){function i(t,e){if(t.closest)return t.closest(e);for(var n=t;n;){if(o(n,e))return n;n=n.parentElement}return null}function o(t,e){return(t.matches||t.webkitMatchesSelector||t.msMatchesSelector).call(t,e)}n.d(e,{oq:function(){return i},wB:function(){return o}})},20210:function(t,e,n){var i,o,a,r,c=n(33368),s=n(71650),d=n(69205),u=n(70906),l=n(43204),p=n(79932),h=n(88962),b=(n(27763),n(38103)),f=n(98734),v=n(68144),m=n(30153),y=function(t){(0,d.Z)(n,t);var e=(0,u.Z)(n);function n(){var t;return(0,s.Z)(this,n),(t=e.apply(this,arguments)).disabled=!1,t.icon="",t.shouldRenderRipple=!1,t.rippleHandlers=new f.A((function(){return t.shouldRenderRipple=!0,t.ripple})),t}return(0,c.Z)(n,[{key:"renderRipple",value:function(){return this.shouldRenderRipple?(0,v.dy)(i||(i=(0,h.Z)([' <mwc-ripple .disabled="','" unbounded> </mwc-ripple>'])),this.disabled):""}},{key:"focus",value:function(){var t=this.buttonElement;t&&(this.rippleHandlers.startFocus(),t.focus())}},{key:"blur",value:function(){var t=this.buttonElement;t&&(this.rippleHandlers.endFocus(),t.blur())}},{key:"render",value:function(){return(0,v.dy)(o||(o=(0,h.Z)(['<button class="mdc-icon-button mdc-icon-button--display-flex" aria-label="','" aria-haspopup="','" ?disabled="','" @focus="','" @blur="','" @mousedown="','" @mouseenter="','" @mouseleave="','" @touchstart="','" @touchend="','" @touchcancel="','">'," "," <span><slot></slot></span> </button>"])),this.ariaLabel||this.icon,(0,m.o)(this.ariaHasPopup),this.disabled,this.handleRippleFocus,this.handleRippleBlur,this.handleRippleMouseDown,this.handleRippleMouseEnter,this.handleRippleMouseLeave,this.handleRippleTouchStart,this.handleRippleDeactivate,this.handleRippleDeactivate,this.renderRipple(),this.icon?(0,v.dy)(a||(a=(0,h.Z)(['<i class="material-icons">',"</i>"])),this.icon):"")}},{key:"handleRippleMouseDown",value:function(t){var e=this;window.addEventListener("mouseup",(function t(){window.removeEventListener("mouseup",t),e.handleRippleDeactivate()})),this.rippleHandlers.startPress(t)}},{key:"handleRippleTouchStart",value:function(t){this.rippleHandlers.startPress(t)}},{key:"handleRippleDeactivate",value:function(){this.rippleHandlers.endPress()}},{key:"handleRippleMouseEnter",value:function(){this.rippleHandlers.startHover()}},{key:"handleRippleMouseLeave",value:function(){this.rippleHandlers.endHover()}},{key:"handleRippleFocus",value:function(){this.rippleHandlers.startFocus()}},{key:"handleRippleBlur",value:function(){this.rippleHandlers.endFocus()}}]),n}(v.oi);(0,l.__decorate)([(0,p.Cb)({type:Boolean,reflect:!0})],y.prototype,"disabled",void 0),(0,l.__decorate)([(0,p.Cb)({type:String})],y.prototype,"icon",void 0),(0,l.__decorate)([b.L,(0,p.Cb)({type:String,attribute:"aria-label"})],y.prototype,"ariaLabel",void 0),(0,l.__decorate)([b.L,(0,p.Cb)({type:String,attribute:"aria-haspopup"})],y.prototype,"ariaHasPopup",void 0),(0,l.__decorate)([(0,p.IO)("button")],y.prototype,"buttonElement",void 0),(0,l.__decorate)([(0,p.GC)("mwc-ripple")],y.prototype,"ripple",void 0),(0,l.__decorate)([(0,p.SB)()],y.prototype,"shouldRenderRipple",void 0),(0,l.__decorate)([(0,p.hO)({passive:!0})],y.prototype,"handleRippleMouseDown",null),(0,l.__decorate)([(0,p.hO)({passive:!0})],y.prototype,"handleRippleTouchStart",null);var g=(0,v.iv)(r||(r=(0,h.Z)(['.material-icons{font-family:var(--mdc-icon-font, "Material 
Icons");font-weight:400;font-style:normal;font-size:var(--mdc-icon-size,24px);line-height:1;letter-spacing:normal;text-transform:none;display:inline-block;white-space:nowrap;word-wrap:normal;direction:ltr;-webkit-font-smoothing:antialiased;text-rendering:optimizeLegibility;-moz-osx-font-smoothing:grayscale;font-feature-settings:"liga"}.mdc-icon-button{font-size:24px;width:48px;height:48px;padding:12px}.mdc-icon-button .mdc-icon-button__focus-ring{display:none}.mdc-icon-button.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring,.mdc-icon-button:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring{display:block;max-height:48px;max-width:48px}@media screen and (forced-colors:active){.mdc-icon-button.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring,.mdc-icon-button:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring{pointer-events:none;border:2px solid transparent;border-radius:6px;box-sizing:content-box;position:absolute;top:50%;left:50%;transform:translate(-50%,-50%);height:100%;width:100%}}@media screen and (forced-colors:active)and (forced-colors:active){.mdc-icon-button.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring,.mdc-icon-button:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring{border-color:CanvasText}}@media screen and (forced-colors:active){.mdc-icon-button.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring::after,.mdc-icon-button:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring::after{content:"";border:2px solid transparent;border-radius:8px;display:block;position:absolute;top:50%;left:50%;transform:translate(-50%,-50%);height:calc(100% + 4px);width:calc(100% + 4px)}}@media screen and (forced-colors:active)and (forced-colors:active){.mdc-icon-button.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring::after,.mdc-icon-button:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring::after{border-color:CanvasText}}.mdc-icon-button.mdc-icon-button--reduced-size .mdc-icon-button__ripple{width:40px;height:40px;margin-top:4px;margin-bottom:4px;margin-right:4px;margin-left:4px}.mdc-icon-button.mdc-icon-button--reduced-size.mdc-ripple-upgraded--background-focused .mdc-icon-button__focus-ring,.mdc-icon-button.mdc-icon-button--reduced-size:not(.mdc-ripple-upgraded):focus .mdc-icon-button__focus-ring{max-height:40px;max-width:40px}.mdc-icon-button .mdc-icon-button__touch{position:absolute;top:50%;height:48px;left:50%;width:48px;transform:translate(-50%,-50%)}.mdc-icon-button:disabled{color:rgba(0,0,0,.38);color:var(--mdc-theme-text-disabled-on-light,rgba(0,0,0,.38))}.mdc-icon-button img,.mdc-icon-button svg{width:24px;height:24px}.mdc-icon-button{display:inline-block;position:relative;box-sizing:border-box;border:none;outline:0;background-color:transparent;fill:currentColor;color:inherit;text-decoration:none;cursor:pointer;user-select:none;z-index:0;overflow:visible}.mdc-icon-button .mdc-icon-button__touch{position:absolute;top:50%;height:48px;left:50%;width:48px;transform:translate(-50%,-50%)}.mdc-icon-button:disabled{cursor:default;pointer-events:none}.mdc-icon-button--display-flex{align-items:center;display:inline-flex;justify-content:center}.mdc-icon-button__icon{display:inline-block}.mdc-icon-button__icon.mdc-icon-button__icon--on{display:none}.mdc-icon-button--on .mdc-icon-button__icon{display:none}.mdc-icon-button--on 
.mdc-icon-button__icon.mdc-icon-button__icon--on{display:inline-block}.mdc-icon-button__link{height:100%;left:0;outline:0;position:absolute;top:0;width:100%}.mdc-icon-button{display:inline-block;position:relative;box-sizing:border-box;border:none;outline:0;background-color:transparent;fill:currentColor;color:inherit;text-decoration:none;cursor:pointer;user-select:none;z-index:0;overflow:visible}.mdc-icon-button .mdc-icon-button__touch{position:absolute;top:50%;height:48px;left:50%;width:48px;transform:translate(-50%,-50%)}.mdc-icon-button:disabled{cursor:default;pointer-events:none}.mdc-icon-button--display-flex{align-items:center;display:inline-flex;justify-content:center}.mdc-icon-button__icon{display:inline-block}.mdc-icon-button__icon.mdc-icon-button__icon--on{display:none}.mdc-icon-button--on .mdc-icon-button__icon{display:none}.mdc-icon-button--on .mdc-icon-button__icon.mdc-icon-button__icon--on{display:inline-block}.mdc-icon-button__link{height:100%;left:0;outline:0;position:absolute;top:0;width:100%}:host{display:inline-block;outline:0}:host([disabled]){pointer-events:none}.mdc-icon-button ::slotted(*),.mdc-icon-button i,.mdc-icon-button img,.mdc-icon-button svg{display:block}:host{--mdc-ripple-color:currentcolor;-webkit-tap-highlight-color:transparent}.mdc-icon-button,:host{vertical-align:top}.mdc-icon-button{width:var(--mdc-icon-button-size,48px);height:var(--mdc-icon-button-size,48px);padding:calc((var(--mdc-icon-button-size,48px) - var(--mdc-icon-size,24px))/ 2)}.mdc-icon-button ::slotted(*),.mdc-icon-button i,.mdc-icon-button img,.mdc-icon-button svg{display:block;width:var(--mdc-icon-size,24px);height:var(--mdc-icon-size,24px)}']))),k=function(t){(0,d.Z)(n,t);var e=(0,u.Z)(n);function n(){return(0,s.Z)(this,n),e.apply(this,arguments)}return(0,c.Z)(n)}(y);k.styles=[g],k=(0,l.__decorate)([(0,p.Mo)("mwc-icon-button")],k)},22311:function(t,e,n){n.d(e,{N:function(){return o}});var i=n(58831),o=function(t){return(0,i.M)(t.entity_id)}},10983:function(t,e,n){n.d(e,{$:function(){return m}});var i,o,a,r,c=n(88962),s=n(33368),d=n(71650),u=n(82390),l=n(69205),p=n(70906),h=n(91808),b=(n(20210),n(68144)),f=n(79932),v=n(30153),m=(n(52039),(0,h.Z)([(0,f.Mo)("ha-icon-button")],(function(t,e){var n=function(e){(0,l.Z)(i,e);var n=(0,p.Z)(i);function i(){var e;(0,d.Z)(this,i);for(var o=arguments.length,a=new Array(o),r=0;r<o;r++)a[r]=arguments[r];return e=n.call.apply(n,[this].concat(a)),t((0,u.Z)(e)),e}return(0,s.Z)(i)}(e);return{F:n,d:[{kind:"field",decorators:[(0,f.Cb)({type:Boolean,reflect:!0})],key:"disabled",value:function(){return!1}},{kind:"field",decorators:[(0,f.Cb)({type:String})],key:"path",value:void 0},{kind:"field",decorators:[(0,f.Cb)({type:String})],key:"label",value:void 0},{kind:"field",decorators:[(0,f.Cb)({type:String,attribute:"aria-haspopup"})],key:"ariaHasPopup",value:void 0},{kind:"field",decorators:[(0,f.Cb)({type:Boolean})],key:"hideTitle",value:function(){return!1}},{kind:"field",decorators:[(0,f.IO)("mwc-icon-button",!0)],key:"_button",value:void 0},{kind:"method",key:"focus",value:function(){var t;null===(t=this._button)||void 0===t||t.focus()}},{kind:"field",static:!0,key:"shadowRootOptions",value:function(){return{mode:"open",delegatesFocus:!0}}},{kind:"method",key:"render",value:function(){return(0,b.dy)(i||(i=(0,c.Z)([' <mwc-icon-button aria-label="','" title="','" aria-haspopup="','" .disabled="','"> '," </mwc-icon-button> "])),(0,v.o)(this.label),(0,v.o)(this.hideTitle?void 
0:this.label),(0,v.o)(this.ariaHasPopup),this.disabled,this.path?(0,b.dy)(o||(o=(0,c.Z)(['<ha-svg-icon .path="','"></ha-svg-icon>'])),this.path):(0,b.dy)(a||(a=(0,c.Z)(["<slot></slot>"]))))}},{kind:"get",static:!0,key:"styles",value:function(){return(0,b.iv)(r||(r=(0,c.Z)([":host{display:inline-block;outline:0}:host([disabled]){pointer-events:none}mwc-icon-button{--mdc-theme-on-primary:currentColor;--mdc-theme-text-disabled-on-light:var(--disabled-text-color)}"])))}}]}}),b.oi))},48932:function(t,e,n){var i,o,a,r=n(88962),c=n(33368),s=n(71650),d=n(82390),u=n(69205),l=n(70906),p=n(91808),h=n(34541),b=n(47838),f=n(68144),v=n(79932),m=n(47181),y=n(6936);n(10983),(0,p.Z)([(0,v.Mo)("ha-menu-button")],(function(t,e){var n=function(e){(0,u.Z)(i,e);var n=(0,l.Z)(i);function i(){var e;(0,s.Z)(this,i);for(var o=arguments.length,a=new Array(o),r=0;r<o;r++)a[r]=arguments[r];return e=n.call.apply(n,[this].concat(a)),t((0,d.Z)(e)),e}return(0,c.Z)(i)}(e);return{F:n,d:[{kind:"field",decorators:[(0,v.Cb)({type:Boolean})],key:"hassio",value:function(){return!1}},{kind:"field",decorators:[(0,v.Cb)()],key:"narrow",value:void 0},{kind:"field",decorators:[(0,v.Cb)({attribute:!1})],key:"hass",value:void 0},{kind:"field",decorators:[(0,v.SB)()],key:"_hasNotifications",value:function(){return!1}},{kind:"field",decorators:[(0,v.SB)()],key:"_show",value:function(){return!1}},{kind:"field",key:"_alwaysVisible",value:function(){return!1}},{kind:"field",key:"_attachNotifOnConnect",value:function(){return!1}},{kind:"field",key:"_unsubNotifications",value:void 0},{kind:"method",key:"connectedCallback",value:function(){(0,h.Z)((0,b.Z)(n.prototype),"connectedCallback",this).call(this),this._attachNotifOnConnect&&(this._attachNotifOnConnect=!1,this._subscribeNotifications())}},{kind:"method",key:"disconnectedCallback",value:function(){(0,h.Z)((0,b.Z)(n.prototype),"disconnectedCallback",this).call(this),this._unsubNotifications&&(this._attachNotifOnConnect=!0,this._unsubNotifications(),this._unsubNotifications=void 0)}},{kind:"method",key:"render",value:function(){if(!this._show)return f.Ld;var t=this._hasNotifications&&(this.narrow||"always_hidden"===this.hass.dockedSidebar);return(0,f.dy)(i||(i=(0,r.Z)([' <ha-icon-button .label="','" .path="','" @click="','"></ha-icon-button> '," "])),this.hass.localize("ui.sidebar.sidebar_toggle"),"M3,6H21V8H3V6M3,11H21V13H3V11M3,16H21V18H3V16Z",this._toggleMenu,t?(0,f.dy)(o||(o=(0,r.Z)(['<div class="dot"></div>']))):"")}},{kind:"method",key:"firstUpdated",value:function(t){(0,h.Z)((0,b.Z)(n.prototype),"firstUpdated",this).call(this,t),this.hassio&&(this._alwaysVisible=(Number(window.parent.frontendVersion)||0)<20190710)}},{kind:"method",key:"willUpdate",value:function(t){if((0,h.Z)((0,b.Z)(n.prototype),"willUpdate",this).call(this,t),t.has("narrow")||t.has("hass")){var e=t.has("hass")?t.get("hass"):this.hass,i=(t.has("narrow")?t.get("narrow"):this.narrow)||"always_hidden"===(null==e?void 0:e.dockedSidebar),o=this.narrow||"always_hidden"===this.hass.dockedSidebar;this.hasUpdated&&i===o||(this._show=o||this._alwaysVisible,o?this._subscribeNotifications():this._unsubNotifications&&(this._unsubNotifications(),this._unsubNotifications=void 0))}}},{kind:"method",key:"_subscribeNotifications",value:function(){var t=this;if(this._unsubNotifications)throw new Error("Already 
subscribed");this._unsubNotifications=(0,y.r)(this.hass.connection,(function(e){t._hasNotifications=e.length>0}))}},{kind:"method",key:"_toggleMenu",value:function(){(0,m.B)(this,"hass-toggle-menu")}},{kind:"get",static:!0,key:"styles",value:function(){return(0,f.iv)(a||(a=(0,r.Z)([":host{position:relative}.dot{pointer-events:none;position:absolute;background-color:var(--accent-color);width:12px;height:12px;top:9px;right:7px;border-radius:50%;border:2px solid var(--app-header-background-color)}"])))}}]}}),f.oi)},52039:function(t,e,n){n.d(e,{C:function(){return f}});var i,o,a,r=n(88962),c=n(33368),s=n(71650),d=n(82390),u=n(69205),l=n(70906),p=n(91808),h=n(68144),b=n(79932),f=(0,p.Z)([(0,b.Mo)("ha-svg-icon")],(function(t,e){var n=function(e){(0,u.Z)(i,e);var n=(0,l.Z)(i);function i(){var e;(0,s.Z)(this,i);for(var o=arguments.length,a=new Array(o),r=0;r<o;r++)a[r]=arguments[r];return e=n.call.apply(n,[this].concat(a)),t((0,d.Z)(e)),e}return(0,c.Z)(i)}(e);return{F:n,d:[{kind:"field",decorators:[(0,b.Cb)()],key:"path",value:void 0},{kind:"field",decorators:[(0,b.Cb)()],key:"viewBox",value:void 0},{kind:"method",key:"render",value:function(){return(0,h.YP)(i||(i=(0,r.Z)([' <svg viewBox="','" preserveAspectRatio="xMidYMid meet" focusable="false" role="img" aria-hidden="true"> <g> '," </g> </svg>"])),this.viewBox||"0 0 24 24",this.path?(0,h.YP)(o||(o=(0,r.Z)(['<path d="','"></path>'])),this.path):"")}},{kind:"get",static:!0,key:"styles",value:function(){return(0,h.iv)(a||(a=(0,r.Z)([":host{display:var(--ha-icon-display,inline-flex);align-items:center;justify-content:center;position:relative;vertical-align:middle;fill:currentcolor;width:var(--mdc-icon-size,24px);height:var(--mdc-icon-size,24px)}svg{width:100%;height:100%;pointer-events:none;display:block}"])))}}]}}),h.oi)},36226:function(t,e,n){var i,o=n(88962),a=n(33368),r=n(71650),c=n(82390),s=n(69205),d=n(70906),u=n(91808),l=n(73968),p=n(71711),h=n(68144),b=n(79932);(0,u.Z)([(0,b.Mo)("ha-top-app-bar-fixed")],(function(t,e){var n=function(e){(0,s.Z)(i,e);var n=(0,d.Z)(i);function i(){var e;(0,r.Z)(this,i);for(var o=arguments.length,a=new Array(o),s=0;s<o;s++)a[s]=arguments[s];return e=n.call.apply(n,[this].concat(a)),t((0,c.Z)(e)),e}return(0,a.Z)(i)}(e);return{F:n,d:[{kind:"field",static:!0,key:"styles",value:function(){return[p.W,(0,h.iv)(i||(i=(0,o.Z)([".mdc-top-app-bar__row{height:var(--header-height);border-bottom:var(--app-header-border-bottom)}.mdc-top-app-bar--fixed-adjust{padding-top:var(--header-height)}.mdc-top-app-bar{--mdc-typography-headline6-font-weight:400;color:var(--app-header-text-color,var(--mdc-theme-on-primary,#fff));background-color:var(--app-header-background-color,var(--mdc-theme-primary))}"])))]}}]}}),l.s)},6936:function(t,e,n){n.d(e,{r:function(){return a}});var i=n(71650),o=n(33368),a=function(t,e){var n=new r,i=t.subscribeMessage((function(t){return e(n.processMessage(t))}),{type:"persistent_notification/subscribe"});return function(){i.then((function(t){return null==t?void 0:t()}))}},r=function(){function t(){(0,i.Z)(this,t),this.notifications=void 0,this.notifications={}}return(0,o.Z)(t,[{key:"processMessage",value:function(t){if("removed"===t.type)for(var e=0,n=Object.keys(t.notifications);e<n.length;e++){var i=n[e];delete this.notifications[i]}else this.notifications=Object.assign(Object.assign({},this.notifications),t.notifications);return Object.values(this.notifications)}}]),t}()},34481:function(t,e,n){n.r(e);var 
i,o,a,r=n(88962),c=n(33368),s=n(71650),d=n(82390),u=n(69205),l=n(70906),p=n(91808),h=n(34541),b=n(47838),f=n(68144),v=n(79932),m=n(22311),y=n(83849),g=(n(10983),n(48932),n(36226),n(13786),n(11654));(0,p.Z)([(0,v.Mo)("ha-panel-map")],(function(t,e){var n=function(e){(0,u.Z)(i,e);var n=(0,l.Z)(i);function i(){var e;(0,s.Z)(this,i);for(var o=arguments.length,a=new Array(o),r=0;r<o;r++)a[r]=arguments[r];return e=n.call.apply(n,[this].concat(a)),t((0,d.Z)(e)),e}return(0,c.Z)(i)}(e);return{F:n,d:[{kind:"field",decorators:[(0,v.Cb)({attribute:!1})],key:"hass",value:void 0},{kind:"field",decorators:[(0,v.Cb)({type:Boolean})],key:"narrow",value:void 0},{kind:"field",key:"_entities",value:function(){return[]}},{kind:"method",key:"render",value:function(){var t;return(0,f.dy)(i||(i=(0,r.Z)([' <ha-top-app-bar-fixed> <ha-menu-button slot="navigationIcon" .hass="','" .narrow="','"></ha-menu-button> <div slot="title">',"</div> ",' <ha-map .hass="','" .entities="','" autoFit interactiveZones></ha-map> </ha-top-app-bar-fixed> '])),this.hass,this.narrow,this.hass.localize("panel.map"),null!==(t=this.hass.user)&&void 0!==t&&t.is_admin?(0,f.dy)(o||(o=(0,r.Z)([' <ha-icon-button slot="actionItems" .label="','" .path="','" @click="','"></ha-icon-button>'])),this.hass.localize("ui.panel.map.edit_zones"),"M20.71,7.04C21.1,6.65 21.1,6 20.71,5.63L18.37,3.29C18,2.9 17.35,2.9 16.96,3.29L15.12,5.12L18.87,8.87M3,17.25V21H6.75L17.81,9.93L14.06,6.18L3,17.25Z",this._openZonesEditor):"",this.hass,this._entities)}},{kind:"method",key:"_openZonesEditor",value:function(){(0,y.c)("/config/zone")}},{kind:"method",key:"willUpdate",value:function(t){if((0,h.Z)((0,b.Z)(n.prototype),"willUpdate",this).call(this,t),t.has("hass")){var e=t.get("hass");this._getStates(e)}}},{kind:"method",key:"_getStates",value:function(t){var e=!1,n=new Set,i=[];Object.values(this.hass.states).forEach((function(o){"home"!==o.state&&"latitude"in o.attributes&&"longitude"in o.attributes&&(i.push(o.entity_id),"person"===(0,m.N)(o)&&o.attributes.source&&n.add(o.attributes.source),(null==t?void 0:t.states[o.entity_id])!==o&&(e=!0))})),e&&(this._entities=i.filter((function(t){return!n.has(t)})))}},{kind:"get",static:!0,key:"styles",value:function(){return[g.Qx,(0,f.iv)(a||(a=(0,r.Z)(["ha-map{height:calc(100vh - var(--header-height))}"])))]}}]}}),f.oi)},44281:function(t,e,n){n.d(e,{j:function(){return a}});var i=n(99312),o=n(81043),a=function(){var t=(0,o.Z)((0,i.Z)().mark((function t(){return(0,i.Z)().wrap((function(t){for(;;)switch(t.prev=t.next){case 0:t.prev=0,new ResizeObserver((function(){})),t.next=9;break;case 4:return t.prev=4,t.t0=t.catch(0),t.next=8,n.e(5442).then(n.bind(n,5442));case 8:window.ResizeObserver=t.sent.default;case 9:case"end":return t.stop()}}),t,null,[[0,4]])})));return function(){return t.apply(this,arguments)}}()},47501:function(t,e,n){n.d(e,{V:function(){return i.V}});var i=n(84298)}}]); //# sourceMappingURL=76045-Swd8I3BgC9s.js.map
PypiClean
/PyTikTokAPI-0.0.5.tar.gz/PyTikTokAPI-0.0.5/TikTokAPI/tiktokapi.py
import os
import string
import random
import urllib.parse

from .utils import random_key, build_get_url, get_req_json, get_req_content, get_req_text
from .tiktok_browser import TikTokBrowser


class VideoException(Exception):
    pass


class TikTokAPI(object):
    def __init__(self, cookie=None, language='en', browser_lang="en-US",
                 timezone="Asia/Kolkata", region='IN'):
        self.base_url = "https://t.tiktok.com/api"
        self.user_agent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0"
        if cookie is None:
            cookie = {}
        self.verifyFp = cookie.get("s_v_web_id", "verify_kjf974fd_y7bupmR0_3uRm_43kF_Awde_8K95qt0GcpBk")
        self.tt_webid = cookie.get("tt_webid", "6913027209393473025")
        self.headers = {
            'Host': 't.tiktok.com',
            'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0',
            'Referer': 'https://www.tiktok.com/',
            'Cookie': 'tt_webid_v2={}; tt_webid={}'.format(self.tt_webid, self.tt_webid)
        }
        self.language = language
        self.browser_lang = browser_lang
        self.timezone = timezone
        self.region = region
        # Default query parameters mimicking a desktop browser session.
        self.default_params = {
            "aid": "1988",
            "app_name": "tiktok_web",
            "device_platform": "web",
            "referer": "",
            "user_agent": urllib.parse.quote_plus(self.user_agent),
            "cookie_enabled": "true",
            "screen_width": "1920",
            "screen_height": "1080",
            "browser_language": self.browser_lang,
            "browser_platform": "Linux+x86_64",
            "browser_name": "Mozilla",
            "browser_version": "5.0+(X11)",
            "browser_online": "true",
            "timezone_name": self.timezone,
            # "page_referer": "https://www.tiktok.com/foryou?lang=en",
            "priority_region": self.region,
            "appId": "1180",
            "region": self.region,
            "appType": "t",
            "isAndroid": "false",
            "isMobile": "false",
            "isIOS": "false",
            "OS": "linux",
            "tt-web-region": self.region,
            "language": self.language,
            "verifyFp": self.verifyFp
        }
        self.signature_key = "_signature"
        self.did_key = "did"
        self.tiktok_browser = TikTokBrowser(self.user_agent)

    def send_get_request(self, url, params, extra_headers=None):
        # Sign the request: append a random 19-digit device id, then the
        # _signature value computed by the headless-browser helper.
        url = build_get_url(url, params)
        did = ''.join(random.choice(string.digits) for num in range(19))
        url = build_get_url(url, {self.did_key: did}, append=True)
        signature = self.tiktok_browser.fetch_auth_params(url, language=self.language)
        url = build_get_url(url, {self.signature_key: signature}, append=True)
        if extra_headers is None:
            headers = self.headers
        else:
            # Merge per-request overrides over the defaults so that e.g. a
            # tag- or music-specific Referer actually takes effect.
            headers = {}
            for key, val in self.headers.items():
                headers[key] = val
            for key, val in extra_headers.items():
                headers[key] = val
        data = get_req_json(url, params=None, headers=headers)
        return data

    def getTrending(self, count=30):
        url = self.base_url + "/item_list/"
        req_default_params = {
            "id": "1",
            "type": "5",
            "secUid": "",
            "maxCursor": "0",
            "minCursor": "0",
            "sourceType": "12",
        }
        params = {
            "count": str(count)
        }
        for key, val in req_default_params.items():
            params[key] = val
        for key, val in self.default_params.items():
            params[key] = val
        return self.send_get_request(url, params)

    def getUserByName(self, user_name):
        url = "https://t.tiktok.com/node/share/user/@" + user_name
        params = {
            "uniqueId": user_name,
            "validUniqueId": user_name,
        }
        for key, val in self.default_params.items():
            params[key] = val
        return self.send_get_request(url, params)

    def getVideosByUserName(self, user_name, count=30):
        user_data = self.getUserByName(user_name)
        user_obj = user_data["userInfo"]["user"]
        user_id = user_obj["id"]
        secUid = user_obj["secUid"]
        url = self.base_url + "/item_list/"
        req_default_params = {
            "type": "1",
            "maxCursor": "0",
            "minCursor": "0",
            "sourceType": "8",
        }
        params = {
            "id": user_id,
            "secUid": secUid,
            "count": str(count)
        }
        for key, val in req_default_params.items():
            params[key] = val
        for key, val in self.default_params.items():
            params[key] = val
        return self.send_get_request(url, params)

    def getLikesByUserName(self, user_name, count=30):
        user_data = self.getUserByName(user_name)
        user_obj = user_data["userInfo"]["user"]
        user_id = user_obj["id"]
        secUid = user_obj["secUid"]
        url = self.base_url + "/item_list/"
        req_default_params = {
            "type": "2",
            "maxCursor": "0",
            "minCursor": "0",
            "sourceType": "9",
        }
        params = {
            "id": user_id,
            "secUid": secUid,
            "count": str(count)
        }
        for key, val in req_default_params.items():
            params[key] = val
        for key, val in self.default_params.items():
            params[key] = val
        return self.send_get_request(url, params)

    def getHashTag(self, hashTag):
        url = self.base_url + "/challenge/detail/"
        params = {
            "challengeName": hashTag.replace("#", "")
        }
        for key, val in self.default_params.items():
            params[key] = val
        return self.send_get_request(url, params)

    def getVideosByHashTag(self, hashTag, count=30):
        hashTag = hashTag.replace("#", "")
        hashTag_obj = self.getHashTag(hashTag)
        hashTag_id = hashTag_obj["challengeInfo"]["challenge"]["id"]
        url = self.base_url + "/challenge/item_list/"
        req_default_params = {
            "secUid": "",
            "type": "3",
            "minCursor": "0",
            "maxCursor": "0",
            "shareUid": "",
            "recType": ""
        }
        params = {
            "challengeID": str(hashTag_id),
            "count": str(count),
            "cursor": "0",
        }
        for key, val in req_default_params.items():
            params[key] = val
        for key, val in self.default_params.items():
            params[key] = val
        extra_headers = {"Referer": "https://www.tiktok.com/tag/" + str(hashTag)}
        return self.send_get_request(url, params, extra_headers=extra_headers)

    def getMusic(self, music_id):
        url = self.base_url + "/music/detail/"
        params = {
            "musicId": music_id
        }
        for key, val in self.default_params.items():
            params[key] = val
        return self.send_get_request(url, params)

    def getVideosByMusic(self, music_id, count=30):
        url = self.base_url + "/music/item_list/"
        req_default_params = {
            "secUid": "",
            "type": "4",
            "minCursor": "0",
            "maxCursor": "0",
            "shareUid": "",
            "recType": ""
        }
        params = {
            "musicID": str(music_id),
            "count": str(count),
            "cursor": "0",
        }
        for key, val in req_default_params.items():
            params[key] = val
        for key, val in self.default_params.items():
            params[key] = val
        extra_headers = {"Referer": "https://www.tiktok.com/music/original-sound-" + str(music_id)}
        return self.send_get_request(url, params, extra_headers=extra_headers)

    def getVideoById(self, video_id):
        url = self.base_url + "/item/detail/"
        params = {
            "itemId": str(video_id)
        }
        for key, val in self.default_params.items():
            params[key] = val
        return self.send_get_request(url, params)

    def downloadVideoById(self, video_id, save_path):
        video_info = self.getVideoById(video_id)
        video_url = video_info["itemInfo"]["itemStruct"]["video"]["playAddr"]
        video_data = get_req_content(video_url, params=None, headers=self.headers)
        with open(save_path, 'wb') as f:
            f.write(video_data)

    def downloadVideoByIdNoWatermark(self, video_id, save_path):
        video_info = self.getVideoById(video_id)
        video_url = video_info["itemInfo"]["itemStruct"]["video"]["downloadAddr"]
        video_data = get_req_text(video_url, params=None, headers=self.headers)
        # Older uploads embed a "vid:" marker in the watermarked stream; the
        # 32-character id after it addresses the watermark-free source.
        pos = video_data.find("vid:")
        if pos == -1:
            raise VideoException("Video without watermark not available in new videos")
        video_url_no_wm = "https://api2-16-h2.musical.ly/aweme/v1/play/" \
                          "?video_id={}&vr_type=0&is_play_url=1" \
                          "&source=PackSourceEnum_PUBLISH&media_type=4" \
            .format(video_data[pos + 4:pos + 36])
        video_data_no_wm = get_req_content(video_url_no_wm, params=None, headers=self.headers)
        with open(save_path, 'wb') as f:
            f.write(video_data_no_wm)
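A brief usage sketch for the client above. It assumes the package re-exports TikTokAPI at the top level (an assumption; adjust the import to your install), and the cookie values and video id are placeholders you must supply yourself:

from TikTokAPI import TikTokAPI  # assumed top-level re-export

# Cookie values come from a logged-in TikTok browser session; both keys
# are the ones read in __init__ above (placeholders here).
cookie = {
    "s_v_web_id": "<your s_v_web_id>",
    "tt_webid": "<your tt_webid>",
}
api = TikTokAPI(cookie=cookie)

# Each getter returns the decoded JSON payload of the signed GET request.
trending = api.getTrending(count=10)
print(list(trending))  # inspect the top-level keys of the response

# Save a video to disk; "<video id>" is a placeholder.
api.downloadVideoById("<video id>", "./video.mp4")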
PypiClean
/accelbyte_py_sdk-0.48.0.tar.gz/accelbyte_py_sdk-0.48.0/accelbyte_py_sdk/api/ugc/operations/admin_group/single_admin_get_all_groups.py
# template file: ags_py_codegen

# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import

# AccelByte Gaming Services Ugc Service (2.12.0)

from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union

from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse

from ...models import ModelsPaginatedGroupResponse
from ...models import ResponseError


class SingleAdminGetAllGroups(Operation):
    """Get all user groups (SingleAdminGetAllGroups)

    Required permission ADMIN:NAMESPACE:{namespace}:USER:{userId}:CONTENTGROUP [READ].

    Required Permission(s):
        - ADMIN:NAMESPACE:{namespace}:USER:{userId}:CONTENTGROUP [READ]

    Properties:
        url: /ugc/v1/admin/namespaces/{namespace}/groups

        method: GET

        tags: ["Admin Group"]

        consumes: ["application/json"]

        produces: ["application/json"]

        securities: [BEARER_AUTH]

        namespace: (namespace) REQUIRED str in path

        limit: (limit) OPTIONAL int in query

        offset: (offset) OPTIONAL int in query

    Responses:
        200: OK - ModelsPaginatedGroupResponse (OK)

        401: Unauthorized - ResponseError (Unauthorized)

        404: Not Found - ResponseError (Not Found)

        500: Internal Server Error - ResponseError (Internal Server Error)
    """

    # region fields

    _url: str = "/ugc/v1/admin/namespaces/{namespace}/groups"
    _method: str = "GET"
    _consumes: List[str] = ["application/json"]
    _produces: List[str] = ["application/json"]
    _securities: List[List[str]] = [["BEARER_AUTH"]]
    _location_query: str = None

    namespace: str  # REQUIRED in [path]
    limit: int  # OPTIONAL in [query]
    offset: int  # OPTIONAL in [query]

    # endregion fields

    # region properties

    @property
    def url(self) -> str:
        return self._url

    @property
    def method(self) -> str:
        return self._method

    @property
    def consumes(self) -> List[str]:
        return self._consumes

    @property
    def produces(self) -> List[str]:
        return self._produces

    @property
    def securities(self) -> List[List[str]]:
        return self._securities

    @property
    def location_query(self) -> str:
        return self._location_query

    # endregion properties

    # region get methods

    # endregion get methods

    # region get_x_params methods

    def get_all_params(self) -> dict:
        return {
            "path": self.get_path_params(),
            "query": self.get_query_params(),
        }

    def get_path_params(self) -> dict:
        result = {}
        if hasattr(self, "namespace"):
            result["namespace"] = self.namespace
        return result

    def get_query_params(self) -> dict:
        result = {}
        if hasattr(self, "limit"):
            result["limit"] = self.limit
        if hasattr(self, "offset"):
            result["offset"] = self.offset
        return result

    # endregion get_x_params methods

    # region is/has methods

    # endregion is/has methods

    # region with_x methods

    def with_namespace(self, value: str) -> SingleAdminGetAllGroups:
        self.namespace = value
        return self

    def with_limit(self, value: int) -> SingleAdminGetAllGroups:
        self.limit = value
        return self

    def with_offset(self, value: int) -> SingleAdminGetAllGroups:
        self.offset = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        result: dict = {}
        if hasattr(self, "namespace") and self.namespace:
            result["namespace"] = str(self.namespace)
        elif include_empty:
            result["namespace"] = ""
        if hasattr(self, "limit") and self.limit:
            result["limit"] = int(self.limit)
        elif include_empty:
            result["limit"] = 0
        if hasattr(self, "offset") and self.offset:
            result["offset"] = int(self.offset)
        elif include_empty:
            result["offset"] = 0
        return result

    # endregion to methods

    # region response methods

    # noinspection PyMethodMayBeStatic
    def parse_response(
        self, code: int, content_type: str, content: Any
    ) -> Tuple[
        Union[None, ModelsPaginatedGroupResponse],
        Union[None, HttpResponse, ResponseError],
    ]:
        """Parse the given response.

        200: OK - ModelsPaginatedGroupResponse (OK)

        401: Unauthorized - ResponseError (Unauthorized)

        404: Not Found - ResponseError (Not Found)

        500: Internal Server Error - ResponseError (Internal Server Error)

        ---: HttpResponse (Undocumented Response)

        ---: HttpResponse (Unexpected Content-Type Error)

        ---: HttpResponse (Unhandled Error)
        """
        pre_processed_response, error = self.pre_process_response(
            code=code, content_type=content_type, content=content
        )
        if error is not None:
            return None, None if error.is_no_content() else error
        code, content_type, content = pre_processed_response

        if code == 200:
            return ModelsPaginatedGroupResponse.create_from_dict(content), None
        if code == 401:
            return None, ResponseError.create_from_dict(content)
        if code == 404:
            return None, ResponseError.create_from_dict(content)
        if code == 500:
            return None, ResponseError.create_from_dict(content)

        return self.handle_undocumented_response(
            code=code, content_type=content_type, content=content
        )

    # endregion response methods

    # region static methods

    @classmethod
    def create(
        cls,
        namespace: str,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
        **kwargs,
    ) -> SingleAdminGetAllGroups:
        instance = cls()
        instance.namespace = namespace
        if limit is not None:
            instance.limit = limit
        if offset is not None:
            instance.offset = offset
        return instance

    @classmethod
    def create_from_dict(
        cls, dict_: dict, include_empty: bool = False
    ) -> SingleAdminGetAllGroups:
        instance = cls()
        if "namespace" in dict_ and dict_["namespace"] is not None:
            instance.namespace = str(dict_["namespace"])
        elif include_empty:
            instance.namespace = ""
        if "limit" in dict_ and dict_["limit"] is not None:
            instance.limit = int(dict_["limit"])
        elif include_empty:
            instance.limit = 0
        if "offset" in dict_ and dict_["offset"] is not None:
            instance.offset = int(dict_["offset"])
        elif include_empty:
            instance.offset = 0
        return instance

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        return {
            "namespace": "namespace",
            "limit": "limit",
            "offset": "offset",
        }

    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        return {
            "namespace": True,
            "limit": False,
            "offset": False,
        }

    # endregion static methods
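A minimal driving sketch grounded in just this module (plus the Operation base class it imports); the namespace value is a placeholder, and in the installed SDK the operation would normally be handed to the core request runner rather than executed by hand:

# Build the operation and inspect the parameters it contributes to a request.
op = SingleAdminGetAllGroups.create(namespace="mygame", limit=20, offset=40)

print(op.url)               # /ugc/v1/admin/namespaces/{namespace}/groups
print(op.get_all_params())  # {'path': {'namespace': 'mygame'},
                            #  'query': {'limit': 20, 'offset': 40}}

# Round-trip through the generated serialization helpers. Note that
# to_dict() skips falsy values (e.g. offset=0) unless include_empty=True.
clone = SingleAdminGetAllGroups.create_from_dict(op.to_dict())
assert clone.get_all_params() == op.get_all_params()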
PypiClean
/django_htmx_ui_adminlte-0.1.13-py3-none-any.whl/django_htmx_ui_adminlte/forms.py
from django import forms
from django.contrib.auth.models import User


class SignupForm(forms.Form):
    email = forms.EmailField(
        label='',
        widget=forms.EmailInput(attrs={'id': 'email', 'class': "form-control", 'placeholder': 'Email'}),
        required=True,
    )
    password = forms.CharField(
        label='',
        widget=forms.PasswordInput(attrs={'id': 'password', 'class': "form-control", 'placeholder': 'Password'}),
        min_length=4,
        required=True,
    )
    confirm_password = forms.CharField(
        label='',
        widget=forms.PasswordInput(attrs={'id': 'confirm_password', 'class': "form-control", 'placeholder': 'Confirm password'}),
        min_length=4,
        required=True,
    )

    def clean(self):
        email = self.cleaned_data.get('email')
        password = self.cleaned_data.get('password')
        confirm_password = self.cleaned_data.get('confirm_password')

        if password != confirm_password:
            self.add_error('password', 'Passwords do not match.')
            return

        if User.objects.filter(username=email).exists():
            self.add_error('email', 'There is already an account with this email.')
            return

        return self.cleaned_data


class LoginForm(forms.Form):
    email = forms.EmailField(
        label='',
        widget=forms.EmailInput(attrs={'id': 'email', 'class': "form-control", 'placeholder': 'Email'}),
        required=True,
    )
    password = forms.CharField(
        label='',
        widget=forms.PasswordInput(attrs={'id': 'password', 'class': "form-control", 'placeholder': 'Password'}),
        min_length=4,
        required=True,
    )
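A minimal sketch of wiring SignupForm into a view; the template name and the 'login' URL name are placeholders, not part of this package:

from django.contrib.auth.models import User
from django.shortcuts import redirect, render

from .forms import SignupForm  # assuming this module is the app's forms.py


def signup(request):
    form = SignupForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        # clean() above has already verified the password match and that
        # no user exists with this email.
        User.objects.create_user(
            username=form.cleaned_data['email'],
            email=form.cleaned_data['email'],
            password=form.cleaned_data['password'],
        )
        return redirect('login')  # placeholder URL name
    return render(request, 'signup.html', {'form': form})  # placeholder template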
PypiClean
/package-0.1.1.tar.gz/package-0.1.1/doc/Design.rst
This document describes the design goals and details of `package`, the
package package package.

Glossary
--------

author
    A person who writes and distributes Python packages.

package
    `package` has a double meaning in Python. It means either `namespace`
    or `distribution`. In this text, it means the latter.

user
    A person who installs and uses Python packages.

Design Goals
------------

* Give authors a simple way to start new packages.
* Put all the author/package info in a yaml file (instead of `setup.py`);
  a hypothetical sketch of this idea follows below.
* Lead users through the installation matrix with lots of helpful suggestions.
* Provide authors with a set of simple standards for various author tasks.
* Provide an easy upgrade path for new releases of package-py.
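As a purely hypothetical illustration of the yaml goal above, the metadata that `setup.py` normally carries might be read like this; the file name `package.yaml` and its keys are invented for the example and are not specified by this document:

# Hypothetical only: neither the file name nor the keys come from this
# project's documentation.
import yaml  # PyYAML

with open("package.yaml") as f:
    meta = yaml.safe_load(f)

# e.g. meta might look like:
# {"name": "package", "version": "0.1.1", "author": "..."}
print(meta["name"], meta["version"])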
PypiClean
/aisdb-1.6.5-cp39-none-win_amd64.whl/aisdb_web/dist_map_bingmaps/assets/render-0afdbaa3.js
import{w as on,u as Qn,l as gn}from"./map-e7b21464.js";import"./main-b50358e6.js";import"./proj-04f64360.js";import"./constants-f1a52cc9.js";/*! * html2canvas 1.4.1 <https://html2canvas.hertzen.com> * Copyright (c) 2022 Niklas von Hertzen <https://hertzen.com> * Released under MIT License *//*! ***************************************************************************** Copyright (c) Microsoft Corporation. Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ***************************************************************************** */var mr=function(e,A){return mr=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(t,r){t.__proto__=r}||function(t,r){for(var B in r)Object.prototype.hasOwnProperty.call(r,B)&&(t[B]=r[B])},mr(e,A)};function tA(e,A){if(typeof A!="function"&&A!==null)throw new TypeError("Class extends value "+String(A)+" is not a constructor or null");mr(e,A);function t(){this.constructor=e}e.prototype=A===null?Object.create(A):(t.prototype=A.prototype,new t)}var Lr=function(){return Lr=Object.assign||function(A){for(var t,r=1,B=arguments.length;r<B;r++){t=arguments[r];for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(A[n]=t[n])}return A},Lr.apply(this,arguments)};function J(e,A,t,r){function B(n){return n instanceof t?n:new t(function(s){s(n)})}return new(t||(t=Promise))(function(n,s){function i(Q){try{o(r.next(Q))}catch(g){s(g)}}function a(Q){try{o(r.throw(Q))}catch(g){s(g)}}function o(Q){Q.done?n(Q.value):B(Q.value).then(i,a)}o((r=r.apply(e,A||[])).next())})}function _(e,A){var t={label:0,sent:function(){if(n[0]&1)throw n[1];return n[1]},trys:[],ops:[]},r,B,n,s;return s={next:i(0),throw:i(1),return:i(2)},typeof Symbol=="function"&&(s[Symbol.iterator]=function(){return this}),s;function i(o){return function(Q){return a([o,Q])}}function a(o){if(r)throw new TypeError("Generator is already executing.");for(;t;)try{if(r=1,B&&(n=o[0]&2?B.return:o[0]?B.throw||((n=B.return)&&n.call(B),0):B.next)&&!(n=n.call(B,o[1])).done)return n;switch(B=0,n&&(o=[o[0]&2,n.value]),o[0]){case 0:case 1:n=o;break;case 4:return t.label++,{value:o[1],done:!1};case 5:t.label++,B=o[1],o=[0];continue;case 7:o=t.ops.pop(),t.trys.pop();continue;default:if(n=t.trys,!(n=n.length>0&&n[n.length-1])&&(o[0]===6||o[0]===2)){t=0;continue}if(o[0]===3&&(!n||o[1]>n[0]&&o[1]<n[3])){t.label=o[1];break}if(o[0]===6&&t.label<n[1]){t.label=n[1],n=o;break}if(n&&t.label<n[2]){t.label=n[2],t.ops.push(o);break}n[2]&&t.ops.pop(),t.trys.pop();continue}o=A.call(e,t)}catch(Q){o=[6,Q],B=0}finally{r=n=0}if(o[0]&5)throw o[1];return{value:o[0]?o[1]:void 0,done:!0}}}function le(e,A,t){if(t||arguments.length===2)for(var r=0,B=A.length,n;r<B;r++)(n||!(r in A))&&(n||(n=Array.prototype.slice.call(A,0,r)),n[r]=A[r]);return e.concat(n||A)}var cA=function(){function e(A,t,r,B){this.left=A,this.top=t,this.width=r,this.height=B}return e.prototype.add=function(A,t,r,B){return new e(this.left+A,this.top+t,this.width+r,this.height+B)},e.fromClientRect=function(A,t){return new 
e(t.left+A.windowBounds.left,t.top+A.windowBounds.top,t.width,t.height)},e.fromDOMRectList=function(A,t){var r=Array.from(t).find(function(B){return B.width!==0});return r?new e(r.left+A.windowBounds.left,r.top+A.windowBounds.top,r.width,r.height):e.EMPTY},e.EMPTY=new e(0,0,0,0),e}(),ze=function(e,A){return cA.fromClientRect(e,A.getBoundingClientRect())},wn=function(e){var A=e.body,t=e.documentElement;if(!A||!t)throw new Error("Unable to get document size");var r=Math.max(Math.max(A.scrollWidth,t.scrollWidth),Math.max(A.offsetWidth,t.offsetWidth),Math.max(A.clientWidth,t.clientWidth)),B=Math.max(Math.max(A.scrollHeight,t.scrollHeight),Math.max(A.offsetHeight,t.offsetHeight),Math.max(A.clientHeight,t.clientHeight));return new cA(0,0,r,B)},$e=function(e){for(var A=[],t=0,r=e.length;t<r;){var B=e.charCodeAt(t++);if(B>=55296&&B<=56319&&t<r){var n=e.charCodeAt(t++);(n&64512)===56320?A.push(((B&1023)<<10)+(n&1023)+65536):(A.push(B),t--)}else A.push(B)}return A},S=function(){for(var e=[],A=0;A<arguments.length;A++)e[A]=arguments[A];if(String.fromCodePoint)return String.fromCodePoint.apply(String,e);var t=e.length;if(!t)return"";for(var r=[],B=-1,n="";++B<t;){var s=e[B];s<=65535?r.push(s):(s-=65536,r.push((s>>10)+55296,s%1024+56320)),(B+1===t||r.length>16384)&&(n+=String.fromCharCode.apply(String,r),r.length=0)}return n},nt="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",cn=typeof Uint8Array>"u"?[]:new Uint8Array(256);for(var Ce=0;Ce<nt.length;Ce++)cn[nt.charCodeAt(Ce)]=Ce;var st="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",zA=typeof Uint8Array>"u"?[]:new Uint8Array(256);for(var fe=0;fe<st.length;fe++)zA[st.charCodeAt(fe)]=fe;var un=function(e){var A=e.length*.75,t=e.length,r,B=0,n,s,i,a;e[e.length-1]==="="&&(A--,e[e.length-2]==="="&&A--);var o=typeof ArrayBuffer<"u"&&typeof Uint8Array<"u"&&typeof Uint8Array.prototype.slice<"u"?new ArrayBuffer(A):new Array(A),Q=Array.isArray(o)?o:new Uint8Array(o);for(r=0;r<t;r+=4)n=zA[e.charCodeAt(r)],s=zA[e.charCodeAt(r+1)],i=zA[e.charCodeAt(r+2)],a=zA[e.charCodeAt(r+3)],Q[B++]=n<<2|s>>4,Q[B++]=(s&15)<<4|i>>2,Q[B++]=(i&3)<<6|a&63;return o},ln=function(e){for(var A=e.length,t=[],r=0;r<A;r+=2)t.push(e[r+1]<<8|e[r]);return t},Cn=function(e){for(var A=e.length,t=[],r=0;r<A;r+=4)t.push(e[r+3]<<24|e[r+2]<<16|e[r+1]<<8|e[r]);return t},bA=5,zr=6+5,ir=2,fn=zr-bA,BB=65536>>bA,Un=1<<bA,or=Un-1,Fn=1024>>bA,hn=BB+Fn,dn=hn,En=32,Hn=dn+En,pn=65536>>zr,In=1<<fn,vn=In-1,at=function(e,A,t){return e.slice?e.slice(A,t):new Uint16Array(Array.prototype.slice.call(e,A,t))},yn=function(e,A,t){return e.slice?e.slice(A,t):new Uint32Array(Array.prototype.slice.call(e,A,t))},Kn=function(e,A){var t=un(e),r=Array.isArray(t)?Cn(t):new Uint32Array(t),B=Array.isArray(t)?ln(t):new Uint16Array(t),n=24,s=at(B,n/2,r[4]/2),i=r[5]===2?at(B,(n+r[4])/2):yn(r,Math.ceil((n+r[4])/4));return new mn(r[0],r[1],r[2],r[3],s,i)},mn=function(){function e(A,t,r,B,n,s){this.initialValue=A,this.errorValue=t,this.highStart=r,this.highValueIndex=B,this.index=n,this.data=s}return e.prototype.get=function(A){var t;if(A>=0){if(A<55296||A>56319&&A<=65535)return t=this.index[A>>bA],t=(t<<ir)+(A&or),this.data[t];if(A<=65535)return t=this.index[BB+(A-55296>>bA)],t=(t<<ir)+(A&or),this.data[t];if(A<this.highStart)return t=Hn-pn+(A>>zr),t=this.index[t],t+=A>>bA&vn,t=this.index[t],t=(t<<ir)+(A&or),this.data[t];if(A<=1114111)return this.data[this.highValueIndex]}return this.errorValue},e}(),it="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",Ln=typeof 
Uint8Array>"u"?[]:new Uint8Array(256);for(var Ue=0;Ue<it.length;Ue++)Ln[it.charCodeAt(Ue)]=Ue;var Dn="KwAAAAAAAAAACA4AUD0AADAgAAACAAAAAAAIABAAGABAAEgAUABYAGAAaABgAGgAYgBqAF8AZwBgAGgAcQB5AHUAfQCFAI0AlQCdAKIAqgCyALoAYABoAGAAaABgAGgAwgDKAGAAaADGAM4A0wDbAOEA6QDxAPkAAQEJAQ8BFwF1AH0AHAEkASwBNAE6AUIBQQFJAVEBWQFhAWgBcAF4ATAAgAGGAY4BlQGXAZ8BpwGvAbUBvQHFAc0B0wHbAeMB6wHxAfkBAQIJAvEBEQIZAiECKQIxAjgCQAJGAk4CVgJeAmQCbAJ0AnwCgQKJApECmQKgAqgCsAK4ArwCxAIwAMwC0wLbAjAA4wLrAvMC+AIAAwcDDwMwABcDHQMlAy0DNQN1AD0DQQNJA0kDSQNRA1EDVwNZA1kDdQB1AGEDdQBpA20DdQN1AHsDdQCBA4kDkQN1AHUAmQOhA3UAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AKYDrgN1AHUAtgO+A8YDzgPWAxcD3gPjA+sD8wN1AHUA+wMDBAkEdQANBBUEHQQlBCoEFwMyBDgEYABABBcDSARQBFgEYARoBDAAcAQzAXgEgASIBJAEdQCXBHUAnwSnBK4EtgS6BMIEyAR1AHUAdQB1AHUAdQCVANAEYABgAGAAYABgAGAAYABgANgEYADcBOQEYADsBPQE/AQEBQwFFAUcBSQFLAU0BWQEPAVEBUsFUwVbBWAAYgVgAGoFcgV6BYIFigWRBWAAmQWfBaYFYABgAGAAYABgAKoFYACxBbAFuQW6BcEFwQXHBcEFwQXPBdMF2wXjBeoF8gX6BQIGCgYSBhoGIgYqBjIGOgZgAD4GRgZMBmAAUwZaBmAAYABgAGAAYABgAGAAYABgAGAAYABgAGIGYABpBnAGYABgAGAAYABgAGAAYABgAGAAYAB4Bn8GhQZgAGAAYAB1AHcDFQSLBmAAYABgAJMGdQA9A3UAmwajBqsGqwaVALMGuwbDBjAAywbSBtIG1QbSBtIG0gbSBtIG0gbdBuMG6wbzBvsGAwcLBxMHAwcbByMHJwcsBywHMQcsB9IGOAdAB0gHTgfSBkgHVgfSBtIG0gbSBtIG0gbSBtIG0gbSBiwHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAdgAGAALAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAdbB2MHLAcsB
ywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsB2kH0gZwB64EdQB1AHUAdQB1AHUAdQB1AHUHfQdgAIUHjQd1AHUAlQedB2AAYAClB6sHYACzB7YHvgfGB3UAzgfWBzMB3gfmB1EB7gf1B/0HlQENAQUIDQh1ABUIHQglCBcDLQg1CD0IRQhNCEEDUwh1AHUAdQBbCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIcAh3CHoIMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIgggwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAALAcsBywHLAcsBywHLAcsBywHLAcsB4oILAcsB44I0gaWCJ4Ipgh1AHUAqgiyCHUAdQB1AHUAdQB1AHUAdQB1AHUAtwh8AXUAvwh1AMUIyQjRCNkI4AjoCHUAdQB1AO4I9gj+CAYJDgkTCS0HGwkjCYIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiAAIAAAAFAAYABgAGIAXwBgAHEAdQBFAJUAogCyAKAAYABgAEIA4ABGANMA4QDxAMEBDwE1AFwBLAE6AQEBUQF4QkhCmEKoQrhCgAHIQsAB0MLAAcABwAHAAeDC6ABoAHDCwMMAAcABwAHAAdDDGMMAAcAB6MM4wwjDWMNow3jDaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAEjDqABWw6bDqABpg6gAaABoAHcDvwOPA+gAaABfA/8DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DpcPAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcAB9cPKwkyCToJMAB1AHUAdQBCCUoJTQl1AFUJXAljCWcJawkwADAAMAAwAHMJdQB2CX4JdQCECYoJjgmWCXUAngkwAGAAYABxAHUApgn3A64JtAl1ALkJdQDACTAAMAAwADAAdQB1AHUAdQB1AHUAdQB1AHUAowYNBMUIMAAwADAAMADICcsJ0wnZCRUE4QkwAOkJ8An4CTAAMAB1AAAKvwh1AAgKDwoXCh8KdQAwACcK
Lgp1ADYKqAmICT4KRgowADAAdQB1AE4KMAB1AFYKdQBeCnUAZQowADAAMAAwADAAMAAwADAAMAAVBHUAbQowADAAdQC5CXUKMAAwAHwBxAijBogEMgF9CoQKiASMCpQKmgqIBKIKqgquCogEDQG2Cr4KxgrLCjAAMADTCtsKCgHjCusK8Qr5CgELMAAwADAAMAB1AIsECQsRC3UANAEZCzAAMAAwADAAMAB1ACELKQswAHUANAExCzkLdQBBC0kLMABRC1kLMAAwADAAMAAwADAAdQBhCzAAMAAwAGAAYABpC3ELdwt/CzAAMACHC4sLkwubC58Lpwt1AK4Ltgt1APsDMAAwADAAMAAwADAAMAAwAL4LwwvLC9IL1wvdCzAAMADlC+kL8Qv5C/8LSQswADAAMAAwADAAMAAwADAAMAAHDDAAMAAwADAAMAAODBYMHgx1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1ACYMMAAwADAAdQB1AHUALgx1AHUAdQB1AHUAdQA2DDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AD4MdQBGDHUAdQB1AHUAdQB1AEkMdQB1AHUAdQB1AFAMMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQBYDHUAdQB1AF8MMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUA+wMVBGcMMAAwAHwBbwx1AHcMfwyHDI8MMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAYABgAJcMMAAwADAAdQB1AJ8MlQClDDAAMACtDCwHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsB7UMLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AA0EMAC9DDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAsBywHLAcsBywHLAcsBywHLQcwAMEMyAwsBywHLAcsBywHLAcsBywHLAcsBywHzAwwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1ANQM2QzhDDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMABgAGAAYABgAGAAYABgAOkMYADxDGAA+AwADQYNYABhCWAAYAAODTAAMAAwADAAFg1gAGAAHg37AzAAMAAwADAAYABgACYNYAAsDTQNPA1gAEMNPg1LDWAAYABgAGAAYABgAGAAYABgAGAAUg1aDYsGVglhDV0NcQBnDW0NdQ15DWAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAlQCBDZUAiA2PDZcNMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAnw2nDTAAMAAwADAAMAAwAHUArw23DTAAMAAwADAAMAAwADAAMAAwADAAMAB1AL8NMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAB1AHUAdQB1AHUAdQDHDTAAYABgAM8NMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAA1w11ANwNMAAwAD0B5A0wADAAMAAwADAAMADsDfQN/A0EDgwOFA4wABsOMAAwADAAMAAwADAAMAAwANIG0gbSBtIG0gbSBtIG0gYjDigOwQUuDsEFMw7SBjoO0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGQg5KDlIOVg7SBtIGXg5lDm0OdQ7SBtIGfQ6EDooOjQ6UDtIGmg6hDtIG0gaoDqwO0ga0DrwO0gZgAGAAYADEDmAAYAAkBtIGzA5gANIOYADaDokO0gbSBt8O5w7SBu8O0gb1DvwO0gZgAGAAxA7SBtIG0gbSBtIGYABgAGAAYAAED2AAsAUMD9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGFA8sBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAccD9IGLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHJA8sBywHLAcsBywHLAccDywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywPLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAc0D9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAccD9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gb
SBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGFA8sBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHPA/SBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gYUD0QPlQCVAJUAMAAwADAAMACVAJUAlQCVAJUAlQCVAEwPMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAA//8EAAQABAAEAAQABAAEAAQABAANAAMAAQABAAIABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQACgATABcAHgAbABoAHgAXABYAEgAeABsAGAAPABgAHABLAEsASwBLAEsASwBLAEsASwBLABgAGAAeAB4AHgATAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQABYAGwASAB4AHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAWAA0AEQAeAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAFAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAJABYAGgAbABsAGwAeAB0AHQAeAE8AFwAeAA0AHgAeABoAGwBPAE8ADgBQAB0AHQAdAE8ATwAXAE8ATwBPABYAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAFAAUABQAFAAUABQAFAAUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAB4AHgAeAFAATwBAAE8ATwBPAEAATwBQAFAATwBQAB4AHgAeAB4AHgAeAB0AHQAdAB0AHgAdAB4ADgBQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgBQAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAJAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAkACQAJAAkACQAJAAkABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgAeAFAAHgAeAB4AKwArAFAAUABQAFAAGABQACsAKwArACsAHgAeAFAAHgBQAFAAUAArAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAUAAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAYAA0AKwArAB4AHgAbACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQADQAEAB4ABAAEAB4ABAAEABMABAArACsAKwArACsAKwArACsAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAKwArACsAKwBWAFYAVgBWAB4AHgArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AGgAaABoAGAAYAB4AHgAEAAQABAAEAAQABAAEAAQABAAEAAQAEwAEACsAEwATAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABLAEsASwBLAEsASwBLAEsASwBLABoAGQAZAB4AUABQAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQABMAUAAEAAQABAAEAA
QABAAEAB4AHgAEAAQABAAEAAQABABQAFAABAAEAB4ABAAEAAQABABQAFAASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUAAeAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAFAABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQAUABQAB4AHgAYABMAUAArACsABAAbABsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAFAABAAEAAQABAAEAFAABAAEAAQAUAAEAAQABAAEAAQAKwArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAArACsAHgArAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAUAAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAABAAEAA0ADQBLAEsASwBLAEsASwBLAEsASwBLAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUAArACsAKwBQAFAAUABQACsAKwAEAFAABAAEAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABABQACsAKwArACsAKwArACsAKwAEACsAKwArACsAUABQACsAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAFAAUAAaABoAUABQAFAAUABQAEwAHgAbAFAAHgAEACsAKwAEAAQABAArAFAAUABQAFAAUABQACsAKwArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQACsAUABQACsAKwAEACsABAAEAAQABAAEACsAKwArACsABAAEACsAKwAEAAQABAArACsAKwAEACsAKwArACsAKwArACsAUABQAFAAUAArAFAAKwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLAAQABABQAFAAUAAEAB4AKwArACsAKwArACsAKwArACsAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQAFAAUABQACsAKwAEAFAABAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAArACsAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAB4AGwArACsAKwArACsAKwArAFAABAAEAAQABAAEAAQAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABAArACsAKwArACsAKwArAAQABAAEACsAKwArACsAUABQACsAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAB4AUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAAQAUAArAFAAUABQAFAAUABQACsAKwArAFAAUABQACsAUABQAFAAUAArACsAKwBQAFAAKwBQACsAUABQACsAKwArAFAAUAArACsAKwBQAFAAUAArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArAAQABAAEAAQABAArACsAKwAEAAQABAArAAQABAAEAAQAKwArAFAAKwArACsAKwArACsABAArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAHgAeAB4AHgAeAB4AGwAeACsAKwArACsAKwAEAAQABAAEAAQAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAUAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAAEACsAKwArACsAKwArACsABAAEACsAUABQAFAAKwArACsAKwArAFAAUAAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAKwAOAFAAUABQAFAAUABQAFAAHgBQAAQABAAEAA4AUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAKwArAAQAUAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAAEACsAK
wArACsAKwArACsABAAEACsAKwArACsAKwArACsAUAArAFAAUAAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwBQAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAFAABAAEAAQABAAEAAQABAArAAQABAAEACsABAAEAAQABABQAB4AKwArACsAKwBQAFAAUAAEAFAAUABQAFAAUABQAFAAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAFAAUABQAFAAUABQAFAAUABQABoAUABQAFAAUABQAFAAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQACsAUAArACsAUABQAFAAUABQAFAAUAArACsAKwAEACsAKwArACsABAAEAAQABAAEAAQAKwAEACsABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArAAQABAAeACsAKwArACsAKwArACsAKwArACsAKwArAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAAqAFwAXAAqACoAKgAqACoAKgAqACsAKwArACsAGwBcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAeAEsASwBLAEsASwBLAEsASwBLAEsADQANACsAKwArACsAKwBcAFwAKwBcACsAXABcAFwAXABcACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACsAXAArAFwAXABcAFwAXABcAFwAXABcAFwAKgBcAFwAKgAqACoAKgAqACoAKgAqACoAXAArACsAXABcAFwAXABcACsAXAArACoAKgAqACoAKgAqACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwBcAFwAXABcAFAADgAOAA4ADgAeAA4ADgAJAA4ADgANAAkAEwATABMAEwATAAkAHgATAB4AHgAeAAQABAAeAB4AHgAeAB4AHgBLAEsASwBLAEsASwBLAEsASwBLAFAAUABQAFAAUABQAFAAUABQAFAADQAEAB4ABAAeAAQAFgARABYAEQAEAAQAUABQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQADQAEAAQABAAEAAQADQAEAAQAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABAArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArAA0ADQAeAB4AHgAeAB4AHgAEAB4AHgAeAB4AHgAeACsAHgAeAA4ADgANAA4AHgAeAB4AHgAeAAkACQArACsAKwArACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgBcAEsASwBLAEsASwBLAEsASwBLAEsADQANAB4AHgAeAB4AXABcAFwAXABcAFwAKgAqACoAKgBcAFwAXABcACoAKgAqAFwAKgAqACoAXABcACoAKgAqACoAKgAqACoAXABcAFwAKgAqACoAKgBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKgAqAFwAKgBLAEsASwBLAEsASwBLAEsASwBLACoAKgAqACoAKgAqAFAAUABQAFAAUABQACsAUAArACsAKwArACsAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgBQAFAAUABQAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAUAArACsAUABQAFAAUABQAFAAUAArAFAAKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAKwBQACsAUABQAFAAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsABAAEAAQAHgANAB4AHgAeAB4AHgAeAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUAArACsADQBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAANAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAWABEAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAA0ADQANAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwAr
ACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAAQABAAEACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAANAA0AKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUAArAAQABAArACsAKwArACsAKwArACsAKwArACsAKwBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqAA0ADQAVAFwADQAeAA0AGwBcACoAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwAeAB4AEwATAA0ADQAOAB4AEwATAB4ABAAEAAQACQArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUAAEAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQAUAArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAHgArACsAKwATABMASwBLAEsASwBLAEsASwBLAEsASwBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAArACsAXABcAFwAXABcACsAKwArACsAKwArACsAKwArACsAKwBcAFwAXABcAFwAXABcAFwAXABcAFwAXAArACsAKwArAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAXAArACsAKwAqACoAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAArACsAHgAeAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKwAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKwArAAQASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwArACsAKwArACoAKgAqACoAKgAqACoAXAAqACoAKgAqACoAKgArACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABABQAFAAUABQAFAAUABQACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwANAA0AHgANAA0ADQANAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAEAAQAHgAeAB4AHgAeAB4AHgAeAB4AKwArACsABAAEAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwAeAB4AHgAeAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArAA0ADQANAA0ADQBLAEsASwBLAEsASwBLAEsASwBLACsAKwArAFAAUABQAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAA0ADQBQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUAAeAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArAAQABAAEAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAAQAUABQAFAAUABQAFAABABQAFAABAAEAAQAUAArACsAKwArACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsABAAEAAQABAAEAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAKwBQACsAUAArAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgBQAB4AHgAeAFAAUABQACsAHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQACsAKwAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQACsAHgAeAB4AHgAeAB4AHgAOAB4AKwANAA0ADQANAA0ADQANAAkADQANAA0ACAAEAAsABAAEAA0ACQANAA0ADAAdAB0AHgAXABc
r,B;this.animationDuration=U(A,vi,t.animationDuration),this.backgroundClip=U(A,ra,t.backgroundClip),this.backgroundColor=U(A,ta,t.backgroundColor),this.backgroundImage=U(A,ca,t.backgroundImage),this.backgroundOrigin=U(A,ua,t.backgroundOrigin),this.backgroundPosition=U(A,la,t.backgroundPosition),this.backgroundRepeat=U(A,Ca,t.backgroundRepeat),this.backgroundSize=U(A,Ua,t.backgroundSize),this.borderTopColor=U(A,ha,t.borderTopColor),this.borderRightColor=U(A,da,t.borderRightColor),this.borderBottomColor=U(A,Ea,t.borderBottomColor),this.borderLeftColor=U(A,Ha,t.borderLeftColor),this.borderTopLeftRadius=U(A,pa,t.borderTopLeftRadius),this.borderTopRightRadius=U(A,Ia,t.borderTopRightRadius),this.borderBottomRightRadius=U(A,va,t.borderBottomRightRadius),this.borderBottomLeftRadius=U(A,ya,t.borderBottomLeftRadius),this.borderTopStyle=U(A,Ka,t.borderTopStyle),this.borderRightStyle=U(A,ma,t.borderRightStyle),this.borderBottomStyle=U(A,La,t.borderBottomStyle),this.borderLeftStyle=U(A,Da,t.borderLeftStyle),this.borderTopWidth=U(A,ba,t.borderTopWidth),this.borderRightWidth=U(A,xa,t.borderRightWidth),this.borderBottomWidth=U(A,Ta,t.borderBottomWidth),this.borderLeftWidth=U(A,Sa,t.borderLeftWidth),this.boxShadow=U(A,Ki,t.boxShadow),this.color=U(A,Oa,t.color),this.direction=U(A,Ma,t.direction),this.display=U(A,Ga,t.display),this.float=U(A,Va,t.cssFloat),this.fontFamily=U(A,Ui,t.fontFamily),this.fontSize=U(A,Fi,t.fontSize),this.fontStyle=U(A,Ei,t.fontStyle),this.fontVariant=U(A,di,t.fontVariant),this.fontWeight=U(A,hi,t.fontWeight),this.letterSpacing=U(A,Na,t.letterSpacing),this.lineBreak=U(A,Xa,t.lineBreak),this.lineHeight=U(A,_a,t.lineHeight),this.listStyleImage=U(A,Pa,t.listStyleImage),this.listStylePosition=U(A,Ja,t.listStylePosition),this.listStyleType=U(A,_r,t.listStyleType),this.marginTop=U(A,ka,t.marginTop),this.marginRight=U(A,Ya,t.marginRight),this.marginBottom=U(A,Wa,t.marginBottom),this.marginLeft=U(A,Za,t.marginLeft),this.opacity=U(A,li,t.opacity);var n=U(A,qa,t.overflow);this.overflowX=n[0],this.overflowY=n[n.length>1?1:0],this.overflowWrap=U(A,ja,t.overflowWrap),this.paddingTop=U(A,za,t.paddingTop),this.paddingRight=U(A,$a,t.paddingRight),this.paddingBottom=U(A,Ai,t.paddingBottom),this.paddingLeft=U(A,ei,t.paddingLeft),this.paintOrder=U(A,mi,t.paintOrder),this.position=U(A,ti,t.position),this.textAlign=U(A,ri,t.textAlign),this.textDecorationColor=U(A,Ci,(r=t.textDecorationColor)!==null&&r!==void 0?r:t.color),this.textDecorationLine=U(A,fi,(B=t.textDecorationLine)!==null&&B!==void 0?B:t.textDecoration),this.textShadow=U(A,Bi,t.textShadow),this.textTransform=U(A,ni,t.textTransform),this.transform=U(A,si,t.transform),this.transformOrigin=U(A,gi,t.transformOrigin),this.visibility=U(A,wi,t.visibility),this.webkitTextStrokeColor=U(A,Li,t.webkitTextStrokeColor),this.webkitTextStrokeWidth=U(A,Di,t.webkitTextStrokeWidth),this.wordBreak=U(A,ci,t.wordBreak),this.zIndex=U(A,ui,t.zIndex)}return e.prototype.isVisible=function(){return this.display>0&&this.opacity>0&&this.visibility===0},e.prototype.isTransparent=function(){return HA(this.backgroundColor)},e.prototype.isTransformed=function(){return this.transform!==null},e.prototype.isPositioned=function(){return this.position!==0},e.prototype.isPositionedWithZIndex=function(){return this.isPositioned()&&!this.zIndex.auto},e.prototype.isFloating=function(){return this.float!==0},e.prototype.isInlineLevel=function(){return 
G(this.display,4)||G(this.display,33554432)||G(this.display,268435456)||G(this.display,536870912)||G(this.display,67108864)||G(this.display,134217728)},e}(),xi=function(){function e(A,t){this.content=U(A,Hi,t.content),this.quotes=U(A,yi,t.quotes)}return e}(),Lt=function(){function e(A,t){this.counterIncrement=U(A,pi,t.counterIncrement),this.counterReset=U(A,Ii,t.counterReset)}return e}(),U=function(e,A,t){var r=new fB,B=t!==null&&typeof t<"u"?t.toString():A.initialValue;r.write(B);var n=new UB(r.read());switch(A.type){case 2:var s=n.parseComponentValue();return A.parse(e,D(s)?s.value:A.initialValue);case 0:return A.parse(e,n.parseComponentValue());case 1:return A.parse(e,n.parseComponentValues());case 4:return n.parseComponentValue();case 3:switch(A.format){case"angle":return Ar.parse(e,n.parseComponentValue());case"color":return EA.parse(e,n.parseComponentValue());case"image":return At.parse(e,n.parseComponentValue());case"length":var i=n.parseComponentValue();return pA(i)?i:X;case"length-percentage":var a=n.parseComponentValue();return M(a)?a:X;case"time":return OB.parse(e,n.parseComponentValue())}break}},Ti="data-html2canvas-debug",Si=function(e){var A=e.getAttribute(Ti);switch(A){case"all":return 1;case"clone":return 2;case"parse":return 3;case"render":return 4;default:return 0}},Pr=function(e,A){var t=Si(e);return t===1||A===t},aA=function(){function e(A,t){if(this.context=A,this.textNodes=[],this.elements=[],this.flags=0,Pr(t,3))debugger;this.styles=new bi(A,window.getComputedStyle(t,null)),Yr(t)&&(this.styles.animationDuration.some(function(r){return r>0})&&(t.style.animationDuration="0s"),this.styles.transform!==null&&(t.style.transform="none")),this.bounds=ze(this.context,t),Pr(t,4)&&(this.flags|=16)}return e}(),Oi="AAAAAAAAAAAAEA4AGBkAAFAaAAACAAAAAAAIABAAGAAwADgACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAAQABIAEQATAAIABAACAAQAAgAEAAIABAAVABcAAgAEAAIABAACAAQAGAAaABwAHgAgACIAI4AlgAIABAAmwCjAKgAsAC2AL4AvQDFAMoA0gBPAVYBWgEIAAgACACMANoAYgFkAWwBdAF8AX0BhQGNAZUBlgGeAaMBlQGWAasBswF8AbsBwwF0AcsBYwHTAQgA2wG/AOMBdAF8AekB8QF0AfkB+wHiAHQBfAEIAAMC5gQIAAsCEgIIAAgAFgIeAggAIgIpAggAMQI5AkACygEIAAgASAJQAlgCYAIIAAgACAAKBQoFCgUTBRMFGQUrBSsFCAAIAAgACAAIAAgACAAIAAgACABdAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABoAmgCrwGvAQgAbgJ2AggAHgEIAAgACADnAXsCCAAIAAgAgwIIAAgACAAIAAgACACKAggAkQKZAggAPADJAAgAoQKkAqwCsgK6AsICCADJAggA0AIIAAgACAAIANYC3gIIAAgACAAIAAgACABAAOYCCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAkASoB+QIEAAgACAA8AEMCCABCBQgACABJBVAFCAAIAAgACAAIAAgACAAIAAgACABTBVoFCAAIAFoFCABfBWUFCAAIAAgACAAIAAgAbQUIAAgACAAIAAgACABzBXsFfQWFBYoFigWKBZEFigWKBYoFmAWfBaYFrgWxBbkFCAAIAAgACAAIAAgACAAIAAgACAAIAMEFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAMgFCADQBQgACAAIAAgACAAIAAgACAAIAAgACAAIAO4CCAAIAAgAiQAIAAgACABAAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAD0AggACAD8AggACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIANYFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgA
CAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAMDvwAIAAgAJAIIAAgACAAIAAgACAAIAAgACwMTAwgACAB9BOsEGwMjAwgAKwMyAwsFYgE3A/MEPwMIAEUDTQNRAwgAWQOsAGEDCAAIAAgACAAIAAgACABpAzQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFIQUoBSwFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAA
IAAgACAAIAAgACAAIAAgACAAIAAgACABtAwgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABMAEwACAAIAAgACAAIABgACAAIAAgACAC/AAgACAAyAQgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACAAIAAwAAgACAAIAAgACAAIAAgACAAIAAAARABIAAgACAAIABQASAAIAAgAIABwAEAAjgCIABsAqAC2AL0AigDQAtwC+IJIQqVAZUBWQqVAZUBlQGVAZUBlQGrC5UBlQGVAZUBlQGVAZUBlQGVAXsKlQGVAbAK6wsrDGUMpQzlDJUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAfAKAAuZA64AtwCJALoC6ADwAAgAuACgA/oEpgO6AqsD+AAIAAgAswMIAAgACAAIAIkAuwP5AfsBwwPLAwgACAAIAAgACADRA9kDCAAIAOED6QMIAAgACAAIAAgACADuA/YDCAAIAP4DyQAIAAgABgQIAAgAXQAOBAgACAAIAAgACAAIABMECAAIAAgACAAIAAgACAD8AAQBCAAIAAgAGgQiBCoECAExBAgAEAEIAAgACAAIAAgACAAIAAgACAAIAAgACAA4BAgACABABEYECAAIAAgATAQYAQgAVAQIAAgACAAIAAgACAAIAAgACAAIAFoECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAOQEIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAB+BAcACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAEABhgSMBAgACAAIAAgAlAQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAwAEAAQABAADAAMAAwADAAQABAAEAAQABAAEAAQABHATAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAdQMIAAgACAAIAAgACAAIAMkACAAIAAgAfQMIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACFA4kDCAAIAAgACAAIAOcBCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAIcDCAAIAAgACAAIAAgACAAIAAgACAAIAJEDCAAIAAgACADFAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABgBAgAZgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAbAQCBXIECAAIAHkECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABAAJwEQACjBKoEsgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAC6BMIECAAIAAgACAAIAAgACABmBAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAxwQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAGYECAAIAAgAzgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBd0FXwUIAOIF6gXxBYoF3gT5BQAGCAaKBYoFigWKBYoFigWKBYoFigWKBYoFigXWBIoFigWKBYoFigWKBYoFigWKBYsFEAaKBYoFigWKBYoFigWKBRQGCACKBYoFigWKBQgACAAIANEECAAIABgGigUgBggAJgYIAC4GMwaKBYoF0wQ3Bj4GigWKBYoFigWKBYoFigWKBYoFigWKBYoFigUIAAgACAAIAA
gACAAIAAgAigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWLBf///////wQABAAEAAQABAAEAAQABAAEAAQAAwAEAAQAAgAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAQADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUAAAAFAAUAAAAFAAUAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAQAAAAUABQAFAAUABQAFAAAAAAAFAAUAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAFAAUAAQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAAABwAHAAcAAAAHAAcABwAFAAEAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAcABwAFAAUAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAQABAAAAAAAAAAAAAAAFAAUABQAFAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAHAAcAAAAHAAcAAAAAAAUABQAHAAUAAQAHAAEABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwABAAUABQAFAAUAAAAAAAAAAAAAAAEAAQABAAEAAQABAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABQANAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAABQAHAAUABQAFAAAAAAAAAAcAB
QAFAAUABQAFAAQABAAEAAQABAAEAAQABAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUAAAAFAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAUAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAcABwAFAAcABwAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUABwAHAAUABQAFAAUAAAAAAAcABwAAAAAABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAAAAAAAAAAABQAFAAAAAAAFAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAFAAUABQAFAAUAAAAFAAUABwAAAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABwAFAAUABQAFAAAAAAAHAAcAAAAAAAcABwAFAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAAAAAAAAAHAAcABwAAAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAUABQAFAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAHAAcABQAHAAcAAAAFAAcABwAAAAcABwAFAAUAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAFAAcABwAFAAUABQAAAAUAAAAHAAcABwAHAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAHAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUAAAAFAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAUAAAAFAAUAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABwAFAAUABQAFAAUABQAAAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABQAFAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAFAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAHAAUABQAFAAUABQAFAAUABwAHAAcABwAHAAcABwAHAAUABwAHAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABwAHAAcABwAFAAUABwAHAAcAAAAAAAAAAAAHAAcABQAHAAcABwAHAAcABwAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAF
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAUABQAFAAUABQAFAAUAAAAFAAAABQAAAAAABQAFAAUABQAFAAUABQAFAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAUABQAFAAUABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABwAFAAcABwAHAAcABwAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAUABQAFAAUABwAHAAUABQAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABQAFAAcABwAHAAUABwAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAcABQAFAAUABQAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAAAAAABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAAAAAAAAAFAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAUABQAHAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAFAAUABQAFAAcABwAFAAUABwAHAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAcABwAFAAUABwAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABQAAAAAABQAFAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAcABwAAAAAAAAAAAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAcABwAFAAcABwAAAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAFAAUABQAAAAUABQAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABwAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAHAAcABQAHAAUABQAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAAABwAHAAAAAAAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAFAAUABwAFAAcABwAFAAcABQAFAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAAAAAABwAHAAcABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAFAAcABwAFAAUABQAFAAUABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAUABQAFAAcABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABQAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAAAAAAFAAUABwAHAAcABwAFAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAHAAUABQAFAAUABQAFAAUABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAABQAAAAUABQAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAHAAcAAAAFAAUAAAAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABQAFAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAABQAFAAUABQAFAAUABQAAAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAFAAUABQAFAAUADgAOAA4ADgAOAA4ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAMAAwADAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAAAAAAAAAAAAKAAoACg
AKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAAAAAAAAAAAAsADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwACwAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAADgAOAA4AAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAAAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4AAAAOAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAAAAAAAAAAAA4AAAAOAAAAAAAAAAAADgAOAA4AAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAA=",Dt="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",te=typeof Uint8Array>"u"?[]:new Uint8Array(256);for(var be=0;be<Dt.length;be++)te[Dt.charCodeAt(be)]=be;var Mi=function(e){var A=e.length*.75,t=e.length,r,B=0,n,s,i,a;e[e.length-1]==="="&&(A--,e[e.length-2]==="="&&A--);var o=typeof ArrayBuffer<"u"&&typeof Uint8Array<"u"&&typeof Uint8Array.prototype.slice<"u"?new ArrayBuffer(A):new Array(A),Q=Array.isArray(o)?o:new 
Uint8Array(o);for(r=0;r<t;r+=4)n=te[e.charCodeAt(r)],s=te[e.charCodeAt(r+1)],i=te[e.charCodeAt(r+2)],a=te[e.charCodeAt(r+3)],Q[B++]=n<<2|s>>4,Q[B++]=(s&15)<<4|i>>2,Q[B++]=(i&3)<<6|a&63;return o},Gi=function(e){for(var A=e.length,t=[],r=0;r<A;r+=2)t.push(e[r+1]<<8|e[r]);return t},Ri=function(e){for(var A=e.length,t=[],r=0;r<A;r+=4)t.push(e[r+3]<<24|e[r+2]<<16|e[r+1]<<8|e[r]);return t},xA=5,et=6+5,Cr=2,Vi=et-xA,MB=65536>>xA,Ni=1<<xA,fr=Ni-1,Xi=1024>>xA,_i=MB+Xi,Pi=_i,Ji=32,ki=Pi+Ji,Yi=65536>>et,Wi=1<<Vi,Zi=Wi-1,bt=function(e,A,t){return e.slice?e.slice(A,t):new Uint16Array(Array.prototype.slice.call(e,A,t))},qi=function(e,A,t){return e.slice?e.slice(A,t):new Uint32Array(Array.prototype.slice.call(e,A,t))},ji=function(e,A){var t=Mi(e),r=Array.isArray(t)?Ri(t):new Uint32Array(t),B=Array.isArray(t)?Gi(t):new Uint16Array(t),n=24,s=bt(B,n/2,r[4]/2),i=r[5]===2?bt(B,(n+r[4])/2):qi(r,Math.ceil((n+r[4])/4));return new zi(r[0],r[1],r[2],r[3],s,i)},zi=function(){function e(A,t,r,B,n,s){this.initialValue=A,this.errorValue=t,this.highStart=r,this.highValueIndex=B,this.index=n,this.data=s}return e.prototype.get=function(A){var t;if(A>=0){if(A<55296||A>56319&&A<=65535)return t=this.index[A>>xA],t=(t<<Cr)+(A&fr),this.data[t];if(A<=65535)return t=this.index[MB+(A-55296>>xA)],t=(t<<Cr)+(A&fr),this.data[t];if(A<this.highStart)return t=ki-Yi+(A>>et),t=this.index[t],t+=A>>xA&Zi,t=this.index[t],t=(t<<Cr)+(A&fr),this.data[t];if(A<=1114111)return this.data[this.highValueIndex]}return this.errorValue},e}(),xt="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",$i=typeof Uint8Array>"u"?[]:new Uint8Array(256);for(var xe=0;xe<xt.length;xe++)$i[xt.charCodeAt(xe)]=xe;var Ao=1,Ur=2,Fr=3,Tt=4,St=5,eo=7,Ot=8,hr=9,dr=10,Mt=11,Gt=12,Rt=13,Vt=14,Er=15,ro=function(e){for(var A=[],t=0,r=e.length;t<r;){var B=e.charCodeAt(t++);if(B>=55296&&B<=56319&&t<r){var n=e.charCodeAt(t++);(n&64512)===56320?A.push(((B&1023)<<10)+(n&1023)+65536):(A.push(B),t--)}else A.push(B)}return A},to=function(){for(var e=[],A=0;A<arguments.length;A++)e[A]=arguments[A];if(String.fromCodePoint)return String.fromCodePoint.apply(String,e);var t=e.length;if(!t)return"";for(var r=[],B=-1,n="";++B<t;){var s=e[B];s<=65535?r.push(s):(s-=65536,r.push((s>>10)+55296,s%1024+56320)),(B+1===t||r.length>16384)&&(n+=String.fromCharCode.apply(String,r),r.length=0)}return n},Bo=ji(Oi),z="×",Hr="÷",no=function(e){return Bo.get(e)},so=function(e,A,t){var r=t-2,B=A[r],n=A[t-1],s=A[t];if(n===Ur&&s===Fr)return z;if(n===Ur||n===Fr||n===Tt||s===Ur||s===Fr||s===Tt)return Hr;if(n===Ot&&[Ot,hr,Mt,Gt].indexOf(s)!==-1||(n===Mt||n===hr)&&(s===hr||s===dr)||(n===Gt||n===dr)&&s===dr||s===Rt||s===St||s===eo||n===Ao)return z;if(n===Rt&&s===Vt){for(;B===St;)B=A[--r];if(B===Vt)return z}if(n===Er&&s===Er){for(var i=0;B===Er;)i++,B=A[--r];if(i%2===0)return z}return Hr},ao=function(e){var A=ro(e),t=A.length,r=0,B=0,n=A.map(no);return{next:function(){if(r>=t)return{done:!0,value:null};for(var s=z;r<t&&(s=so(A,n,++r))===z;);if(s!==z||r===t){var i=to.apply(null,A.slice(B,r));return B=r,{value:i,done:!1}}return{done:!0,value:null}}}},io=function(e){for(var A=ao(e),t=[],r;!(r=A.next()).done;)r.value&&t.push(r.value.slice());return t},oo=function(e){var A=123;if(e.createRange){var t=e.createRange();if(t.getBoundingClientRect){var r=e.createElement("boundtest");r.style.height=A+"px",r.style.display="block",e.body.appendChild(r),t.selectNode(r);var B=t.getBoundingClientRect(),n=Math.round(B.height);if(e.body.removeChild(r),n===A)return!0}}return!1},Qo=function(e){var 
A=e.createElement("boundtest");A.style.width="50px",A.style.display="block",A.style.fontSize="12px",A.style.letterSpacing="0px",A.style.wordSpacing="0px",e.body.appendChild(A);var t=e.createRange();A.innerHTML=typeof"".repeat=="function"?"&#128104;".repeat(10):"";var r=A.firstChild,B=$e(r.data).map(function(a){return S(a)}),n=0,s={},i=B.every(function(a,o){t.setStart(r,n),t.setEnd(r,n+a.length);var Q=t.getBoundingClientRect();n+=a.length;var g=Q.x>s.x||Q.y>s.y;return s=Q,o===0?!0:g});return e.body.removeChild(A),i},go=function(){return typeof new Image().crossOrigin<"u"},wo=function(){return typeof new XMLHttpRequest().responseType=="string"},co=function(e){var A=new Image,t=e.createElement("canvas"),r=t.getContext("2d");if(!r)return!1;A.src="data:image/svg+xml,<svg xmlns='http://www.w3.org/2000/svg'></svg>";try{r.drawImage(A,0,0),t.toDataURL()}catch{return!1}return!0},Nt=function(e){return e[0]===0&&e[1]===255&&e[2]===0&&e[3]===255},uo=function(e){var A=e.createElement("canvas"),t=100;A.width=t,A.height=t;var r=A.getContext("2d");if(!r)return Promise.reject(!1);r.fillStyle="rgb(0, 255, 0)",r.fillRect(0,0,t,t);var B=new Image,n=A.toDataURL();B.src=n;var s=Jr(t,t,0,0,B);return r.fillStyle="red",r.fillRect(0,0,t,t),Xt(s).then(function(i){r.drawImage(i,0,0);var a=r.getImageData(0,0,t,t).data;r.fillStyle="red",r.fillRect(0,0,t,t);var o=e.createElement("div");return o.style.backgroundImage="url("+n+")",o.style.height=t+"px",Nt(a)?Xt(Jr(t,t,0,0,o)):Promise.reject(!1)}).then(function(i){return r.drawImage(i,0,0),Nt(r.getImageData(0,0,t,t).data)}).catch(function(){return!1})},Jr=function(e,A,t,r,B){var n="http://www.w3.org/2000/svg",s=document.createElementNS(n,"svg"),i=document.createElementNS(n,"foreignObject");return s.setAttributeNS(null,"width",e.toString()),s.setAttributeNS(null,"height",A.toString()),i.setAttributeNS(null,"width","100%"),i.setAttributeNS(null,"height","100%"),i.setAttributeNS(null,"x",t.toString()),i.setAttributeNS(null,"y",r.toString()),i.setAttributeNS(null,"externalResourcesRequired","true"),s.appendChild(i),i.appendChild(B),s},Xt=function(e){return new Promise(function(A,t){var r=new Image;r.onload=function(){return A(r)},r.onerror=t,r.src="data:image/svg+xml;charset=utf-8,"+encodeURIComponent(new XMLSerializer().serializeToString(e))})},N={get SUPPORT_RANGE_BOUNDS(){var e=oo(document);return Object.defineProperty(N,"SUPPORT_RANGE_BOUNDS",{value:e}),e},get SUPPORT_WORD_BREAKING(){var e=N.SUPPORT_RANGE_BOUNDS&&Qo(document);return Object.defineProperty(N,"SUPPORT_WORD_BREAKING",{value:e}),e},get SUPPORT_SVG_DRAWING(){var e=co(document);return Object.defineProperty(N,"SUPPORT_SVG_DRAWING",{value:e}),e},get SUPPORT_FOREIGNOBJECT_DRAWING(){var e=typeof Array.from=="function"&&typeof window.fetch=="function"?uo(document):Promise.resolve(!1);return Object.defineProperty(N,"SUPPORT_FOREIGNOBJECT_DRAWING",{value:e}),e},get SUPPORT_CORS_IMAGES(){var e=go();return Object.defineProperty(N,"SUPPORT_CORS_IMAGES",{value:e}),e},get SUPPORT_RESPONSE_TYPE(){var e=wo();return Object.defineProperty(N,"SUPPORT_RESPONSE_TYPE",{value:e}),e},get SUPPORT_CORS_XHR(){var e="withCredentials"in new XMLHttpRequest;return Object.defineProperty(N,"SUPPORT_CORS_XHR",{value:e}),e},get SUPPORT_NATIVE_TEXT_SEGMENTATION(){var e=!!(typeof Intl<"u"&&Intl.Segmenter);return Object.defineProperty(N,"SUPPORT_NATIVE_TEXT_SEGMENTATION",{value:e}),e}},ie=function(){function e(A,t){this.text=A,this.bounds=t}return e}(),lo=function(e,A,t,r){var B=Uo(A,t),n=[],s=0;return 
B.forEach(function(i){if(t.textDecorationLine.length||i.trim().length>0)if(N.SUPPORT_RANGE_BOUNDS){var a=_t(r,s,i.length).getClientRects();if(a.length>1){var o=rt(i),Q=0;o.forEach(function(w){n.push(new ie(w,cA.fromDOMRectList(e,_t(r,Q+s,w.length).getClientRects()))),Q+=w.length})}else n.push(new ie(i,cA.fromDOMRectList(e,a)))}else{var g=r.splitText(i.length);n.push(new ie(i,Co(e,r))),r=g}else N.SUPPORT_RANGE_BOUNDS||(r=r.splitText(i.length));s+=i.length}),n},Co=function(e,A){var t=A.ownerDocument;if(t){var r=t.createElement("html2canvaswrapper");r.appendChild(A.cloneNode(!0));var B=A.parentNode;if(B){B.replaceChild(r,A);var n=ze(e,r);return r.firstChild&&B.replaceChild(r.firstChild,r),n}}return cA.EMPTY},_t=function(e,A,t){var r=e.ownerDocument;if(!r)throw new Error("Node has no owner document");var B=r.createRange();return B.setStart(e,A),B.setEnd(e,A+t),B},rt=function(e){if(N.SUPPORT_NATIVE_TEXT_SEGMENTATION){var A=new Intl.Segmenter(void 0,{granularity:"grapheme"});return Array.from(A.segment(e)).map(function(t){return t.segment})}return io(e)},fo=function(e,A){if(N.SUPPORT_NATIVE_TEXT_SEGMENTATION){var t=new Intl.Segmenter(void 0,{granularity:"word"});return Array.from(t.segment(e)).map(function(r){return r.segment})}return ho(e,A)},Uo=function(e,A){return A.letterSpacing!==0?rt(e):fo(e,A)},Fo=[32,160,4961,65792,65793,4153,4241],ho=function(e,A){for(var t=Yn(e,{lineBreak:A.lineBreak,wordBreak:A.overflowWrap==="break-word"?"break-word":A.wordBreak}),r=[],B,n=function(){if(B.value){var s=B.value.slice(),i=$e(s),a="";i.forEach(function(o){Fo.indexOf(o)===-1?a+=S(o):(a.length&&r.push(a),r.push(S(o)),a="")}),a.length&&r.push(a)}};!(B=t.next()).done;)n();return r},Eo=function(){function e(A,t,r){this.text=Ho(t.data,r.textTransform),this.textBounds=lo(A,this.text,r,t)}return e}(),Ho=function(e,A){switch(A){case 1:return e.toLowerCase();case 3:return e.replace(po,Io);case 2:return e.toUpperCase();default:return e}},po=/(^|\s|:|-|\(|\))([a-z])/g,Io=function(e,A,t){return e.length>0?A+t.toUpperCase():e},GB=function(e){tA(A,e);function A(t,r){var B=e.call(this,t,r)||this;return B.src=r.currentSrc||r.src,B.intrinsicWidth=r.naturalWidth,B.intrinsicHeight=r.naturalHeight,B.context.cache.addImage(B.src),B}return A}(aA),RB=function(e){tA(A,e);function A(t,r){var B=e.call(this,t,r)||this;return B.canvas=r,B.intrinsicWidth=r.width,B.intrinsicHeight=r.height,B}return A}(aA),VB=function(e){tA(A,e);function A(t,r){var B=e.call(this,t,r)||this,n=new XMLSerializer,s=ze(t,r);return r.setAttribute("width",s.width+"px"),r.setAttribute("height",s.height+"px"),B.svg="data:image/svg+xml,"+encodeURIComponent(n.serializeToString(r)),B.intrinsicWidth=r.width.baseVal.value,B.intrinsicHeight=r.height.baseVal.value,B.context.cache.addImage(B.svg),B}return A}(aA),NB=function(e){tA(A,e);function A(t,r){var B=e.call(this,t,r)||this;return B.value=r.value,B}return A}(aA),kr=function(e){tA(A,e);function A(t,r){var B=e.call(this,t,r)||this;return B.start=r.start,B.reversed=typeof r.reversed=="boolean"&&r.reversed===!0,B}return A}(aA),vo=[{type:15,flags:0,unit:"px",number:3}],yo=[{type:16,flags:0,number:50}],Ko=function(e){return e.width>e.height?new cA(e.left+(e.width-e.height)/2,e.top,e.height,e.height):e.width<e.height?new cA(e.left,e.top+(e.height-e.width)/2,e.width,e.width):e},mo=function(e){var A=e.type===Lo?new Array(e.value.length+1).join("•"):e.value;return A.length===0?e.placeholder||"":A},Je="checkbox",ke="radio",Lo="password",Pt=707406591,tt=function(e){tA(A,e);function A(t,r){var 
B=e.call(this,t,r)||this;switch(B.type=r.type.toLowerCase(),B.checked=r.checked,B.value=mo(r),(B.type===Je||B.type===ke)&&(B.styles.backgroundColor=3739148031,B.styles.borderTopColor=B.styles.borderRightColor=B.styles.borderBottomColor=B.styles.borderLeftColor=2779096575,B.styles.borderTopWidth=B.styles.borderRightWidth=B.styles.borderBottomWidth=B.styles.borderLeftWidth=1,B.styles.borderTopStyle=B.styles.borderRightStyle=B.styles.borderBottomStyle=B.styles.borderLeftStyle=1,B.styles.backgroundClip=[0],B.styles.backgroundOrigin=[0],B.bounds=Ko(B.bounds)),B.type){case Je:B.styles.borderTopRightRadius=B.styles.borderTopLeftRadius=B.styles.borderBottomRightRadius=B.styles.borderBottomLeftRadius=vo;break;case ke:B.styles.borderTopRightRadius=B.styles.borderTopLeftRadius=B.styles.borderBottomRightRadius=B.styles.borderBottomLeftRadius=yo;break}return B}return A}(aA),XB=function(e){tA(A,e);function A(t,r){var B=e.call(this,t,r)||this,n=r.options[r.selectedIndex||0];return B.value=n&&n.text||"",B}return A}(aA),_B=function(e){tA(A,e);function A(t,r){var B=e.call(this,t,r)||this;return B.value=r.value,B}return A}(aA),PB=function(e){tA(A,e);function A(t,r){var B=e.call(this,t,r)||this;B.src=r.src,B.width=parseInt(r.width,10)||0,B.height=parseInt(r.height,10)||0,B.backgroundColor=B.styles.backgroundColor;try{if(r.contentWindow&&r.contentWindow.document&&r.contentWindow.document.documentElement){B.tree=kB(t,r.contentWindow.document.documentElement);var n=r.contentWindow.document.documentElement?se(t,getComputedStyle(r.contentWindow.document.documentElement).backgroundColor):wA.TRANSPARENT,s=r.contentWindow.document.body?se(t,getComputedStyle(r.contentWindow.document.body).backgroundColor):wA.TRANSPARENT;B.backgroundColor=HA(n)?HA(s)?B.styles.backgroundColor:s:n}}catch{}return B}return A}(aA),Do=["OL","UL","MENU"],Ve=function(e,A,t,r){for(var B=A.firstChild,n=void 0;B;B=n)if(n=B.nextSibling,YB(B)&&B.data.trim().length>0)t.textNodes.push(new Eo(e,B,t.styles));else if(_A(B))if(jB(B)&&B.assignedNodes)B.assignedNodes().forEach(function(i){return Ve(e,i,t,r)});else{var s=JB(e,B);s.styles.isVisible()&&(bo(B,s,r)?s.flags|=4:xo(s.styles)&&(s.flags|=2),Do.indexOf(B.tagName)!==-1&&(s.flags|=8),t.elements.push(s),B.slot,B.shadowRoot?Ve(e,B.shadowRoot,s,r):!Ye(B)&&!WB(B)&&!We(B)&&Ve(e,B,s,r))}},JB=function(e,A){return Wr(A)?new GB(e,A):ZB(A)?new RB(e,A):WB(A)?new VB(e,A):To(A)?new NB(e,A):So(A)?new kr(e,A):Oo(A)?new tt(e,A):We(A)?new XB(e,A):Ye(A)?new _B(e,A):qB(A)?new PB(e,A):new aA(e,A)},kB=function(e,A){var t=JB(e,A);return t.flags|=4,Ve(e,A,t,t),t},bo=function(e,A,t){return A.styles.isPositionedWithZIndex()||A.styles.opacity<1||A.styles.isTransformed()||Bt(e)&&t.styles.isTransparent()},xo=function(e){return e.isPositioned()||e.isFloating()},YB=function(e){return e.nodeType===Node.TEXT_NODE},_A=function(e){return e.nodeType===Node.ELEMENT_NODE},Yr=function(e){return _A(e)&&typeof e.style<"u"&&!Ne(e)},Ne=function(e){return typeof e.className=="object"},To=function(e){return e.tagName==="LI"},So=function(e){return e.tagName==="OL"},Oo=function(e){return e.tagName==="INPUT"},Mo=function(e){return e.tagName==="HTML"},WB=function(e){return e.tagName==="svg"},Bt=function(e){return e.tagName==="BODY"},ZB=function(e){return e.tagName==="CANVAS"},Jt=function(e){return e.tagName==="VIDEO"},Wr=function(e){return e.tagName==="IMG"},qB=function(e){return e.tagName==="IFRAME"},kt=function(e){return e.tagName==="STYLE"},Go=function(e){return e.tagName==="SCRIPT"},Ye=function(e){return 
e.tagName==="TEXTAREA"},We=function(e){return e.tagName==="SELECT"},jB=function(e){return e.tagName==="SLOT"},Yt=function(e){return e.tagName.indexOf("-")>0},Ro=function(){function e(){this.counters={}}return e.prototype.getCounterValue=function(A){var t=this.counters[A];return t&&t.length?t[t.length-1]:1},e.prototype.getCounterValues=function(A){var t=this.counters[A];return t||[]},e.prototype.pop=function(A){var t=this;A.forEach(function(r){return t.counters[r].pop()})},e.prototype.parse=function(A){var t=this,r=A.counterIncrement,B=A.counterReset,n=!0;r!==null&&r.forEach(function(i){var a=t.counters[i.counter];a&&i.increment!==0&&(n=!1,a.length||a.push(1),a[Math.max(0,a.length-1)]+=i.increment)});var s=[];return n&&B.forEach(function(i){var a=t.counters[i.counter];s.push(i.counter),a||(a=t.counters[i.counter]=[]),a.push(i.reset)}),s},e}(),Wt={integers:[1e3,900,500,400,100,90,50,40,10,9,5,4,1],values:["M","CM","D","CD","C","XC","L","XL","X","IX","V","IV","I"]},Zt={integers:[9e3,8e3,7e3,6e3,5e3,4e3,3e3,2e3,1e3,900,800,700,600,500,400,300,200,100,90,80,70,60,50,40,30,20,10,9,8,7,6,5,4,3,2,1],values:["Ք","Փ","Ւ","Ց","Ր","Տ","Վ","Ս","Ռ","Ջ","Պ","Չ","Ո","Շ","Ն","Յ","Մ","Ճ","Ղ","Ձ","Հ","Կ","Ծ","Խ","Լ","Ի","Ժ","Թ","Ը","Է","Զ","Ե","Դ","Գ","Բ","Ա"]},Vo={integers:[1e4,9e3,8e3,7e3,6e3,5e3,4e3,3e3,2e3,1e3,400,300,200,100,90,80,70,60,50,40,30,20,19,18,17,16,15,10,9,8,7,6,5,4,3,2,1],values:["י׳","ט׳","ח׳","ז׳","ו׳","ה׳","ד׳","ג׳","ב׳","א׳","ת","ש","ר","ק","צ","פ","ע","ס","נ","מ","ל","כ","יט","יח","יז","טז","טו","י","ט","ח","ז","ו","ה","ד","ג","ב","א"]},No={integers:[1e4,9e3,8e3,7e3,6e3,5e3,4e3,3e3,2e3,1e3,900,800,700,600,500,400,300,200,100,90,80,70,60,50,40,30,20,10,9,8,7,6,5,4,3,2,1],values:["ჵ","ჰ","ჯ","ჴ","ხ","ჭ","წ","ძ","ც","ჩ","შ","ყ","ღ","ქ","ფ","ჳ","ტ","ს","რ","ჟ","პ","ო","ჲ","ნ","მ","ლ","კ","ი","თ","ჱ","ზ","ვ","ე","დ","გ","ბ","ა"]},GA=function(e,A,t,r,B,n){return e<A||e>t?we(e,B,n.length>0):r.integers.reduce(function(s,i,a){for(;e>=i;)e-=i,s+=r.values[a];return s},"")+n},zB=function(e,A,t,r){var B="";do t||e--,B=r(e)+B,e/=A;while(e*A>=A);return B},T=function(e,A,t,r,B){var n=t-A+1;return(e<0?"-":"")+(zB(Math.abs(e),n,r,function(s){return S(Math.floor(s%n)+A)})+B)},mA=function(e,A,t){t===void 0&&(t=". ");var r=A.length;return zB(Math.abs(e),r,!1,function(B){return A[Math.floor(B%r)]})+t},NA=1<<0,CA=1<<1,fA=1<<2,Be=1<<3,gA=function(e,A,t,r,B,n){if(e<-9999||e>9999)return we(e,4,B.length>0);var s=Math.abs(e),i=B;if(s===0)return A[0]+i;for(var a=0;s>0&&a<=4;a++){var o=s%10;o===0&&G(n,NA)&&i!==""?i=A[o]+i:o>1||o===1&&a===0||o===1&&a===1&&G(n,CA)||o===1&&a===1&&G(n,fA)&&e>100||o===1&&a>1&&G(n,Be)?i=A[o]+(a>0?t[a-1]:"")+i:o===1&&a>0&&(i=t[a-1]+i),s=Math.floor(s/10)}return(e<0?r:"")+i},qt="十百千萬",jt="拾佰仟萬",zt="マイナス",pr="마이너스",we=function(e,A,t){var r=t?". ":"",B=t?"、":"",n=t?", ":"",s=t?" 
":"";switch(A){case 0:return"•"+s;case 1:return"◦"+s;case 2:return"◾"+s;case 5:var i=T(e,48,57,!0,r);return i.length<4?"0"+i:i;case 4:return mA(e,"〇一二三四五六七八九",B);case 6:return GA(e,1,3999,Wt,3,r).toLowerCase();case 7:return GA(e,1,3999,Wt,3,r);case 8:return T(e,945,969,!1,r);case 9:return T(e,97,122,!1,r);case 10:return T(e,65,90,!1,r);case 11:return T(e,1632,1641,!0,r);case 12:case 49:return GA(e,1,9999,Zt,3,r);case 35:return GA(e,1,9999,Zt,3,r).toLowerCase();case 13:return T(e,2534,2543,!0,r);case 14:case 30:return T(e,6112,6121,!0,r);case 15:return mA(e,"子丑寅卯辰巳午未申酉戌亥",B);case 16:return mA(e,"甲乙丙丁戊己庚辛壬癸",B);case 17:case 48:return gA(e,"零一二三四五六七八九",qt,"負",B,CA|fA|Be);case 47:return gA(e,"零壹貳參肆伍陸柒捌玖",jt,"負",B,NA|CA|fA|Be);case 42:return gA(e,"零一二三四五六七八九",qt,"负",B,CA|fA|Be);case 41:return gA(e,"零壹贰叁肆伍陆柒捌玖",jt,"负",B,NA|CA|fA|Be);case 26:return gA(e,"〇一二三四五六七八九","十百千万",zt,B,0);case 25:return gA(e,"零壱弐参四伍六七八九","拾百千万",zt,B,NA|CA|fA);case 31:return gA(e,"영일이삼사오육칠팔구","십백천만",pr,n,NA|CA|fA);case 33:return gA(e,"零一二三四五六七八九","十百千萬",pr,n,0);case 32:return gA(e,"零壹貳參四五六七八九","拾百千",pr,n,NA|CA|fA);case 18:return T(e,2406,2415,!0,r);case 20:return GA(e,1,19999,No,3,r);case 21:return T(e,2790,2799,!0,r);case 22:return T(e,2662,2671,!0,r);case 22:return GA(e,1,10999,Vo,3,r);case 23:return mA(e,"あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわゐゑをん");case 24:return mA(e,"いろはにほへとちりぬるをわかよたれそつねならむうゐのおくやまけふこえてあさきゆめみしゑひもせす");case 27:return T(e,3302,3311,!0,r);case 28:return mA(e,"アイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワヰヱヲン",B);case 29:return mA(e,"イロハニホヘトチリヌルヲワカヨタレソツネナラムウヰノオクヤマケフコエテアサキユメミシヱヒモセス",B);case 34:return T(e,3792,3801,!0,r);case 37:return T(e,6160,6169,!0,r);case 38:return T(e,4160,4169,!0,r);case 39:return T(e,2918,2927,!0,r);case 40:return T(e,1776,1785,!0,r);case 43:return T(e,3046,3055,!0,r);case 44:return T(e,3174,3183,!0,r);case 45:return T(e,3664,3673,!0,r);case 46:return T(e,3872,3881,!0,r);case 3:default:return T(e,48,57,!0,r)}},$B="data-html2canvas-ignore",$t=function(){function e(A,t,r){if(this.context=A,this.options=r,this.scrolledElements=[],this.referenceElement=t,this.counters=new Ro,this.quoteDepth=0,!t.ownerDocument)throw new Error("Cloned element does not have an owner document");this.documentElement=this.cloneNode(t.ownerDocument.documentElement,!1)}return e.prototype.toIFrame=function(A,t){var r=this,B=Xo(A,t);if(!B.contentWindow)return Promise.reject("Unable to find iframe window");var n=A.defaultView.pageXOffset,s=A.defaultView.pageYOffset,i=B.contentWindow,a=i.document,o=Jo(B).then(function(){return J(r,void 0,void 0,function(){var Q,g;return _(this,function(w){switch(w.label){case 0:return this.scrolledElements.forEach(Zo),i&&(i.scrollTo(t.left,t.top),/(iPad|iPhone|iPod)/g.test(navigator.userAgent)&&(i.scrollY!==t.top||i.scrollX!==t.left)&&(this.context.logger.warn("Unable to restore scroll position for cloned document"),this.context.windowBounds=this.context.windowBounds.add(i.scrollX-t.left,i.scrollY-t.top,0,0))),Q=this.options.onclone,g=this.clonedReferenceElement,typeof g>"u"?[2,Promise.reject("Error finding the "+this.referenceElement.nodeName+" in the cloned document")]:a.fonts&&a.fonts.ready?[4,a.fonts.ready]:[3,2];case 1:w.sent(),w.label=2;case 2:return/(AppleWebKit)/g.test(navigator.userAgent)?[4,Po(a)]:[3,4];case 3:w.sent(),w.label=4;case 4:return typeof Q=="function"?[2,Promise.resolve().then(function(){return Q(a,g)}).then(function(){return B})]:[2,B]}})})});return 
a.open(),a.write(Yo(document.doctype)+"<html></html>"),Wo(this.referenceElement.ownerDocument,n,s),a.replaceChild(a.adoptNode(this.documentElement),a.documentElement),a.close(),o},e.prototype.createElementClone=function(A){if(Pr(A,2))debugger;if(ZB(A))return this.createCanvasClone(A);if(Jt(A))return this.createVideoClone(A);if(kt(A))return this.createStyleClone(A);var t=A.cloneNode(!1);return Wr(t)&&(Wr(A)&&A.currentSrc&&A.currentSrc!==A.src&&(t.src=A.currentSrc,t.srcset=""),t.loading==="lazy"&&(t.loading="eager")),Yt(t)?this.createCustomElementClone(t):t},e.prototype.createCustomElementClone=function(A){var t=document.createElement("html2canvascustomelement");return Ir(A.style,t),t},e.prototype.createStyleClone=function(A){try{var t=A.sheet;if(t&&t.cssRules){var r=[].slice.call(t.cssRules,0).reduce(function(n,s){return s&&typeof s.cssText=="string"?n+s.cssText:n},""),B=A.cloneNode(!1);return B.textContent=r,B}}catch(n){if(this.context.logger.error("Unable to access cssRules property",n),n.name!=="SecurityError")throw n}return A.cloneNode(!1)},e.prototype.createCanvasClone=function(A){var t;if(this.options.inlineImages&&A.ownerDocument){var r=A.ownerDocument.createElement("img");try{return r.src=A.toDataURL(),r}catch{this.context.logger.info("Unable to inline canvas contents, canvas is tainted",A)}}var B=A.cloneNode(!1);try{B.width=A.width,B.height=A.height;var n=A.getContext("2d"),s=B.getContext("2d");if(s)if(!this.options.allowTaint&&n)s.putImageData(n.getImageData(0,0,A.width,A.height),0,0);else{var i=(t=A.getContext("webgl2"))!==null&&t!==void 0?t:A.getContext("webgl");if(i){var a=i.getContextAttributes();(a==null?void 0:a.preserveDrawingBuffer)===!1&&this.context.logger.warn("Unable to clone WebGL context as it has preserveDrawingBuffer=false",A)}s.drawImage(A,0,0)}return B}catch{this.context.logger.info("Unable to clone canvas as it is tainted",A)}return B},e.prototype.createVideoClone=function(A){var t=A.ownerDocument.createElement("canvas");t.width=A.offsetWidth,t.height=A.offsetHeight;var r=t.getContext("2d");try{return r&&(r.drawImage(A,0,0,t.width,t.height),this.options.allowTaint||r.getImageData(0,0,t.width,t.height)),t}catch{this.context.logger.info("Unable to clone video as it is tainted",A)}var B=A.ownerDocument.createElement("canvas");return B.width=A.offsetWidth,B.height=A.offsetHeight,B},e.prototype.appendChildNode=function(A,t,r){(!_A(t)||!Go(t)&&!t.hasAttribute($B)&&(typeof this.options.ignoreElements!="function"||!this.options.ignoreElements(t)))&&(!this.options.copyStyles||!_A(t)||!kt(t))&&A.appendChild(this.cloneNode(t,r))},e.prototype.cloneChildNodes=function(A,t,r){for(var B=this,n=A.shadowRoot?A.shadowRoot.firstChild:A.firstChild;n;n=n.nextSibling)if(_A(n)&&jB(n)&&typeof n.assignedNodes=="function"){var s=n.assignedNodes();s.length&&s.forEach(function(i){return B.appendChildNode(t,i,r)})}else this.appendChildNode(t,n,r)},e.prototype.cloneNode=function(A,t){if(YB(A))return document.createTextNode(A.data);if(!A.ownerDocument)return A.cloneNode(!1);var r=A.ownerDocument.defaultView;if(r&&_A(A)&&(Yr(A)||Ne(A))){var B=this.createElementClone(A);B.style.transitionProperty="none";var n=r.getComputedStyle(A),s=r.getComputedStyle(A,":before"),i=r.getComputedStyle(A,":after");this.referenceElement===A&&Yr(B)&&(this.clonedReferenceElement=B),Bt(B)&&zo(B);var a=this.counters.parse(new Lt(this.context,n)),o=this.resolvePseudoContent(A,B,s,oe.BEFORE);Yt(A)&&(t=!0),Jt(A)||this.cloneChildNodes(A,B,t),o&&B.insertBefore(o,B.firstChild);var 
Q=this.resolvePseudoContent(A,B,i,oe.AFTER);return Q&&B.appendChild(Q),this.counters.pop(a),(n&&(this.options.copyStyles||Ne(A))&&!qB(A)||t)&&Ir(n,B),(A.scrollTop!==0||A.scrollLeft!==0)&&this.scrolledElements.push([B,A.scrollLeft,A.scrollTop]),(Ye(A)||We(A))&&(Ye(B)||We(B))&&(B.value=A.value),B}return A.cloneNode(!1)},e.prototype.resolvePseudoContent=function(A,t,r,B){var n=this;if(r){var s=r.content,i=t.ownerDocument;if(!(!i||!s||s==="none"||s==="-moz-alt-content"||r.display==="none")){this.counters.parse(new Lt(this.context,r));var a=new xi(this.context,r),o=i.createElement("html2canvaspseudoelement");Ir(r,o),a.content.forEach(function(g){if(g.type===0)o.appendChild(i.createTextNode(g.value));else if(g.type===22){var w=i.createElement("img");w.src=g.value,w.style.opacity="1",o.appendChild(w)}else if(g.type===18){if(g.name==="attr"){var f=g.values.filter(D);f.length&&o.appendChild(i.createTextNode(A.getAttribute(f[0].value)||""))}else if(g.name==="counter"){var c=g.values.filter(JA),u=c[0],H=c[1];if(u&&D(u)){var h=n.counters.getCounterValue(u.value),F=H&&D(H)?_r.parse(n.context,H.value):3;o.appendChild(i.createTextNode(we(h,F,!1)))}}else if(g.name==="counters"){var K=g.values.filter(JA),u=K[0],p=K[1],H=K[2];if(u&&D(u)){var d=n.counters.getCounterValues(u.value),C=H&&D(H)?_r.parse(n.context,H.value):3,v=p&&p.type===0?p.value:"",y=d.map(function(k){return we(k,C,!1)}).join(v);o.appendChild(i.createTextNode(y))}}}else if(g.type===20)switch(g.value){case"open-quote":o.appendChild(i.createTextNode(mt(a.quotes,n.quoteDepth++,!0)));break;case"close-quote":o.appendChild(i.createTextNode(mt(a.quotes,--n.quoteDepth,!1)));break;default:o.appendChild(i.createTextNode(g.value))}}),o.className=Zr+" "+qr;var Q=B===oe.BEFORE?" "+Zr:" "+qr;return Ne(t)?t.className.baseValue+=Q:t.className+=Q,o}}},e.destroy=function(A){return A.parentNode?(A.parentNode.removeChild(A),!0):!1},e}(),oe;(function(e){e[e.BEFORE=0]="BEFORE",e[e.AFTER=1]="AFTER"})(oe||(oe={}));var Xo=function(e,A){var t=e.createElement("iframe");return t.className="html2canvas-container",t.style.visibility="hidden",t.style.position="fixed",t.style.left="-10000px",t.style.top="0px",t.style.border="0",t.width=A.width.toString(),t.height=A.height.toString(),t.scrolling="no",t.setAttribute($B,"true"),e.body.appendChild(t),t},_o=function(e){return new Promise(function(A){if(e.complete){A();return}if(!e.src){A();return}e.onload=A,e.onerror=A})},Po=function(e){return Promise.all([].slice.call(e.images,0).map(_o))},Jo=function(e){return new Promise(function(A,t){var r=e.contentWindow;if(!r)return t("No window assigned for iframe");var B=r.document;r.onload=e.onload=function(){r.onload=e.onload=null;var n=setInterval(function(){B.body.childNodes.length>0&&B.readyState==="complete"&&(clearInterval(n),A(e))},50)}})},ko=["all","d","content"],Ir=function(e,A){for(var t=e.length-1;t>=0;t--){var r=e.item(t);ko.indexOf(r)===-1&&A.style.setProperty(r,e.getPropertyValue(r))}return A},Yo=function(e){var A="";return e&&(A+="<!DOCTYPE ",e.name&&(A+=e.name),e.internalSubset&&(A+=e.internalSubset),e.publicId&&(A+='"'+e.publicId+'"'),e.systemId&&(A+='"'+e.systemId+'"'),A+=">"),A},Wo=function(e,A,t){e&&e.defaultView&&(A!==e.defaultView.pageXOffset||t!==e.defaultView.pageYOffset)&&e.defaultView.scrollTo(A,t)},Zo=function(e){var A=e[0],t=e[1],r=e[2];A.scrollLeft=t,A.scrollTop=r},qo=":before",jo=":after",Zr="___html2canvas___pseudoelement_before",qr="___html2canvas___pseudoelement_after",AB=`{ content: "" !important; display: none !important; 
}`,zo=function(e){$o(e,"."+Zr+qo+AB+` .`+qr+jo+AB)},$o=function(e,A){var t=e.ownerDocument;if(t){var r=t.createElement("style");r.textContent=A,e.appendChild(r)}},An=function(){function e(){}return e.getOrigin=function(A){var t=e._link;return t?(t.href=A,t.href=t.href,t.protocol+t.hostname+t.port):"about:blank"},e.isSameOrigin=function(A){return e.getOrigin(A)===e._origin},e.setContext=function(A){e._link=A.document.createElement("a"),e._origin=e.getOrigin(A.location.href)},e._origin="about:blank",e}(),AQ=function(){function e(A,t){this.context=A,this._options=t,this._cache={}}return e.prototype.addImage=function(A){var t=Promise.resolve();return this.has(A)||(yr(A)||BQ(A))&&(this._cache[A]=this.loadImage(A)).catch(function(){}),t},e.prototype.match=function(A){return this._cache[A]},e.prototype.loadImage=function(A){return J(this,void 0,void 0,function(){var t,r,B,n,s=this;return _(this,function(i){switch(i.label){case 0:return t=An.isSameOrigin(A),r=!vr(A)&&this._options.useCORS===!0&&N.SUPPORT_CORS_IMAGES&&!t,B=!vr(A)&&!t&&!yr(A)&&typeof this._options.proxy=="string"&&N.SUPPORT_CORS_XHR&&!r,!t&&this._options.allowTaint===!1&&!vr(A)&&!yr(A)&&!B&&!r?[2]:(n=A,B?[4,this.proxy(n)]:[3,2]);case 1:n=i.sent(),i.label=2;case 2:return this.context.logger.debug("Added image "+A.substring(0,256)),[4,new Promise(function(a,o){var Q=new Image;Q.onload=function(){return a(Q)},Q.onerror=o,(nQ(n)||r)&&(Q.crossOrigin="anonymous"),Q.src=n,Q.complete===!0&&setTimeout(function(){return a(Q)},500),s._options.imageTimeout>0&&setTimeout(function(){return o("Timed out ("+s._options.imageTimeout+"ms) loading image")},s._options.imageTimeout)})];case 3:return[2,i.sent()]}})})},e.prototype.has=function(A){return typeof this._cache[A]<"u"},e.prototype.keys=function(){return Promise.resolve(Object.keys(this._cache))},e.prototype.proxy=function(A){var t=this,r=this._options.proxy;if(!r)throw new Error("No proxy defined");var B=A.substring(0,256);return new Promise(function(n,s){var i=N.SUPPORT_RESPONSE_TYPE?"blob":"text",a=new XMLHttpRequest;a.onload=function(){if(a.status===200)if(i==="text")n(a.response);else{var g=new FileReader;g.addEventListener("load",function(){return n(g.result)},!1),g.addEventListener("error",function(w){return s(w)},!1),g.readAsDataURL(a.response)}else s("Failed to proxy resource "+B+" with status code "+a.status)},a.onerror=s;var o=r.indexOf("?")>-1?"&":"?";if(a.open("GET",""+r+o+"url="+encodeURIComponent(A)+"&responseType="+i),i!=="text"&&a instanceof XMLHttpRequest&&(a.responseType=i),t._options.imageTimeout){var Q=t._options.imageTimeout;a.timeout=Q,a.ontimeout=function(){return s("Timed out ("+Q+"ms) proxying "+B)}}a.send()})},e}(),eQ=/^data:image\/svg\+xml/i,rQ=/^data:image\/.*;base64,/i,tQ=/^data:image\/.*/i,BQ=function(e){return N.SUPPORT_SVG_DRAWING||!sQ(e)},vr=function(e){return tQ.test(e)},nQ=function(e){return rQ.test(e)},yr=function(e){return e.substr(0,4)==="blob"},sQ=function(e){return e.substr(-3).toLowerCase()==="svg"||eQ.test(e)},l=function(){function e(A,t){this.type=0,this.x=A,this.y=t}return e.prototype.add=function(A,t){return new e(this.x+A,this.y+t)},e}(),RA=function(e,A,t){return new l(e.x+(A.x-e.x)*t,e.y+(A.y-e.y)*t)},Te=function(){function e(A,t,r,B){this.type=1,this.start=A,this.startControl=t,this.endControl=r,this.end=B}return e.prototype.subdivide=function(A,t){var r=RA(this.start,this.startControl,A),B=RA(this.startControl,this.endControl,A),n=RA(this.endControl,this.end,A),s=RA(r,B,A),i=RA(B,n,A),a=RA(s,i,A);return t?new e(this.start,r,s,a):new 
e(a,i,n,this.end)},e.prototype.add=function(A,t){return new e(this.start.add(A,t),this.startControl.add(A,t),this.endControl.add(A,t),this.end.add(A,t))},e.prototype.reverse=function(){return new e(this.end,this.endControl,this.startControl,this.start)},e}(),$=function(e){return e.type===1},aQ=function(){function e(A){var t=A.styles,r=A.bounds,B=re(t.borderTopLeftRadius,r.width,r.height),n=B[0],s=B[1],i=re(t.borderTopRightRadius,r.width,r.height),a=i[0],o=i[1],Q=re(t.borderBottomRightRadius,r.width,r.height),g=Q[0],w=Q[1],f=re(t.borderBottomLeftRadius,r.width,r.height),c=f[0],u=f[1],H=[];H.push((n+a)/r.width),H.push((c+g)/r.width),H.push((s+u)/r.height),H.push((o+w)/r.height);var h=Math.max.apply(Math,H);h>1&&(n/=h,s/=h,a/=h,o/=h,g/=h,w/=h,c/=h,u/=h);var F=r.width-a,K=r.height-w,p=r.width-g,d=r.height-u,C=t.borderTopWidth,v=t.borderRightWidth,y=t.borderBottomWidth,E=t.borderLeftWidth,O=b(t.paddingTop,A.bounds.width),k=b(t.paddingRight,A.bounds.width),q=b(t.paddingBottom,A.bounds.width),L=b(t.paddingLeft,A.bounds.width);this.topLeftBorderDoubleOuterBox=n>0||s>0?x(r.left+E/3,r.top+C/3,n-E/3,s-C/3,m.TOP_LEFT):new l(r.left+E/3,r.top+C/3),this.topRightBorderDoubleOuterBox=n>0||s>0?x(r.left+F,r.top+C/3,a-v/3,o-C/3,m.TOP_RIGHT):new l(r.left+r.width-v/3,r.top+C/3),this.bottomRightBorderDoubleOuterBox=g>0||w>0?x(r.left+p,r.top+K,g-v/3,w-y/3,m.BOTTOM_RIGHT):new l(r.left+r.width-v/3,r.top+r.height-y/3),this.bottomLeftBorderDoubleOuterBox=c>0||u>0?x(r.left+E/3,r.top+d,c-E/3,u-y/3,m.BOTTOM_LEFT):new l(r.left+E/3,r.top+r.height-y/3),this.topLeftBorderDoubleInnerBox=n>0||s>0?x(r.left+E*2/3,r.top+C*2/3,n-E*2/3,s-C*2/3,m.TOP_LEFT):new l(r.left+E*2/3,r.top+C*2/3),this.topRightBorderDoubleInnerBox=n>0||s>0?x(r.left+F,r.top+C*2/3,a-v*2/3,o-C*2/3,m.TOP_RIGHT):new l(r.left+r.width-v*2/3,r.top+C*2/3),this.bottomRightBorderDoubleInnerBox=g>0||w>0?x(r.left+p,r.top+K,g-v*2/3,w-y*2/3,m.BOTTOM_RIGHT):new l(r.left+r.width-v*2/3,r.top+r.height-y*2/3),this.bottomLeftBorderDoubleInnerBox=c>0||u>0?x(r.left+E*2/3,r.top+d,c-E*2/3,u-y*2/3,m.BOTTOM_LEFT):new l(r.left+E*2/3,r.top+r.height-y*2/3),this.topLeftBorderStroke=n>0||s>0?x(r.left+E/2,r.top+C/2,n-E/2,s-C/2,m.TOP_LEFT):new l(r.left+E/2,r.top+C/2),this.topRightBorderStroke=n>0||s>0?x(r.left+F,r.top+C/2,a-v/2,o-C/2,m.TOP_RIGHT):new l(r.left+r.width-v/2,r.top+C/2),this.bottomRightBorderStroke=g>0||w>0?x(r.left+p,r.top+K,g-v/2,w-y/2,m.BOTTOM_RIGHT):new l(r.left+r.width-v/2,r.top+r.height-y/2),this.bottomLeftBorderStroke=c>0||u>0?x(r.left+E/2,r.top+d,c-E/2,u-y/2,m.BOTTOM_LEFT):new l(r.left+E/2,r.top+r.height-y/2),this.topLeftBorderBox=n>0||s>0?x(r.left,r.top,n,s,m.TOP_LEFT):new l(r.left,r.top),this.topRightBorderBox=a>0||o>0?x(r.left+F,r.top,a,o,m.TOP_RIGHT):new l(r.left+r.width,r.top),this.bottomRightBorderBox=g>0||w>0?x(r.left+p,r.top+K,g,w,m.BOTTOM_RIGHT):new l(r.left+r.width,r.top+r.height),this.bottomLeftBorderBox=c>0||u>0?x(r.left,r.top+d,c,u,m.BOTTOM_LEFT):new l(r.left,r.top+r.height),this.topLeftPaddingBox=n>0||s>0?x(r.left+E,r.top+C,Math.max(0,n-E),Math.max(0,s-C),m.TOP_LEFT):new l(r.left+E,r.top+C),this.topRightPaddingBox=a>0||o>0?x(r.left+Math.min(F,r.width-v),r.top+C,F>r.width+v?0:Math.max(0,a-v),Math.max(0,o-C),m.TOP_RIGHT):new l(r.left+r.width-v,r.top+C),this.bottomRightPaddingBox=g>0||w>0?x(r.left+Math.min(p,r.width-E),r.top+Math.min(K,r.height-y),Math.max(0,g-v),Math.max(0,w-y),m.BOTTOM_RIGHT):new l(r.left+r.width-v,r.top+r.height-y),this.bottomLeftPaddingBox=c>0||u>0?x(r.left+E,r.top+Math.min(d,r.height-y),Math.max(0,c-E),Math.max(0,u-y),m.BOTTOM_LEFT):new 
l(r.left+E,r.top+r.height-y),this.topLeftContentBox=n>0||s>0?x(r.left+E+L,r.top+C+O,Math.max(0,n-(E+L)),Math.max(0,s-(C+O)),m.TOP_LEFT):new l(r.left+E+L,r.top+C+O),this.topRightContentBox=a>0||o>0?x(r.left+Math.min(F,r.width+E+L),r.top+C+O,F>r.width+E+L?0:a-E+L,o-(C+O),m.TOP_RIGHT):new l(r.left+r.width-(v+k),r.top+C+O),this.bottomRightContentBox=g>0||w>0?x(r.left+Math.min(p,r.width-(E+L)),r.top+Math.min(K,r.height+C+O),Math.max(0,g-(v+k)),w-(y+q),m.BOTTOM_RIGHT):new l(r.left+r.width-(v+k),r.top+r.height-(y+q)),this.bottomLeftContentBox=c>0||u>0?x(r.left+E+L,r.top+d,Math.max(0,c-(E+L)),u-(y+q),m.BOTTOM_LEFT):new l(r.left+E+L,r.top+r.height-(y+q))}return e}(),m;(function(e){e[e.TOP_LEFT=0]="TOP_LEFT",e[e.TOP_RIGHT=1]="TOP_RIGHT",e[e.BOTTOM_RIGHT=2]="BOTTOM_RIGHT",e[e.BOTTOM_LEFT=3]="BOTTOM_LEFT"})(m||(m={}));var x=function(e,A,t,r,B){var n=4*((Math.sqrt(2)-1)/3),s=t*n,i=r*n,a=e+t,o=A+r;switch(B){case m.TOP_LEFT:return new Te(new l(e,o),new l(e,o-i),new l(a-s,A),new l(a,A));case m.TOP_RIGHT:return new Te(new l(e,A),new l(e+s,A),new l(a,o-i),new l(a,o));case m.BOTTOM_RIGHT:return new Te(new l(a,A),new l(a,A+i),new l(e+s,o),new l(e,o));case m.BOTTOM_LEFT:default:return new Te(new l(a,o),new l(a-s,o),new l(e,A+i),new l(e,A))}},Ze=function(e){return[e.topLeftBorderBox,e.topRightBorderBox,e.bottomRightBorderBox,e.bottomLeftBorderBox]},iQ=function(e){return[e.topLeftContentBox,e.topRightContentBox,e.bottomRightContentBox,e.bottomLeftContentBox]},qe=function(e){return[e.topLeftPaddingBox,e.topRightPaddingBox,e.bottomRightPaddingBox,e.bottomLeftPaddingBox]},oQ=function(){function e(A,t,r){this.offsetX=A,this.offsetY=t,this.matrix=r,this.type=0,this.target=6}return e}(),Se=function(){function e(A,t){this.path=A,this.target=t,this.type=1}return e}(),QQ=function(){function e(A){this.opacity=A,this.type=2,this.target=6}return e}(),gQ=function(e){return e.type===0},en=function(e){return e.type===1},wQ=function(e){return e.type===2},eB=function(e,A){return e.length===A.length?e.some(function(t,r){return t===A[r]}):!1},cQ=function(e,A,t,r,B){return e.map(function(n,s){switch(s){case 0:return n.add(A,t);case 1:return n.add(A+r,t);case 2:return n.add(A+r,t+B);case 3:return n.add(A,t+B)}return n})},rn=function(){function e(A){this.element=A,this.inlineLevel=[],this.nonInlineLevel=[],this.negativeZIndex=[],this.zeroOrAutoZIndexOrTransformedOrOpacity=[],this.positiveZIndex=[],this.nonPositionedFloats=[],this.nonPositionedInlineLevel=[]}return e}(),tn=function(){function e(A,t){if(this.container=A,this.parent=t,this.effects=[],this.curves=new aQ(this.container),this.container.styles.opacity<1&&this.effects.push(new QQ(this.container.styles.opacity)),this.container.styles.transform!==null){var r=this.container.bounds.left+this.container.styles.transformOrigin[0].number,B=this.container.bounds.top+this.container.styles.transformOrigin[1].number,n=this.container.styles.transform;this.effects.push(new oQ(r,B,n))}if(this.container.styles.overflowX!==0){var s=Ze(this.curves),i=qe(this.curves);eB(s,i)?this.effects.push(new Se(s,6)):(this.effects.push(new Se(s,2)),this.effects.push(new Se(i,4)))}}return e.prototype.getEffects=function(A){for(var t=[2,3].indexOf(this.container.styles.position)===-1,r=this.parent,B=this.effects.slice(0);r;){var n=r.effects.filter(function(a){return!en(a)});if(t||r.container.styles.position!==0||!r.parent){if(B.unshift.apply(B,n),t=[2,3].indexOf(r.container.styles.position)===-1,r.container.styles.overflowX!==0){var s=Ze(r.curves),i=qe(r.curves);eB(s,i)||B.unshift(new Se(i,6))}}else 
B.unshift.apply(B,n);r=r.parent}return B.filter(function(a){return G(a.target,A)})},e}(),jr=function(e,A,t,r){e.container.elements.forEach(function(B){var n=G(B.flags,4),s=G(B.flags,2),i=new tn(B,e);G(B.styles.display,2048)&&r.push(i);var a=G(B.flags,8)?[]:r;if(n||s){var o=n||B.styles.isPositioned()?t:A,Q=new rn(i);if(B.styles.isPositioned()||B.styles.opacity<1||B.styles.isTransformed()){var g=B.styles.zIndex.order;if(g<0){var w=0;o.negativeZIndex.some(function(c,u){return g>c.element.container.styles.zIndex.order?(w=u,!1):w>0}),o.negativeZIndex.splice(w,0,Q)}else if(g>0){var f=0;o.positiveZIndex.some(function(c,u){return g>=c.element.container.styles.zIndex.order?(f=u+1,!1):f>0}),o.positiveZIndex.splice(f,0,Q)}else o.zeroOrAutoZIndexOrTransformedOrOpacity.push(Q)}else B.styles.isFloating()?o.nonPositionedFloats.push(Q):o.nonPositionedInlineLevel.push(Q);jr(i,Q,n?Q:t,a)}else B.styles.isInlineLevel()?A.inlineLevel.push(i):A.nonInlineLevel.push(i),jr(i,A,t,a);G(B.flags,8)&&Bn(B,a)})},Bn=function(e,A){for(var t=e instanceof kr?e.start:1,r=e instanceof kr?e.reversed:!1,B=0;B<A.length;B++){var n=A[B];n.container instanceof NB&&typeof n.container.value=="number"&&n.container.value!==0&&(t=n.container.value),n.listValue=we(t,n.container.styles.listStyleType,!0),t+=r?-1:1}},uQ=function(e){var A=new tn(e,null),t=new rn(A),r=[];return jr(A,t,t,r),Bn(A.container,r),t},rB=function(e,A){switch(A){case 0:return eA(e.topLeftBorderBox,e.topLeftPaddingBox,e.topRightBorderBox,e.topRightPaddingBox);case 1:return eA(e.topRightBorderBox,e.topRightPaddingBox,e.bottomRightBorderBox,e.bottomRightPaddingBox);case 2:return eA(e.bottomRightBorderBox,e.bottomRightPaddingBox,e.bottomLeftBorderBox,e.bottomLeftPaddingBox);case 3:default:return eA(e.bottomLeftBorderBox,e.bottomLeftPaddingBox,e.topLeftBorderBox,e.topLeftPaddingBox)}},lQ=function(e,A){switch(A){case 0:return eA(e.topLeftBorderBox,e.topLeftBorderDoubleOuterBox,e.topRightBorderBox,e.topRightBorderDoubleOuterBox);case 1:return eA(e.topRightBorderBox,e.topRightBorderDoubleOuterBox,e.bottomRightBorderBox,e.bottomRightBorderDoubleOuterBox);case 2:return eA(e.bottomRightBorderBox,e.bottomRightBorderDoubleOuterBox,e.bottomLeftBorderBox,e.bottomLeftBorderDoubleOuterBox);case 3:default:return eA(e.bottomLeftBorderBox,e.bottomLeftBorderDoubleOuterBox,e.topLeftBorderBox,e.topLeftBorderDoubleOuterBox)}},CQ=function(e,A){switch(A){case 0:return eA(e.topLeftBorderDoubleInnerBox,e.topLeftPaddingBox,e.topRightBorderDoubleInnerBox,e.topRightPaddingBox);case 1:return eA(e.topRightBorderDoubleInnerBox,e.topRightPaddingBox,e.bottomRightBorderDoubleInnerBox,e.bottomRightPaddingBox);case 2:return eA(e.bottomRightBorderDoubleInnerBox,e.bottomRightPaddingBox,e.bottomLeftBorderDoubleInnerBox,e.bottomLeftPaddingBox);case 3:default:return eA(e.bottomLeftBorderDoubleInnerBox,e.bottomLeftPaddingBox,e.topLeftBorderDoubleInnerBox,e.topLeftPaddingBox)}},fQ=function(e,A){switch(A){case 0:return Oe(e.topLeftBorderStroke,e.topRightBorderStroke);case 1:return Oe(e.topRightBorderStroke,e.bottomRightBorderStroke);case 2:return Oe(e.bottomRightBorderStroke,e.bottomLeftBorderStroke);case 3:default:return Oe(e.bottomLeftBorderStroke,e.topLeftBorderStroke)}},Oe=function(e,A){var t=[];return $(e)?t.push(e.subdivide(.5,!1)):t.push(e),$(A)?t.push(A.subdivide(.5,!0)):t.push(A),t},eA=function(e,A,t,r){var B=[];return 
$(e)?B.push(e.subdivide(.5,!1)):B.push(e),$(t)?B.push(t.subdivide(.5,!0)):B.push(t),$(r)?B.push(r.subdivide(.5,!0).reverse()):B.push(r),$(A)?B.push(A.subdivide(.5,!1).reverse()):B.push(A),B},nn=function(e){var A=e.bounds,t=e.styles;return A.add(t.borderLeftWidth,t.borderTopWidth,-(t.borderRightWidth+t.borderLeftWidth),-(t.borderTopWidth+t.borderBottomWidth))},je=function(e){var A=e.styles,t=e.bounds,r=b(A.paddingLeft,t.width),B=b(A.paddingRight,t.width),n=b(A.paddingTop,t.width),s=b(A.paddingBottom,t.width);return t.add(r+A.borderLeftWidth,n+A.borderTopWidth,-(A.borderRightWidth+A.borderLeftWidth+r+B),-(A.borderTopWidth+A.borderBottomWidth+n+s))},UQ=function(e,A){return e===0?A.bounds:e===2?je(A):nn(A)},FQ=function(e,A){return e===0?A.bounds:e===2?je(A):nn(A)},Kr=function(e,A,t){var r=UQ(XA(e.styles.backgroundOrigin,A),e),B=FQ(XA(e.styles.backgroundClip,A),e),n=hQ(XA(e.styles.backgroundSize,A),t,r),s=n[0],i=n[1],a=re(XA(e.styles.backgroundPosition,A),r.width-s,r.height-i),o=dQ(XA(e.styles.backgroundRepeat,A),a,n,r,B),Q=Math.round(r.left+a[0]),g=Math.round(r.top+a[1]);return[o,Q,g,s,i]},VA=function(e){return D(e)&&e.value===PA.AUTO},Me=function(e){return typeof e=="number"},hQ=function(e,A,t){var r=A[0],B=A[1],n=A[2],s=e[0],i=e[1];if(!s)return[0,0];if(M(s)&&i&&M(i))return[b(s,t.width),b(i,t.height)];var a=Me(n);if(D(s)&&(s.value===PA.CONTAIN||s.value===PA.COVER)){if(Me(n)){var o=t.width/t.height;return o<n!=(s.value===PA.COVER)?[t.width,t.width/n]:[t.height*n,t.height]}return[t.width,t.height]}var Q=Me(r),g=Me(B),w=Q||g;if(VA(s)&&(!i||VA(i))){if(Q&&g)return[r,B];if(!a&&!w)return[t.width,t.height];if(w&&a){var f=Q?r:B*n,c=g?B:r/n;return[f,c]}var u=Q?r:t.width,H=g?B:t.height;return[u,H]}if(a){var h=0,F=0;return M(s)?h=b(s,t.width):M(i)&&(F=b(i,t.height)),VA(s)?h=F*n:(!i||VA(i))&&(F=h/n),[h,F]}var K=null,p=null;if(M(s)?K=b(s,t.width):i&&M(i)&&(p=b(i,t.height)),K!==null&&(!i||VA(i))&&(p=Q&&g?K/r*B:t.height),p!==null&&VA(s)&&(K=Q&&g?p/B*r:t.width),K!==null&&p!==null)return[K,p];throw new Error("Unable to calculate background-size for element")},XA=function(e,A){var t=e[A];return typeof t>"u"?e[0]:t},dQ=function(e,A,t,r,B){var n=A[0],s=A[1],i=t[0],a=t[1];switch(e){case 2:return[new l(Math.round(r.left),Math.round(r.top+s)),new l(Math.round(r.left+r.width),Math.round(r.top+s)),new l(Math.round(r.left+r.width),Math.round(a+r.top+s)),new l(Math.round(r.left),Math.round(a+r.top+s))];case 3:return[new l(Math.round(r.left+n),Math.round(r.top)),new l(Math.round(r.left+n+i),Math.round(r.top)),new l(Math.round(r.left+n+i),Math.round(r.height+r.top)),new l(Math.round(r.left+n),Math.round(r.height+r.top))];case 1:return[new l(Math.round(r.left+n),Math.round(r.top+s)),new l(Math.round(r.left+n+i),Math.round(r.top+s)),new l(Math.round(r.left+n+i),Math.round(r.top+s+a)),new l(Math.round(r.left+n),Math.round(r.top+s+a))];default:return[new l(Math.round(B.left),Math.round(B.top)),new l(Math.round(B.left+B.width),Math.round(B.top)),new l(Math.round(B.left+B.width),Math.round(B.height+B.top)),new l(Math.round(B.left),Math.round(B.height+B.top))]}},EQ="data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7",tB="Hidden Text",HQ=function(){function e(A){this._data={},this._document=A}return e.prototype.parseMetrics=function(A,t){var 
r=this._document.createElement("div"),B=this._document.createElement("img"),n=this._document.createElement("span"),s=this._document.body;r.style.visibility="hidden",r.style.fontFamily=A,r.style.fontSize=t,r.style.margin="0",r.style.padding="0",r.style.whiteSpace="nowrap",s.appendChild(r),B.src=EQ,B.width=1,B.height=1,B.style.margin="0",B.style.padding="0",B.style.verticalAlign="baseline",n.style.fontFamily=A,n.style.fontSize=t,n.style.margin="0",n.style.padding="0",n.appendChild(this._document.createTextNode(tB)),r.appendChild(n),r.appendChild(B);var i=B.offsetTop-n.offsetTop+2;r.removeChild(n),r.appendChild(this._document.createTextNode(tB)),r.style.lineHeight="normal",B.style.verticalAlign="super";var a=B.offsetTop-r.offsetTop+2;return s.removeChild(r),{baseline:i,middle:a}},e.prototype.getMetrics=function(A,t){var r=A+" "+t;return typeof this._data[r]>"u"&&(this._data[r]=this.parseMetrics(A,t)),this._data[r]},e}(),sn=function(){function e(A,t){this.context=A,this.options=t}return e}(),pQ=1e4,IQ=function(e){tA(A,e);function A(t,r){var B=e.call(this,t,r)||this;return B._activeEffects=[],B.canvas=r.canvas?r.canvas:document.createElement("canvas"),B.ctx=B.canvas.getContext("2d"),r.canvas||(B.canvas.width=Math.floor(r.width*r.scale),B.canvas.height=Math.floor(r.height*r.scale),B.canvas.style.width=r.width+"px",B.canvas.style.height=r.height+"px"),B.fontMetrics=new HQ(document),B.ctx.scale(B.options.scale,B.options.scale),B.ctx.translate(-r.x,-r.y),B.ctx.textBaseline="bottom",B._activeEffects=[],B.context.logger.debug("Canvas renderer initialized ("+r.width+"x"+r.height+") with scale "+r.scale),B}return A.prototype.applyEffects=function(t){for(var r=this;this._activeEffects.length;)this.popEffect();t.forEach(function(B){return r.applyEffect(B)})},A.prototype.applyEffect=function(t){this.ctx.save(),wQ(t)&&(this.ctx.globalAlpha=t.opacity),gQ(t)&&(this.ctx.translate(t.offsetX,t.offsetY),this.ctx.transform(t.matrix[0],t.matrix[1],t.matrix[2],t.matrix[3],t.matrix[4],t.matrix[5]),this.ctx.translate(-t.offsetX,-t.offsetY)),en(t)&&(this.path(t.path),this.ctx.clip()),this._activeEffects.push(t)},A.prototype.popEffect=function(){this._activeEffects.pop(),this.ctx.restore()},A.prototype.renderStack=function(t){return J(this,void 0,void 0,function(){var r;return _(this,function(B){switch(B.label){case 0:return r=t.element.container.styles,r.isVisible()?[4,this.renderStackContent(t)]:[3,2];case 1:B.sent(),B.label=2;case 2:return[2]}})})},A.prototype.renderNode=function(t){return J(this,void 0,void 0,function(){return _(this,function(r){switch(r.label){case 0:if(G(t.container.flags,16))debugger;return t.container.styles.isVisible()?[4,this.renderNodeBackgroundAndBorders(t)]:[3,3];case 1:return r.sent(),[4,this.renderNodeContent(t)];case 2:r.sent(),r.label=3;case 3:return[2]}})})},A.prototype.renderTextWithLetterSpacing=function(t,r,B){var n=this;if(r===0)this.ctx.fillText(t.text,t.bounds.left,t.bounds.top+B);else{var s=rt(t.text);s.reduce(function(i,a){return n.ctx.fillText(a,i,t.bounds.top+B),i+n.ctx.measureText(a).width},t.bounds.left)}},A.prototype.createFontStyle=function(t){var r=t.fontVariant.filter(function(s){return s==="normal"||s==="small-caps"}).join(""),B=LQ(t.fontFamily).join(", "),n=ue(t.fontSize)?""+t.fontSize.number+t.fontSize.unit:t.fontSize.number+"px";return[[t.fontStyle,r,t.fontWeight,n,B].join(" "),B,n]},A.prototype.renderTextNode=function(t,r){return J(this,void 0,void 0,function(){var B,n,s,i,a,o,Q,g,w=this;return _(this,function(f){return 
B=this.createFontStyle(r),n=B[0],s=B[1],i=B[2],this.ctx.font=n,this.ctx.direction=r.direction===1?"rtl":"ltr",this.ctx.textAlign="left",this.ctx.textBaseline="alphabetic",a=this.fontMetrics.getMetrics(s,i),o=a.baseline,Q=a.middle,g=r.paintOrder,t.textBounds.forEach(function(c){g.forEach(function(u){switch(u){case 0:w.ctx.fillStyle=R(r.color),w.renderTextWithLetterSpacing(c,r.letterSpacing,o);var H=r.textShadow;H.length&&c.text.trim().length&&(H.slice(0).reverse().forEach(function(h){w.ctx.shadowColor=R(h.color),w.ctx.shadowOffsetX=h.offsetX.number*w.options.scale,w.ctx.shadowOffsetY=h.offsetY.number*w.options.scale,w.ctx.shadowBlur=h.blur.number,w.renderTextWithLetterSpacing(c,r.letterSpacing,o)}),w.ctx.shadowColor="",w.ctx.shadowOffsetX=0,w.ctx.shadowOffsetY=0,w.ctx.shadowBlur=0),r.textDecorationLine.length&&(w.ctx.fillStyle=R(r.textDecorationColor||r.color),r.textDecorationLine.forEach(function(h){switch(h){case 1:w.ctx.fillRect(c.bounds.left,Math.round(c.bounds.top+o),c.bounds.width,1);break;case 2:w.ctx.fillRect(c.bounds.left,Math.round(c.bounds.top),c.bounds.width,1);break;case 3:w.ctx.fillRect(c.bounds.left,Math.ceil(c.bounds.top+Q),c.bounds.width,1);break}}));break;case 1:r.webkitTextStrokeWidth&&c.text.trim().length&&(w.ctx.strokeStyle=R(r.webkitTextStrokeColor),w.ctx.lineWidth=r.webkitTextStrokeWidth,w.ctx.lineJoin=window.chrome?"miter":"round",w.ctx.strokeText(c.text,c.bounds.left,c.bounds.top+o)),w.ctx.strokeStyle="",w.ctx.lineWidth=0,w.ctx.lineJoin="miter";break}})}),[2]})})},A.prototype.renderReplacedElement=function(t,r,B){if(B&&t.intrinsicWidth>0&&t.intrinsicHeight>0){var n=je(t),s=qe(r);this.path(s),this.ctx.save(),this.ctx.clip(),this.ctx.drawImage(B,0,0,t.intrinsicWidth,t.intrinsicHeight,n.left,n.top,n.width,n.height),this.ctx.restore()}},A.prototype.renderNodeContent=function(t){return J(this,void 0,void 0,function(){var r,B,n,s,i,a,F,F,o,Q,g,w,p,f,c,d,u,H,h,F,K,p,d;return _(this,function(C){switch(C.label){case 0:this.applyEffects(t.getEffects(4)),r=t.container,B=t.curves,n=r.styles,s=0,i=r.textNodes,C.label=1;case 1:return s<i.length?(a=i[s],[4,this.renderTextNode(a,n)]):[3,4];case 2:C.sent(),C.label=3;case 3:return s++,[3,1];case 4:if(!(r instanceof GB))return[3,8];C.label=5;case 5:return C.trys.push([5,7,,8]),[4,this.context.cache.match(r.src)];case 6:return F=C.sent(),this.renderReplacedElement(r,B,F),[3,8];case 7:return C.sent(),this.context.logger.error("Error loading image "+r.src),[3,8];case 8:if(r instanceof RB&&this.renderReplacedElement(r,B,r.canvas),!(r instanceof VB))return[3,12];C.label=9;case 9:return C.trys.push([9,11,,12]),[4,this.context.cache.match(r.svg)];case 10:return F=C.sent(),this.renderReplacedElement(r,B,F),[3,12];case 11:return C.sent(),this.context.logger.error("Error loading svg "+r.svg.substring(0,255)),[3,12];case 12:return r instanceof PB&&r.tree?(o=new A(this.context,{scale:this.options.scale,backgroundColor:r.backgroundColor,x:0,y:0,width:r.width,height:r.height}),[4,o.render(r.tree)]):[3,14];case 13:Q=C.sent(),r.width&&r.height&&this.ctx.drawImage(Q,0,0,r.width,r.height,r.bounds.left,r.bounds.top,r.bounds.width,r.bounds.height),C.label=14;case 14:if(r instanceof tt&&(g=Math.min(r.bounds.width,r.bounds.height),r.type===Je?r.checked&&(this.ctx.save(),this.path([new l(r.bounds.left+g*.39363,r.bounds.top+g*.79),new l(r.bounds.left+g*.16,r.bounds.top+g*.5549),new l(r.bounds.left+g*.27347,r.bounds.top+g*.44071),new l(r.bounds.left+g*.39694,r.bounds.top+g*.5649),new l(r.bounds.left+g*.72983,r.bounds.top+g*.23),new 
l(r.bounds.left+g*.84,r.bounds.top+g*.34085),new l(r.bounds.left+g*.39363,r.bounds.top+g*.79)]),this.ctx.fillStyle=R(Pt),this.ctx.fill(),this.ctx.restore()):r.type===ke&&r.checked&&(this.ctx.save(),this.ctx.beginPath(),this.ctx.arc(r.bounds.left+g/2,r.bounds.top+g/2,g/4,0,Math.PI*2,!0),this.ctx.fillStyle=R(Pt),this.ctx.fill(),this.ctx.restore())),vQ(r)&&r.value.length){switch(w=this.createFontStyle(n),p=w[0],f=w[1],c=this.fontMetrics.getMetrics(p,f).baseline,this.ctx.font=p,this.ctx.fillStyle=R(n.color),this.ctx.textBaseline="alphabetic",this.ctx.textAlign=KQ(r.styles.textAlign),d=je(r),u=0,r.styles.textAlign){case 1:u+=d.width/2;break;case 2:u+=d.width;break}H=d.add(u,0,0,-d.height/2+1),this.ctx.save(),this.path([new l(d.left,d.top),new l(d.left+d.width,d.top),new l(d.left+d.width,d.top+d.height),new l(d.left,d.top+d.height)]),this.ctx.clip(),this.renderTextWithLetterSpacing(new ie(r.value,H),n.letterSpacing,c),this.ctx.restore(),this.ctx.textBaseline="alphabetic",this.ctx.textAlign="left"}if(!G(r.styles.display,2048))return[3,20];if(r.styles.listStyleImage===null)return[3,19];if(h=r.styles.listStyleImage,h.type!==0)return[3,18];F=void 0,K=h.url,C.label=15;case 15:return C.trys.push([15,17,,18]),[4,this.context.cache.match(K)];case 16:return F=C.sent(),this.ctx.drawImage(F,r.bounds.left-(F.width+10),r.bounds.top),[3,18];case 17:return C.sent(),this.context.logger.error("Error loading list-style-image "+K),[3,18];case 18:return[3,20];case 19:t.listValue&&r.styles.listStyleType!==-1&&(p=this.createFontStyle(n)[0],this.ctx.font=p,this.ctx.fillStyle=R(n.color),this.ctx.textBaseline="middle",this.ctx.textAlign="right",d=new cA(r.bounds.left,r.bounds.top+b(r.styles.paddingTop,r.bounds.width),r.bounds.width,yt(n.lineHeight,n.fontSize.number)/2+1),this.renderTextWithLetterSpacing(new ie(t.listValue,d),n.letterSpacing,yt(n.lineHeight,n.fontSize.number)/2+2),this.ctx.textBaseline="bottom",this.ctx.textAlign="left"),C.label=20;case 20:return[2]}})})},A.prototype.renderStackContent=function(t){return J(this,void 0,void 0,function(){var r,B,h,n,s,h,i,a,h,o,Q,h,g,w,h,f,c,h,u,H,h;return _(this,function(F){switch(F.label){case 0:if(G(t.element.container.flags,16))debugger;return[4,this.renderNodeBackgroundAndBorders(t.element)];case 1:F.sent(),r=0,B=t.negativeZIndex,F.label=2;case 2:return r<B.length?(h=B[r],[4,this.renderStack(h)]):[3,5];case 3:F.sent(),F.label=4;case 4:return r++,[3,2];case 5:return[4,this.renderNodeContent(t.element)];case 6:F.sent(),n=0,s=t.nonInlineLevel,F.label=7;case 7:return n<s.length?(h=s[n],[4,this.renderNode(h)]):[3,10];case 8:F.sent(),F.label=9;case 9:return n++,[3,7];case 10:i=0,a=t.nonPositionedFloats,F.label=11;case 11:return i<a.length?(h=a[i],[4,this.renderStack(h)]):[3,14];case 12:F.sent(),F.label=13;case 13:return i++,[3,11];case 14:o=0,Q=t.nonPositionedInlineLevel,F.label=15;case 15:return o<Q.length?(h=Q[o],[4,this.renderStack(h)]):[3,18];case 16:F.sent(),F.label=17;case 17:return o++,[3,15];case 18:g=0,w=t.inlineLevel,F.label=19;case 19:return g<w.length?(h=w[g],[4,this.renderNode(h)]):[3,22];case 20:F.sent(),F.label=21;case 21:return g++,[3,19];case 22:f=0,c=t.zeroOrAutoZIndexOrTransformedOrOpacity,F.label=23;case 23:return f<c.length?(h=c[f],[4,this.renderStack(h)]):[3,26];case 24:F.sent(),F.label=25;case 25:return f++,[3,23];case 26:u=0,H=t.positiveZIndex,F.label=27;case 27:return u<H.length?(h=H[u],[4,this.renderStack(h)]):[3,30];case 28:F.sent(),F.label=29;case 29:return u++,[3,27];case 
30:return[2]}})})},A.prototype.mask=function(t){this.ctx.beginPath(),this.ctx.moveTo(0,0),this.ctx.lineTo(this.canvas.width,0),this.ctx.lineTo(this.canvas.width,this.canvas.height),this.ctx.lineTo(0,this.canvas.height),this.ctx.lineTo(0,0),this.formatPath(t.slice(0).reverse()),this.ctx.closePath()},A.prototype.path=function(t){this.ctx.beginPath(),this.formatPath(t),this.ctx.closePath()},A.prototype.formatPath=function(t){var r=this;t.forEach(function(B,n){var s=$(B)?B.start:B;n===0?r.ctx.moveTo(s.x,s.y):r.ctx.lineTo(s.x,s.y),$(B)&&r.ctx.bezierCurveTo(B.startControl.x,B.startControl.y,B.endControl.x,B.endControl.y,B.end.x,B.end.y)})},A.prototype.renderRepeat=function(t,r,B,n){this.path(t),this.ctx.fillStyle=r,this.ctx.translate(B,n),this.ctx.fill(),this.ctx.translate(-B,-n)},A.prototype.resizeImage=function(t,r,B){var n;if(t.width===r&&t.height===B)return t;var s=(n=this.canvas.ownerDocument)!==null&&n!==void 0?n:document,i=s.createElement("canvas");i.width=Math.max(1,r),i.height=Math.max(1,B);var a=i.getContext("2d");return a.drawImage(t,0,0,t.width,t.height,0,0,r,B),i},A.prototype.renderBackgroundImage=function(t){return J(this,void 0,void 0,function(){var r,B,n,s,i,a;return _(this,function(o){switch(o.label){case 0:r=t.styles.backgroundImage.length-1,B=function(Q){var g,w,f,O,Y,W,L,V,y,c,O,Y,W,L,V,u,H,h,F,K,p,d,C,v,y,E,O,k,q,L,V,uA,Y,W,IA,BA,lA,vA,yA,iA,KA,oA;return _(this,function(TA){switch(TA.label){case 0:if(Q.type!==0)return[3,5];g=void 0,w=Q.url,TA.label=1;case 1:return TA.trys.push([1,3,,4]),[4,n.context.cache.match(w)];case 2:return g=TA.sent(),[3,4];case 3:return TA.sent(),n.context.logger.error("Error loading background-image "+w),[3,4];case 4:return g&&(f=Kr(t,r,[g.width,g.height,g.width/g.height]),O=f[0],Y=f[1],W=f[2],L=f[3],V=f[4],y=n.ctx.createPattern(n.resizeImage(g,L,V),"repeat"),n.renderRepeat(O,y,Y,W)),[3,6];case 5:Qa(Q)?(c=Kr(t,r,[null,null,null]),O=c[0],Y=c[1],W=c[2],L=c[3],V=c[4],u=na(Q.angle,L,V),H=u[0],h=u[1],F=u[2],K=u[3],p=u[4],d=document.createElement("canvas"),d.width=L,d.height=V,C=d.getContext("2d"),v=C.createLinearGradient(h,K,F,p),It(Q.stops,H).forEach(function(YA){return v.addColorStop(YA.stop,R(YA.color))}),C.fillStyle=v,C.fillRect(0,0,L,V),L>0&&V>0&&(y=n.ctx.createPattern(d,"repeat"),n.renderRepeat(O,y,Y,W))):ga(Q)&&(E=Kr(t,r,[null,null,null]),O=E[0],k=E[1],q=E[2],L=E[3],V=E[4],uA=Q.position.length===0?[$r]:Q.position,Y=b(uA[0],L),W=b(uA[uA.length-1],V),IA=sa(Q,Y,W,L,V),BA=IA[0],lA=IA[1],BA>0&&lA>0&&(vA=n.ctx.createRadialGradient(k+Y,q+W,0,k+Y,q+W,BA),It(Q.stops,BA*2).forEach(function(YA){return vA.addColorStop(YA.stop,R(YA.color))}),n.path(O),n.ctx.fillStyle=vA,BA!==lA?(yA=t.bounds.left+.5*t.bounds.width,iA=t.bounds.top+.5*t.bounds.height,KA=lA/BA,oA=1/KA,n.ctx.save(),n.ctx.translate(yA,iA),n.ctx.transform(1,0,0,KA,0,0),n.ctx.translate(-yA,-iA),n.ctx.fillRect(k,oA*(q-iA)+iA,L,V*oA),n.ctx.restore()):n.ctx.fill())),TA.label=6;case 6:return r--,[2]}})},n=this,s=0,i=t.styles.backgroundImage.slice(0).reverse(),o.label=1;case 1:return s<i.length?(a=i[s],[5,B(a)]):[3,4];case 2:o.sent(),o.label=3;case 3:return s++,[3,1];case 4:return[2]}})})},A.prototype.renderSolidBorder=function(t,r,B){return J(this,void 0,void 0,function(){return _(this,function(n){return this.path(rB(B,r)),this.ctx.fillStyle=R(t),this.ctx.fill(),[2]})})},A.prototype.renderDoubleBorder=function(t,r,B,n){return J(this,void 0,void 0,function(){var s,i;return _(this,function(a){switch(a.label){case 0:return r<3?[4,this.renderSolidBorder(t,B,n)]:[3,2];case 1:return a.sent(),[2];case 2:return 
s=lQ(n,B),this.path(s),this.ctx.fillStyle=R(t),this.ctx.fill(),i=CQ(n,B),this.path(i),this.ctx.fill(),[2]}})})},A.prototype.renderNodeBackgroundAndBorders=function(t){return J(this,void 0,void 0,function(){var r,B,n,s,i,a,o,Q,g=this;return _(this,function(w){switch(w.label){case 0:return this.applyEffects(t.getEffects(2)),r=t.container.styles,B=!HA(r.backgroundColor)||r.backgroundImage.length,n=[{style:r.borderTopStyle,color:r.borderTopColor,width:r.borderTopWidth},{style:r.borderRightStyle,color:r.borderRightColor,width:r.borderRightWidth},{style:r.borderBottomStyle,color:r.borderBottomColor,width:r.borderBottomWidth},{style:r.borderLeftStyle,color:r.borderLeftColor,width:r.borderLeftWidth}],s=yQ(XA(r.backgroundClip,0),t.curves),B||r.boxShadow.length?(this.ctx.save(),this.path(s),this.ctx.clip(),HA(r.backgroundColor)||(this.ctx.fillStyle=R(r.backgroundColor),this.ctx.fill()),[4,this.renderBackgroundImage(t.container)]):[3,2];case 1:w.sent(),this.ctx.restore(),r.boxShadow.slice(0).reverse().forEach(function(f){g.ctx.save();var c=Ze(t.curves),u=f.inset?0:pQ,H=cQ(c,-u+(f.inset?1:-1)*f.spread.number,(f.inset?1:-1)*f.spread.number,f.spread.number*(f.inset?-2:2),f.spread.number*(f.inset?-2:2));f.inset?(g.path(c),g.ctx.clip(),g.mask(H)):(g.mask(c),g.ctx.clip(),g.path(H)),g.ctx.shadowOffsetX=f.offsetX.number+u,g.ctx.shadowOffsetY=f.offsetY.number,g.ctx.shadowColor=R(f.color),g.ctx.shadowBlur=f.blur.number,g.ctx.fillStyle=f.inset?R(f.color):"rgba(0,0,0,1)",g.ctx.fill(),g.ctx.restore()}),w.label=2;case 2:i=0,a=0,o=n,w.label=3;case 3:return a<o.length?(Q=o[a],Q.style!==0&&!HA(Q.color)&&Q.width>0?Q.style!==2?[3,5]:[4,this.renderDashedDottedBorder(Q.color,Q.width,i,t.curves,2)]:[3,11]):[3,13];case 4:return w.sent(),[3,11];case 5:return Q.style!==3?[3,7]:[4,this.renderDashedDottedBorder(Q.color,Q.width,i,t.curves,3)];case 6:return w.sent(),[3,11];case 7:return Q.style!==4?[3,9]:[4,this.renderDoubleBorder(Q.color,Q.width,i,t.curves)];case 8:return w.sent(),[3,11];case 9:return[4,this.renderSolidBorder(Q.color,i,t.curves)];case 10:w.sent(),w.label=11;case 11:i++,w.label=12;case 12:return a++,[3,3];case 13:return[2]}})})},A.prototype.renderDashedDottedBorder=function(t,r,B,n,s){return J(this,void 0,void 0,function(){var i,a,o,Q,g,w,f,c,u,H,h,F,K,p,d,C,d,C;return _(this,function(v){return this.ctx.save(),i=fQ(n,B),a=rB(n,B),s===2&&(this.path(a),this.ctx.clip()),$(a[0])?(o=a[0].start.x,Q=a[0].start.y):(o=a[0].x,Q=a[0].y),$(a[1])?(g=a[1].end.x,w=a[1].end.y):(g=a[1].x,w=a[1].y),B===0||B===2?f=Math.abs(o-g):f=Math.abs(Q-w),this.ctx.beginPath(),s===3?this.formatPath(i):this.formatPath(a.slice(0,2)),c=r<3?r*3:r*2,u=r<3?r*2:r,s===3&&(c=r,u=r),H=!0,f<=c*2?H=!1:f<=c*2+u?(h=f/(2*c+u),c*=h,u*=h):(F=Math.floor((f+u)/(c+u)),K=(f-F*c)/(F-1),p=(f-(F+1)*c)/F,u=p<=0||Math.abs(u-K)<Math.abs(u-p)?K:p),H&&(s===3?this.ctx.setLineDash([0,c+u]):this.ctx.setLineDash([c,u])),s===3?(this.ctx.lineCap="round",this.ctx.lineWidth=r):this.ctx.lineWidth=r*2+1.1,this.ctx.strokeStyle=R(t),this.ctx.stroke(),this.ctx.setLineDash([]),s===2&&($(a[0])&&(d=a[3],C=a[0],this.ctx.beginPath(),this.formatPath([new l(d.end.x,d.end.y),new l(C.start.x,C.start.y)]),this.ctx.stroke()),$(a[1])&&(d=a[1],C=a[2],this.ctx.beginPath(),this.formatPath([new l(d.end.x,d.end.y),new l(C.start.x,C.start.y)]),this.ctx.stroke())),this.ctx.restore(),[2]})})},A.prototype.render=function(t){return J(this,void 0,void 0,function(){var r;return _(this,function(B){switch(B.label){case 0:return 
this.options.backgroundColor&&(this.ctx.fillStyle=R(this.options.backgroundColor),this.ctx.fillRect(this.options.x,this.options.y,this.options.width,this.options.height)),r=uQ(t),[4,this.renderStack(r)];case 1:return B.sent(),this.applyEffects([]),[2,this.canvas]}})})},A}(sn),vQ=function(e){return e instanceof _B||e instanceof XB?!0:e instanceof tt&&e.type!==ke&&e.type!==Je},yQ=function(e,A){switch(e){case 0:return Ze(A);case 2:return iQ(A);case 1:default:return qe(A)}},KQ=function(e){switch(e){case 1:return"center";case 2:return"right";case 0:default:return"left"}},mQ=["-apple-system","system-ui"],LQ=function(e){return/iPhone OS 15_(0|1)/.test(window.navigator.userAgent)?e.filter(function(A){return mQ.indexOf(A)===-1}):e},DQ=function(e){tA(A,e);function A(t,r){var B=e.call(this,t,r)||this;return B.canvas=r.canvas?r.canvas:document.createElement("canvas"),B.ctx=B.canvas.getContext("2d"),B.options=r,B.canvas.width=Math.floor(r.width*r.scale),B.canvas.height=Math.floor(r.height*r.scale),B.canvas.style.width=r.width+"px",B.canvas.style.height=r.height+"px",B.ctx.scale(B.options.scale,B.options.scale),B.ctx.translate(-r.x,-r.y),B.context.logger.debug("EXPERIMENTAL ForeignObject renderer initialized ("+r.width+"x"+r.height+" at "+r.x+","+r.y+") with scale "+r.scale),B}return A.prototype.render=function(t){return J(this,void 0,void 0,function(){var r,B;return _(this,function(n){switch(n.label){case 0:return r=Jr(this.options.width*this.options.scale,this.options.height*this.options.scale,this.options.scale,this.options.scale,t),[4,bQ(r)];case 1:return B=n.sent(),this.options.backgroundColor&&(this.ctx.fillStyle=R(this.options.backgroundColor),this.ctx.fillRect(0,0,this.options.width*this.options.scale,this.options.height*this.options.scale)),this.ctx.drawImage(B,-this.options.x*this.options.scale,-this.options.y*this.options.scale),[2,this.canvas]}})})},A}(sn),bQ=function(e){return new Promise(function(A,t){var r=new Image;r.onload=function(){A(r)},r.onerror=t,r.src="data:image/svg+xml;charset=utf-8,"+encodeURIComponent(new XMLSerializer().serializeToString(e))})},xQ=function(){function e(A){var t=A.id,r=A.enabled;this.id=t,this.enabled=r,this.start=Date.now()}return e.prototype.debug=function(){for(var A=[],t=0;t<arguments.length;t++)A[t]=arguments[t];this.enabled&&(typeof window<"u"&&window.console&&typeof console.debug=="function"?console.debug.apply(console,le([this.id,this.getTime()+"ms"],A)):this.info.apply(this,A))},e.prototype.getTime=function(){return Date.now()-this.start},e.prototype.info=function(){for(var A=[],t=0;t<arguments.length;t++)A[t]=arguments[t];this.enabled&&typeof window<"u"&&window.console&&typeof console.info=="function"&&console.info.apply(console,le([this.id,this.getTime()+"ms"],A))},e.prototype.warn=function(){for(var A=[],t=0;t<arguments.length;t++)A[t]=arguments[t];this.enabled&&(typeof window<"u"&&window.console&&typeof console.warn=="function"?console.warn.apply(console,le([this.id,this.getTime()+"ms"],A)):this.info.apply(this,A))},e.prototype.error=function(){for(var A=[],t=0;t<arguments.length;t++)A[t]=arguments[t];this.enabled&&(typeof window<"u"&&window.console&&typeof console.error=="function"?console.error.apply(console,le([this.id,this.getTime()+"ms"],A)):this.info.apply(this,A))},e.instances={},e}(),TQ=function(){function e(A,t){var r;this.windowBounds=t,this.instanceName="#"+e.instanceCount++,this.logger=new xQ({id:this.instanceName,enabled:A.logging}),this.cache=(r=A.cache)!==null&&r!==void 0?r:new AQ(this,A)}return 
e.instanceCount=1,e}(),SQ=function(e,A){return A===void 0&&(A={}),OQ(e,A)};typeof window<"u"&&An.setContext(window);var OQ=function(e,A){return J(void 0,void 0,void 0,function(){var t,r,B,n,s,i,a,o,Q,g,w,f,c,u,H,h,F,K,p,d,v,C,v,y,E,O,k,q,L,V,uA,Y,W,IA,BA,lA,vA,yA,iA,KA;return _(this,function(oA){switch(oA.label){case 0:if(!e||typeof e!="object")return[2,Promise.reject("Invalid element provided as first argument")];if(t=e.ownerDocument,!t)throw new Error("Element is not attached to a Document");if(r=t.defaultView,!r)throw new Error("Document is not attached to a Window");return B={allowTaint:(y=A.allowTaint)!==null&&y!==void 0?y:!1,imageTimeout:(E=A.imageTimeout)!==null&&E!==void 0?E:15e3,proxy:A.proxy,useCORS:(O=A.useCORS)!==null&&O!==void 0?O:!1},n=Lr({logging:(k=A.logging)!==null&&k!==void 0?k:!0,cache:A.cache},B),s={windowWidth:(q=A.windowWidth)!==null&&q!==void 0?q:r.innerWidth,windowHeight:(L=A.windowHeight)!==null&&L!==void 0?L:r.innerHeight,scrollX:(V=A.scrollX)!==null&&V!==void 0?V:r.pageXOffset,scrollY:(uA=A.scrollY)!==null&&uA!==void 0?uA:r.pageYOffset},i=new cA(s.scrollX,s.scrollY,s.windowWidth,s.windowHeight),a=new TQ(n,i),o=(Y=A.foreignObjectRendering)!==null&&Y!==void 0?Y:!1,Q={allowTaint:(W=A.allowTaint)!==null&&W!==void 0?W:!1,onclone:A.onclone,ignoreElements:A.ignoreElements,inlineImages:o,copyStyles:o},a.logger.debug("Starting document clone with size "+i.width+"x"+i.height+" scrolled to "+-i.left+","+-i.top),g=new $t(a,e,Q),w=g.clonedReferenceElement,w?[4,g.toIFrame(t,i)]:[2,Promise.reject("Unable to find element in cloned iframe")];case 1:return f=oA.sent(),c=Bt(w)||Mo(w)?wn(w.ownerDocument):ze(a,w),u=c.width,H=c.height,h=c.left,F=c.top,K=MQ(a,w,A.backgroundColor),p={canvas:A.canvas,backgroundColor:K,scale:(BA=(IA=A.scale)!==null&&IA!==void 0?IA:r.devicePixelRatio)!==null&&BA!==void 0?BA:1,x:((lA=A.x)!==null&&lA!==void 0?lA:0)+h,y:((vA=A.y)!==null&&vA!==void 0?vA:0)+F,width:(yA=A.width)!==null&&yA!==void 0?yA:Math.ceil(u),height:(iA=A.height)!==null&&iA!==void 0?iA:Math.ceil(H)},o?(a.logger.debug("Document cloned, using foreign object rendering"),v=new DQ(a,p),[4,v.render(w)]):[3,3];case 2:return d=oA.sent(),[3,5];case 3:return a.logger.debug("Document cloned, element located at "+h+","+F+" with size "+u+"x"+H+" using computed rendering"),a.logger.debug("Starting DOM parsing"),C=kB(a,w),K===C.styles.backgroundColor&&(C.styles.backgroundColor=wA.TRANSPARENT),a.logger.debug("Starting renderer for element at "+p.x+","+p.y+" with size "+p.width+"x"+p.height),v=new IQ(a,p),[4,v.render(C)];case 4:d=oA.sent(),oA.label=5;case 5:return(!((KA=A.removeContainer)!==null&&KA!==void 0)||KA)&&($t.destroy(f)||a.logger.error("Cannot detach cloned iframe as it is not in the DOM anymore")),a.logger.debug("Finished rendering"),[2,d]}})})},MQ=function(e,A,t){var r=A.ownerDocument,B=r.documentElement?se(e,getComputedStyle(r.documentElement).backgroundColor):wA.TRANSPARENT,n=r.body?se(e,getComputedStyle(r.body).backgroundColor):wA.TRANSPARENT,s=typeof t=="string"?se(e,t):t===null?wA.TRANSPARENT:4294967295;return A===r.documentElement?HA(B)?HA(n)?s:n:B:s};async function an(){await SQ(document.querySelector(".map")).then(e=>{let A=`${document.querySelector("#time-select-start").value}-`;A=`${A}${document.querySelector("#time-select-end").value}_`,A=`${A}${document.querySelector("#vesseltype-select").value}`,A=`${A}.png`;const t=document.createElement("a");t.download=A;const r=e.toDataURL("image/png");t.href=r,document.body.append(t),t.click(),t.remove()})}window.screnshot_single=an;async 
function GQ(e){await on(),e===void 0&&(e={renders:["All","Cargo","Tanker","Fishing","Tug","Pleasure Craft","Passenger","None"]});const A=document.querySelector("#vesseltype-select");for(const t of e.renders)A.value=t,await new Promise(r=>setTimeout(r,500)),Qn(gn),await new Promise(r=>setTimeout(r,500)),await an()}window.screenshot=GQ;export{GQ as screenshot};
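// Usage sketch (hypothetical caller, not part of the bundle). The module
// exports `GQ` as `screenshot` and also exposes `an` as
// `window.screnshot_single` (sic); both assume a page containing the `.map`
// element and the `#time-select-start`, `#time-select-end` and
// `#vesseltype-select` controls queried above.
//
//   import { screenshot } from "./app.js"; // hypothetical module path
//   // Render each vessel type in turn, give the map ~500ms to redraw,
//   // then download one PNG per type via html2canvas.
//   await screenshot({ renders: ["All", "Cargo", "Tanker"] });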
PypiClean
/love_course_2016_2019-2023.3.1.0-py3-none-any.whl/LoveCourse20162019/docs/an-xiao-yao/1-260节聊天课:202.把女生聊嗨,就用这个方法.md
# Chat Course, Lessons 1-260: 202. To Get a Girl Hooked on the Conversation, Use This Method

Hello everyone, I'm Xiaoyu, your girlfriend on the air. Today I'm going to teach you how to chat with girls on WeChat. If you cannot attract women then, by the dictionary definition, you have no ability to pass on your genes: nature will weed them out without mercy. Sounds frightening, doesn't it? But don't worry. Now that you have found this program, you are saved.

As everyone knows, women like high-value men. What does that value include? Let's break it down. High value divides into hard indicators and soft indicators. The hard indicators are appearance, living habits, and financial strength; the soft indicators are lifestyle, social proof, language, bearing and presence, and personality. If you feel you already meet the standard in every one of these areas, there is no need to keep listening to my program; save the time for something useful. If, however, you realize your value is not high and you want to change that right away, keep listening for the answer. And if you want more techniques for pursuing girls, search WeChat for the public account 小路情感男士, follow it, reply with the number 212, then fill in and submit your phone number and WeChat ID, and Teacher Xiaoyu's personal assistant will contact you and send you a free set of dating-chat scripts.

You have just met a girl, you like her, and you want to pursue her. How do you get her to know you and come to like you? Through chatting, of course. Don't assume chatting is easy: conversation is an art that takes constant study, and it is hardest precisely when we chat with an agenda, such as trying to attract the other person. Facing a girl you like, you may stutter or go completely mute, unable to find a topic while the smile gradually drains from her face and the cold sweat breaks out; or online, mid-conversation, she suddenly cuts you off with, "OK, nice chatting, I'm off to take a shower." Did all of that hit home? Don't panic: not knowing how to attract someone through chat is a solvable problem. You only need a few humorous chat techniques, and in today's episode Teacher Xiaoyu will teach you several moves.

Move one: pretend to give in. This method is for when the girl makes some demand of you. Yield to her first, then tease her. For example, when she says, "Stop calling me 'little princess', or I won't talk to you anymore," you can reply, "Fine. Then what should I call you, little princess?" It sounds cheeky, but you can adjust to her reaction. In most cases she will find you maddening in a fun way, feel that you are teasing her at every turn, keep responding, maybe even start trading playful nicknames with you, and the two of you will banter back and forth, laughing as you go.

Move two: deliberately steer into the gutter. This humor technique is common in everyday life: set up a misunderstanding that sends her imagination racing, then explain what you actually meant and tease her for having a dirty mind. For example, the man asks, "Do you know what the hardest thing in the world is? Women love it, especially married women." Then read her expression and her reaction: if she hesitates or smirks, she has taken it the wrong way, and you can reveal the answer: "What were you thinking? I meant diamonds." Of course, this kind of humor is not for everyone and should not be overused, because some girls refuse to discuss anything that suggestive with a man; but when it fits, the effect is quite good.

Move three: pretend to deny, then admit. Girls often press men with questions such as "Have you had a lot of girlfriends?" or "Do you really hate your ex?" You cannot dodge them, so when she puts you on the spot, how do you answer in an amusing way? Say she asks, "Do you chat up girls all the time?" Do you immediately deny it, "No, no, you're the only one I've ever approached," and let the conversation die there? Many men feel that lying is wrong and telling the truth is wrong too, and conclude that the girl will not let them off either way. In that case, simply out-exaggerate her: pretend to deny it first, then admit something absurd to create the humor. For instance, you can reply, "Not often. Only a dozen or so a day." Or, when she asks, "Do you often take girls home?", you can answer, "How could I? It's always the girls who take me home." Doesn't the chat become much more fun that way?

Move four: pretend to agree. When you are chatting with a girl and she says something you do not agree with, how do you respond? Remember: never argue with her, or it will end badly. Suppose she says, "I can't stand men who smoke. It reeks." If you cannot help jumping up to defend your fellow smokers, "You women have more vices than we do, what's wrong with a cigarette?", then she is guaranteed to grab her bag and leave. So what should you do? Use this move: pretend to agree with her first, then turn it into a tease. She says, "I can't stand men who smoke," and you can answer, "Exactly, exactly. Reeking of smoke should be women's business; what are men doing crowding in on the fun?" That way you do not offend her, and she finds the way you talk very entertaining.

Move five: exaggerated praise. This one is easy to understand: compliment her in deliberately over-the-top terms. On the surface it carries a hint of mockery, but in substance you are still praising her, and the exaggeration makes the compliment much more playful. For example, while you are taking her photo, she asks, "Well? Do I look good?" You can reply, "So beautiful you must be a fairy come down to earth." She says, "Take one more," and you answer, "Breathtaking." When she asks, "Does this pose look good?", you can say, "Beautiful to the point of tears; even a supermodel couldn't strike that pose." She says, "Oh, stop it," and you reply, "It's absolutely spot on. You're ready to make your debut." In short, Teacher Xiaoyu tells you: lay it on thick. Praise her so repeatedly and so extravagantly that she quickly realizes you are teasing her, and she will secretly delight in it all the same.

Finally, let's recap the five ways to chat with a girl humorously. First, pretend to give in: when she makes a demand, yield first and then tease her. Second, deliberately steer into the gutter: let her imagination run wild, then tease her for having a dirty mind. Third, pretend to deny, then admit: it creates a humorous effect. Fourth, pretend to agree: you avoid offending her while still making her laugh. Fifth, exaggerated praise: it makes the chat more entertaining. That is all Teacher Xiaoyu will share with you today. So now everyone knows that chatting has to be learned, right? If you cannot chat yet but have made up your mind to learn, follow our WeChat public account 小路情感男士, reply with the number 212, then fill in and submit the form, and Teacher Xiaoyu will send you a free set of dating-chat scripts.
PypiClean
/tensorflow_tflex-1.13.1rc1-cp27-cp27mu-manylinux1_x86_64.whl/tensorflow_tflex-1.13.1rc1.data/purelib/tensorflow/contrib/recurrent/python/ops/functional_rnn.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import copy

from tensorflow.contrib.recurrent.python.ops import recurrent
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest


def _GetDTypesFromStructure(struct):
  dtypes_list = []
  for x in nest.flatten(struct):
    x = ops.convert_to_tensor(x)
    dtypes_list.append(x.dtype)
  return dtypes_list


def _SetShapeFromTemplate(struct, struct_template):
  as_list = nest.flatten(struct)
  template_as_list = nest.flatten(struct_template)
  for element, template in zip(as_list, template_as_list):
    element.set_shape(template.shape)


class _FunctionalRnnCell(object):
  """Wrapper around RNNCell which separates state from computation.

  This class accomplishes the following:
  * Turn the cell's `__call__` function into a pure function. The global
    side effects are separated as `theta`. They are the variables created
    for the weights of the computation.
  * Unless the output is aliased as part of the state, extend the state to
    contain the output so that we store the history in `Recurrent`.
  * Set static shapes as required.
  """

  def __init__(self, rnn_cell, seq_inputs, initial_state):
    assert initial_state is not None

    # TODO(drpng): Dtype needs to be configurable.
    input_dtypes = [seq_inputs.dtype] + _GetDTypesFromStructure(initial_state)
    # See _index.
    like_inputs_t = nest.map_structure(
        lambda x: array_ops.stop_gradient(array_ops.gather(x, 0)), seq_inputs)
    input_structure = (like_inputs_t, initial_state)

    @function.Defun(*input_dtypes)
    def FlatCellStep(*flat_inputs):
      """The flattened version of `rnn_cell`."""
      inputs_t, state0 = nest.pack_sequence_as(input_structure, flat_inputs)
      _SetShapeFromTemplate(state0, initial_state)
      _SetShapeFromTemplate(inputs_t, like_inputs_t)
      outputs_t, state1 = rnn_cell(inputs_t, state0)
      state_list = nest.flatten(state1)
      self._output_shape = outputs_t.shape

      if outputs_t in state_list:
        output_index_in_state = state_list.index(outputs_t)
      else:
        output_index_in_state = None

      if output_index_in_state is None:
        self._prepend_output = True
        self._output_state_idx = 0
        return [outputs_t] + state_list
      else:
        self._output_state_idx = output_index_in_state
        self._prepend_output = False
        # To save memory, we don't return the output separately from the
        # state list, since we know it's the same.
        return state_list

    def _ToPureFunction(func):
      # NOTE: This forces the creation of the function.
      if func.captured_inputs:
        pure_func = copy.copy(func)
        # pylint: disable=protected-access
        pure_func._extra_inputs = []
        return pure_func
      return func

    pure_flat_cell_step = _ToPureFunction(FlatCellStep)

    def CellStep(theta, extended_state0, inputs_t):
      """Performs one time step on structured inputs.

      The purpose of this function is to turn the parameters into flattened
      versions, and to resolve the parameter order difference between
      `Recurrent` and `RNNCell`.

      In the event the cell returns a transformed output that is not aliased
      within its state, the `extended_state0` also contains the output as its
      first element.

      Args:
        theta: Weights required for the computation. A structure of tensors.
        extended_state0: the state0, and possibly the output at the previous
          time step. A structure of tensors.
        inputs_t: the inputs at time t.

      Returns:
        A pair of the next state (inclusive of the output), and an empty list
        (unused `extras`). The next state is congruent to state0.
      """
      extended_state0_flat = nest.flatten(extended_state0)
      state0_flat = self.MaybeRemoveOutputFromState(extended_state0_flat)
      full_inputs = [inputs_t] + state0_flat + theta
      # Note that the thetas are additional inputs appended as extra
      # parameters.
      cell_out = pure_flat_cell_step(*full_inputs)
      return cell_out, []

    self._cell_step = CellStep
    self._theta = FlatCellStep.captured_inputs
    self._zero_state = rnn_cell.zero_state
    self._state_template = initial_state
    self._output_size = rnn_cell.output_size

  @property
  def extended_initial_state(self):
    if self._prepend_output:
      return [array_ops.zeros(
          self._output_shape,
          dtype=_GetDTypesFromStructure(self._state_template)[0]),
          self._state_template]
    else:
      # The base case, where the output is just the hidden state.
      return self._state_template

  @property
  def cell_step(self):
    return self._cell_step

  @property
  def theta(self):
    return self._theta

  @property
  def state_template(self):
    return self._state_template

  @property
  def output_shape(self):
    return self._output_shape

  def GetOutputFromState(self, state):
    return nest.flatten(state)[self._output_state_idx]

  def MaybeRemoveOutputFromState(self, flat_state):
    if self._prepend_output:
      return flat_state[1:]
    return flat_state


def _ApplyLengthsToBatch(sequence_lengths, tf_output):
  # TODO(drpng): just use Update so that we don't carry over the gradients?
  """Sets the output to be zero at the end of the sequence."""
  # output is batch major.
  shape = array_ops.shape(tf_output)
  batch_size, max_time, vector_size = shape[0], shape[1], shape[2]
  output_time = array_ops.tile(math_ops.range(0, max_time), [batch_size])
  output_time = array_ops.reshape(output_time, [batch_size, max_time])
  lengths = array_ops.tile(
      array_ops.reshape(sequence_lengths, [-1, 1]), [1, max_time])
  is_less = math_ops.cast(
      math_ops.less(output_time, lengths), dtype=tf_output.dtype)
  keep_mask = array_ops.tile(
      array_ops.expand_dims(is_less, -1), [1, 1, vector_size])
  final_output = keep_mask * tf_output
  return final_output


def _PickFinalStateFromHistory(acc_state, sequence_length):
  """Implements acc_state[sequence_length - 1]."""
  # This will work on all platforms, unlike the regular slice.
  last_value = []
  for state_var in nest.flatten(acc_state):
    # We compute the following with matrix operations:
    # last_var = state_var[sequence_length - 1]
    shape = array_ops.shape(state_var)
    max_time, batch_size = shape[0], shape[1]
    output_time = array_ops.tile(math_ops.range(0, max_time), [batch_size])
    output_time = array_ops.reshape(output_time, [batch_size, max_time])
    lengths = array_ops.tile(array_ops.reshape(sequence_length, [-1, 1]),
                             [1, max_time])
    last_idx = math_ops.cast(math_ops.equal(output_time, lengths - 1),
                             dtype=state_var.dtype)
    last_idx = array_ops.transpose(last_idx)
    last_idx_for_bcast = array_ops.expand_dims(last_idx, -1)
    sliced = math_ops.multiply(last_idx_for_bcast, state_var)
    last_var = math_ops.reduce_sum(sliced, 0)
    last_value += [last_var]
  return nest.pack_sequence_as(acc_state, last_value)


def _PostProcessOutput(extended_acc_state, extended_final_state, func_cell,
                       total_time, inputs_lengths, is_reversed):
  """Post-process output of recurrent.

  This function takes the accumulated extended state and extracts the
  requested state and output.

  When `inputs_lengths` has been set, it extracts the output from the
  accumulated state. It also zeroes out outputs past each entry of
  `inputs_lengths`.

  When `is_reversed` is true, the output will be reversed in this function.

  It also sets the static shape information.
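
  For example (hypothetical sizes, and assuming the cell's output was
  prepended to the state rather than aliased inside it): with
  `total_time = 5`, a batch of 2 and a cell output of shape `[2, 8]`, the
  accumulated output inside `extended_acc_state` is time-major with shape
  `[5, 2, 8]`, and the returned `tf_output` is its batch-major `[2, 5, 8]`
  transpose.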
Args: extended_acc_state: A structure containing the accumulated state at each time. It may contain the output at each time as well. extended_final_state: A structure containing the final state. It may contain the output at the final time. func_cell: The functional wrapper around the cell. total_time: A scalar integer tensor. inputs_lengths: An integer tensor with one entry per batch element, giving the length of each sequence. is_reversed: A boolean to indicate if the sequence is reversed. Returns: A tuple with the outputs at each time, and the final state. """ if inputs_lengths is None or is_reversed: flat_final_state = func_cell.MaybeRemoveOutputFromState( nest.flatten(extended_final_state)) tf_state = nest.pack_sequence_as(func_cell.state_template, flat_final_state) else: # The accumulated state is over the entire sequence, so we pick it # out from the acc_state sequence. flat_acc_state = func_cell.MaybeRemoveOutputFromState( nest.flatten(extended_acc_state)) acc_state = nest.pack_sequence_as( func_cell.state_template, flat_acc_state) tf_state = _PickFinalStateFromHistory(acc_state, inputs_lengths) output_from_state = func_cell.GetOutputFromState(extended_acc_state) if is_reversed: output_from_state = array_ops.reverse(output_from_state, [0]) tf_output = array_ops.transpose(output_from_state, [1, 0, 2]) tf_output.set_shape( [func_cell.output_shape[0], total_time, func_cell.output_shape[1]]) if inputs_lengths is not None: # Need to set the outputs past the sequence lengths to zero. tf_output = _ApplyLengthsToBatch(inputs_lengths, tf_output) _SetShapeFromTemplate(tf_state, func_cell.state_template) return tf_output, tf_state # pylint: disable=invalid-name def functional_rnn(cell, inputs, sequence_length=None, initial_state=None, dtype=None, time_major=False, scope=None, use_tpu=False, reverse=False): """Same interface as `tf.nn.dynamic_rnn`.""" with variable_scope.variable_scope(scope or 'rnn'): if not time_major: inputs = nest.map_structure( lambda t: array_ops.transpose(t, [1, 0, 2]), inputs) inputs_flat = nest.flatten(inputs) batch_size = array_ops.shape(inputs_flat[0])[1] if initial_state is None: initial_state = cell.zero_state(batch_size, dtype) func_cell = _FunctionalRnnCell(cell, inputs, initial_state) if sequence_length is not None: max_length = math_ops.reduce_max(sequence_length) else: max_length = None if reverse: inputs = array_ops.reverse(inputs, [0]) extended_acc_state, extended_final_state = recurrent.Recurrent( theta=func_cell.theta, state0=func_cell.extended_initial_state, inputs=inputs, cell_fn=func_cell.cell_step, max_input_length=max_length, use_tpu=use_tpu, aligned_end=reverse) tf_output, tf_state = _PostProcessOutput( extended_acc_state, extended_final_state, func_cell, inputs_flat[0].shape[0], sequence_length, is_reversed=reverse) if time_major: tf_output = array_ops.transpose(tf_output, [1, 0, 2]) return tf_output, tf_state def bidirectional_functional_rnn(cell_fw, cell_bw, inputs, initial_state_fw=None, initial_state_bw=None, dtype=None, sequence_length=None, time_major=False, use_tpu=False, fast_reverse=False, scope=None): """Creates a bidirectional recurrent neural network. Performs fully dynamic unrolling of inputs in both directions. Built to be API compatible with `tf.nn.bidirectional_dynamic_rnn`, but implemented with functional control flow for TPU compatibility. Args: cell_fw: An instance of `tf.contrib.rnn.RNNCell`. cell_bw: An instance of `tf.contrib.rnn.RNNCell`. inputs: The RNN inputs. If time_major == False (default), this must be a Tensor (or hierarchical structure of Tensors) of shape [batch_size, max_time, ...].
If time_major == True, this must be a Tensor (or hierarchical structure of Tensors) of shape: [max_time, batch_size, ...]. The first two dimensions must match across all the inputs, but otherwise the ranks and other shape components may differ. initial_state_fw: An optional initial state for `cell_fw`. Should match `cell_fw.zero_state` in structure and type. initial_state_bw: An optional initial state for `cell_bw`. Should match `cell_bw.zero_state` in structure and type. dtype: (optional) The data type for the initial state and expected output. Required if initial_states are not provided or RNN state has a heterogeneous dtype. sequence_length: An optional int32/int64 vector sized [batch_size]. Used to copy-through state and zero-out outputs when past a batch element's sequence length. So it's more for correctness than performance. time_major: Whether the `inputs` tensor is in "time major" format. use_tpu: Whether to enable TPU-compatible operation. If True, does not truly reverse `inputs` in the backwards RNN. Once b/69305369 is fixed, we can remove this flag. fast_reverse: Whether to use fast tf.reverse to replace tf.reverse_sequence. This is only possible when either all sequence lengths are the same inside the batch, or when the cell function does not change the state on padded input. scope: An optional scope name for the dynamic RNN. Returns: outputs: A tuple of `(output_fw, output_bw)`. The output of the forward and backward RNN. If time_major == False (default), these will be Tensors shaped: [batch_size, max_time, cell.output_size]. If time_major == True, these will be Tensors shaped: [max_time, batch_size, cell.output_size]. Note, if cell.output_size is a (possibly nested) tuple of integers or TensorShape objects, then the output for that direction will be a tuple having the same structure as cell.output_size, containing Tensors having shapes corresponding to the shape data in cell.output_size. final_states: A tuple of `(final_state_fw, final_state_bw)`. A Tensor or hierarchical structure of Tensors indicating the final cell state in each direction. Must have the same structure and shape as cell.zero_state. Raises: ValueError: If `initial_state_fw` is None or `initial_state_bw` is None and `dtype` is not provided. """ # Keep this code in sync with tf.nn.dynamic_rnn for compatibility. with variable_scope.variable_scope(scope or 'bidirectional_rnn'): # Forward direction with variable_scope.variable_scope('fw') as fw_scope: output_fw, output_state_fw = functional_rnn( cell=cell_fw, inputs=inputs, sequence_length=sequence_length, initial_state=initial_state_fw, dtype=dtype, time_major=time_major, scope=fw_scope, use_tpu=use_tpu) # Backward direction if not time_major: time_dim = 1 batch_dim = 0 else: time_dim = 0 batch_dim = 1 def _reverse(input_, seq_lengths, seq_dim, batch_dim): if seq_lengths is not None: return array_ops.reverse_sequence( input=input_, seq_lengths=seq_lengths, seq_dim=seq_dim, batch_dim=batch_dim) else: # See b/69305369. 
assert not use_tpu, ( 'Bidirectional with variable sequence lengths unsupported on TPU') return array_ops.reverse(input_, axis=[seq_dim]) with variable_scope.variable_scope('bw') as bw_scope: if not fast_reverse: inputs = _reverse( inputs, seq_lengths=sequence_length, seq_dim=time_dim, batch_dim=batch_dim) output_bw, output_state_bw = functional_rnn( cell=cell_bw, inputs=inputs, sequence_length=sequence_length, initial_state=initial_state_bw, dtype=dtype, time_major=time_major, scope=bw_scope, use_tpu=use_tpu, reverse=fast_reverse) if not fast_reverse: output_bw = _reverse( output_bw, seq_lengths=sequence_length, seq_dim=time_dim, batch_dim=batch_dim) outputs = (output_fw, output_bw) output_states = (output_state_fw, output_state_bw) return (outputs, output_states) # pylint: enable=invalid-name
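The index-mask trick used by _ApplyLengthsToBatch and _PickFinalStateFromHistory can be illustrated in plain NumPy (a sketch added here for exposition; the lengths and shapes are hypothetical). Comparing a tiled time index against the sequence lengths yields a keep-mask, and an equality test against length - 1 yields a one-hot selector for the final valid step, which _PickFinalStateFromHistory multiplies in and reduce-sums over time instead of slicing:

import numpy as np

lengths = np.array([3, 1])   # per-example sequence lengths (illustrative)
max_time = 4
time_idx = np.tile(np.arange(max_time), (len(lengths), 1))          # [batch, time]
keep_mask = (time_idx < lengths[:, None]).astype(np.float32)        # zero past length
last_step = (time_idx == lengths[:, None] - 1).astype(np.float32)   # one-hot at t = len - 1
print(keep_mask)  # [[1. 1. 1. 0.]
                  #  [1. 0. 0. 0.]]
print(last_step)  # [[0. 0. 1. 0.]
                  #  [1. 0. 0. 0.]]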
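For orientation, here is a minimal usage sketch (not part of the original file). It assumes TF 1.x graph mode, a stock tf.nn.rnn_cell.LSTMCell, and illustrative shapes; functional_rnn is called just like tf.nn.dynamic_rnn:

import numpy as np
import tensorflow as tf
from tensorflow.contrib.recurrent.python.ops import functional_rnn

batch_size, max_time, input_size, num_units = 4, 7, 3, 5
inputs = tf.placeholder(tf.float32, [batch_size, max_time, input_size])
lengths = tf.placeholder(tf.int32, [batch_size])
cell = tf.nn.rnn_cell.LSTMCell(num_units)

# Same call signature as tf.nn.dynamic_rnn: outputs are zeroed past each
# sequence length, and the final state is selected from the history.
outputs, final_state = functional_rnn.functional_rnn(
    cell, inputs, sequence_length=lengths, dtype=tf.float32)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out = sess.run(outputs, {
      inputs: np.zeros([batch_size, max_time, input_size], np.float32),
      lengths: np.array([7, 5, 3, 1], np.int32)})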
/mmcif-0.69.tar.gz/mmcif-0.69/modules/pybind11/.github/ISSUE_TEMPLATE/feature-request.md
--- name: Feature Request about: File an issue about adding a feature title: "[FEAT] " --- Make sure you've completed the following steps before submitting your issue -- thank you! 1. Check if your feature has already been mentioned / rejected / planned in other issues. 2. If those resources didn't help, consider asking in the [Gitter chat room][] to see if the feature is interesting / useful to a larger audience and reasonable to implement. 3. If you have a useful feature that passes the previous items (or one that is not suitable for chat), please fill in the details below. [Gitter chat room]: https://gitter.im/pybind/Lobby *After reading, remove this checklist.*
/GladTeX-3.1.0.tar.gz/GladTeX-3.1.0/gleetex/unicode.py
import enum class LaTeXMode(enum.Enum): """Represent either math or text mode. Math mode in LaTeX is e.g. everything between $ and $.""" mathmode = 1 textmode = 0 unicode_table = { 161: {LaTeXMode.textmode: '\\textexclamdown ', LaTeXMode.mathmode: '\\textexclamdown'}, 162: {LaTeXMode.textmode: '\\textcent ', LaTeXMode.mathmode: '\\mbox{\\textcent}'}, 163: {LaTeXMode.textmode: '\\textsterling ', LaTeXMode.mathmode: '\\sterling'}, 164: {LaTeXMode.textmode: '\\textcurrency ', LaTeXMode.mathmode: '\\mbox{\\textcurrency}'}, 165: {LaTeXMode.textmode: '\\textyen ', LaTeXMode.mathmode: '\\yen'}, 166: {LaTeXMode.textmode: '\\textbrokenbar '}, 167: {LaTeXMode.textmode: '\\textsection ', LaTeXMode.mathmode: '\\S'}, 168: {LaTeXMode.textmode: '\\textasciidieresis '}, 169: {LaTeXMode.textmode: '\\textcopyright ', LaTeXMode.mathmode: '\\copyright'}, 170: {LaTeXMode.textmode: '\\textordfeminine '}, 171: {LaTeXMode.textmode: '\\guillemotleft ', LaTeXMode.mathmode: '\\mbox{\\guillemotleft}'}, 172: {LaTeXMode.textmode: '\\lnot ', LaTeXMode.mathmode: '\\neg'}, 173: {LaTeXMode.textmode: '\\-'}, 174: {LaTeXMode.textmode: '\\textregistered ', LaTeXMode.mathmode: '\\circledR'}, 175: {LaTeXMode.textmode: '\\textasciimacron '}, 176: {LaTeXMode.textmode: '\\textdegree ', LaTeXMode.mathmode: '\\degree'}, 177: {LaTeXMode.textmode: '\\pm ', LaTeXMode.mathmode: '\\pm'}, 178: {LaTeXMode.textmode: '{^2}', LaTeXMode.mathmode: '^2'}, 179: {LaTeXMode.textmode: '{^3}', LaTeXMode.mathmode: '^3'}, 180: {LaTeXMode.textmode: '\\textasciiacute ', LaTeXMode.mathmode: '\\textasciiacute'}, 181: {LaTeXMode.textmode: '\\mathrm{\\mu}', LaTeXMode.mathmode: '\\mu'}, 182: {LaTeXMode.textmode: '\\textparagraph ', LaTeXMode.mathmode: '\\P'}, 183: {LaTeXMode.textmode: '\\cdot ', LaTeXMode.mathmode: '\\cdotp'}, 184: {LaTeXMode.textmode: '\\c{}', LaTeXMode.mathmode: '\\mbox{\\c{}}'}, 185: {LaTeXMode.textmode: '{^1}', LaTeXMode.mathmode: '^1'}, 186: {LaTeXMode.textmode: '\\textordmasculine '}, 187: {LaTeXMode.textmode: '\\guillemotright ', LaTeXMode.mathmode: '\\mbox{\\guillemotright}'}, 188: {LaTeXMode.textmode: '\\textonequarter '}, 189: {LaTeXMode.textmode: '\\textonehalf '}, 190: {LaTeXMode.textmode: '\\textthreequarters '}, 191: {LaTeXMode.textmode: '\\textquestiondown ', LaTeXMode.mathmode: '\\textquestiondown'}, 192: {LaTeXMode.textmode: '\\`{A}', LaTeXMode.mathmode: '\\grave{A}'}, 193: {LaTeXMode.textmode: "\\'{A}", LaTeXMode.mathmode: '\\acute{A}'}, 194: {LaTeXMode.textmode: '\\^{A}', LaTeXMode.mathmode: '\\hat{A}'}, 195: {LaTeXMode.textmode: '\\~{A}', LaTeXMode.mathmode: '\\tilde{A}'}, 196: {LaTeXMode.textmode: '\\"{A}', LaTeXMode.mathmode: '\\ddot{A}'}, 197: {LaTeXMode.textmode: '\\AA '}, 198: {LaTeXMode.textmode: '\\AE ', LaTeXMode.mathmode: '\\AE'}, 199: {LaTeXMode.textmode: '\\c{C}', LaTeXMode.mathmode: '\\mbox{\\c{C}}'}, 200: {LaTeXMode.textmode: '\\`{E}', LaTeXMode.mathmode: '\\grave{E}'}, 201: {LaTeXMode.textmode: "\\'{E}", LaTeXMode.mathmode: '\\acute{E}'}, 202: {LaTeXMode.textmode: '\\^{E}', LaTeXMode.mathmode: '\\hat{E}'}, 203: {LaTeXMode.textmode: '\\"{E}', LaTeXMode.mathmode: '\\ddot{E}'}, 204: {LaTeXMode.textmode: '\\`{I}', LaTeXMode.mathmode: '\\grave{I}'}, 205: {LaTeXMode.textmode: "\\'{I}", LaTeXMode.mathmode: '\\acute{I}'}, 206: {LaTeXMode.textmode: '\\^{I}', LaTeXMode.mathmode: '\\hat{I}'}, 207: {LaTeXMode.textmode: '\\"{I}', LaTeXMode.mathmode: '\\ddot{I}'}, 208: {LaTeXMode.textmode: '\\DH '}, 209: {LaTeXMode.textmode: '\\~{N}', LaTeXMode.mathmode: '\\tilde{N}'}, 210: {LaTeXMode.textmode: '\\`{O}', 
LaTeXMode.mathmode: '\\grave{O}'}, 211: {LaTeXMode.textmode: "\\'{O}", LaTeXMode.mathmode: '\\acute{O}'}, 212: {LaTeXMode.textmode: '\\^{O}', LaTeXMode.mathmode: '\\hat{O}'}, 213: {LaTeXMode.textmode: '\\~{O}', LaTeXMode.mathmode: '\\tilde{O}'}, 214: {LaTeXMode.textmode: '\\"{O}', LaTeXMode.mathmode: '\\ddot{O}'}, 215: {LaTeXMode.textmode: '\\texttimes ', LaTeXMode.mathmode: '\\times'}, 216: {LaTeXMode.textmode: '\\O ', LaTeXMode.mathmode: '\\O'}, 217: {LaTeXMode.textmode: '\\`{U}', LaTeXMode.mathmode: '\\grave{U}'}, 218: {LaTeXMode.textmode: "\\'{U}", LaTeXMode.mathmode: '\\acute{U}'}, 219: {LaTeXMode.textmode: '\\^{U}', LaTeXMode.mathmode: '\\hat{U}'}, 220: {LaTeXMode.textmode: '\\"{U}', LaTeXMode.mathmode: '\\ddot{U}'}, 221: {LaTeXMode.textmode: "\\'{Y}", LaTeXMode.mathmode: '\\acute{Y}'}, 222: {LaTeXMode.textmode: '\\TH '}, 223: {LaTeXMode.textmode: '\\ss ', LaTeXMode.mathmode: '\\ss'}, 224: {LaTeXMode.textmode: '\\`{a}', LaTeXMode.mathmode: '\\grave{a}'}, 225: {LaTeXMode.textmode: "\\'{a}", LaTeXMode.mathmode: '\\acute{a}'}, 226: {LaTeXMode.textmode: '\\^{a}', LaTeXMode.mathmode: '\\hat{a}'}, 227: {LaTeXMode.textmode: '\\~{a}', LaTeXMode.mathmode: '\\tilde{a}'}, 228: {LaTeXMode.textmode: '\\"{a}', LaTeXMode.mathmode: '\\ddot{a}'}, 229: {LaTeXMode.textmode: '\\aa '}, 230: {LaTeXMode.textmode: '\\ae ', LaTeXMode.mathmode: '\\ae'}, 231: {LaTeXMode.textmode: '\\c{c}', LaTeXMode.mathmode: '\\mbox{\\c{c}}'}, 232: {LaTeXMode.textmode: '\\`{e}', LaTeXMode.mathmode: '\\grave{e}'}, 233: {LaTeXMode.textmode: "\\'{e}", LaTeXMode.mathmode: '\\acute{e}'}, 234: {LaTeXMode.textmode: '\\^{e}', LaTeXMode.mathmode: '\\hat{e}'}, 235: {LaTeXMode.textmode: '\\"{e}', LaTeXMode.mathmode: '\\ddot{e}'}, 236: {LaTeXMode.textmode: '\\`{\\i}', LaTeXMode.mathmode: '\\grave{\\imath}'}, 237: {LaTeXMode.textmode: "\\'{\\i}", LaTeXMode.mathmode: '\\acute{\\imath}'}, 238: {LaTeXMode.textmode: '\\^{\\i}', LaTeXMode.mathmode: '\\hat{\\imath}'}, 239: {LaTeXMode.textmode: '\\"{\\i}', LaTeXMode.mathmode: '\\ddot{\\imath}'}, 240: {LaTeXMode.textmode: '\\dh ', LaTeXMode.mathmode: '\\eth'}, 241: {LaTeXMode.textmode: '\\~{n}', LaTeXMode.mathmode: '\\tilde{n}'}, 242: {LaTeXMode.textmode: '\\`{o}', LaTeXMode.mathmode: '\\grave{o}'}, 243: {LaTeXMode.textmode: "\\'{o}", LaTeXMode.mathmode: '\\acute{o}'}, 244: {LaTeXMode.textmode: '\\^{o}', LaTeXMode.mathmode: '\\hat{o}'}, 245: {LaTeXMode.textmode: '\\~{o}', LaTeXMode.mathmode: '\\tilde{o}'}, 246: {LaTeXMode.textmode: '\\"{o}', LaTeXMode.mathmode: '\\ddot{o}'}, 247: {LaTeXMode.textmode: '\\div ', LaTeXMode.mathmode: '\\div'}, 248: {LaTeXMode.textmode: '\\o ', LaTeXMode.mathmode: '\\o'}, 249: {LaTeXMode.textmode: '\\`{u}', LaTeXMode.mathmode: '\\grave{u}'}, 250: {LaTeXMode.textmode: "\\'{u}", LaTeXMode.mathmode: '\\acute{u}'}, 251: {LaTeXMode.textmode: '\\^{u}', LaTeXMode.mathmode: '\\hat{u}'}, 252: {LaTeXMode.textmode: '\\"{u}', LaTeXMode.mathmode: '\\ddot{u}'}, 253: {LaTeXMode.textmode: "\\'{y}", LaTeXMode.mathmode: '\\acute{y}'}, 254: {LaTeXMode.textmode: '\\th '}, 255: {LaTeXMode.textmode: '\\"{y}', LaTeXMode.mathmode: '\\ddot{y}'}, 256: {LaTeXMode.textmode: '\\={A}', LaTeXMode.mathmode: '\\bar{A}'}, 257: {LaTeXMode.textmode: '\\={a}', LaTeXMode.mathmode: '\\bar{a}'}, 258: {LaTeXMode.textmode: '\\u{A}', LaTeXMode.mathmode: '\\breve{A}'}, 259: {LaTeXMode.textmode: '\\u{a}', LaTeXMode.mathmode: '\\u{a}'}, 260: {LaTeXMode.textmode: '\\k{A}'}, 261: {LaTeXMode.textmode: '\\k{a}'}, 262: {LaTeXMode.textmode: "\\'{C}", LaTeXMode.mathmode: '\\acute{C}'}, 263: {LaTeXMode.textmode: 
"\\'{c}", LaTeXMode.mathmode: '\\acute{c}'}, 264: {LaTeXMode.textmode: '\\^{C}', LaTeXMode.mathmode: '\\hat{C}'}, 265: {LaTeXMode.textmode: '\\^{c}', LaTeXMode.mathmode: '\\hat{c}'}, 266: {LaTeXMode.textmode: '\\.{C}', LaTeXMode.mathmode: '\\dot{C}'}, 267: {LaTeXMode.textmode: '\\.{c}', LaTeXMode.mathmode: '\\dot{c}'}, 268: {LaTeXMode.textmode: '\\v{C}', LaTeXMode.mathmode: '\\check{C}'}, 269: {LaTeXMode.textmode: '\\v{c}', LaTeXMode.mathmode: '\\check{c}'}, 270: {LaTeXMode.textmode: '\\v{D}', LaTeXMode.mathmode: '\\check{D}'}, 271: {LaTeXMode.textmode: '\\v{d}', LaTeXMode.mathmode: '\\check{d}'}, 272: {LaTeXMode.textmode: '\\DJ '}, 273: {LaTeXMode.textmode: '\\dj '}, 274: {LaTeXMode.textmode: '\\={E}', LaTeXMode.mathmode: '\\bar{E}'}, 275: {LaTeXMode.textmode: '\\={e}', LaTeXMode.mathmode: '\\bar{e}'}, 276: {LaTeXMode.textmode: '\\u{E}', LaTeXMode.mathmode: '\\breve{E}'}, 277: {LaTeXMode.textmode: '\\u{e}', LaTeXMode.mathmode: '\\breve{e}'}, 278: {LaTeXMode.textmode: '\\.{E}', LaTeXMode.mathmode: '\\dot{E}'}, 279: {LaTeXMode.textmode: '\\.{e}', LaTeXMode.mathmode: '\\dot{e}'}, 280: {LaTeXMode.textmode: '\\k{E}', LaTeXMode.mathmode: '\\k{E}'}, 281: {LaTeXMode.textmode: '\\k{e}'}, 282: {LaTeXMode.textmode: '\\v{E}', LaTeXMode.mathmode: '\\check{E}'}, 283: {LaTeXMode.textmode: '\\v{e}', LaTeXMode.mathmode: '\\check{e}'}, 284: {LaTeXMode.textmode: '\\^{G}', LaTeXMode.mathmode: '\\hat{G}'}, 285: {LaTeXMode.textmode: '\\^{g}', LaTeXMode.mathmode: '\\hat{g}'}, 286: {LaTeXMode.textmode: '\\u{G}', LaTeXMode.mathmode: '\\breve{G}'}, 287: {LaTeXMode.textmode: '\\u{g}', LaTeXMode.mathmode: '\\breve{g}'}, 288: {LaTeXMode.textmode: '\\.{G}', LaTeXMode.mathmode: '\\dot{G}'}, 289: {LaTeXMode.textmode: '\\.{g}', LaTeXMode.mathmode: '\\dot{g}'}, 290: {LaTeXMode.textmode: '\\c{G}', LaTeXMode.mathmode: '\\mbox{\\c{G}}'}, 291: {LaTeXMode.textmode: '\\c{g}', LaTeXMode.mathmode: '\\mbox{\\c{g}}'}, 292: {LaTeXMode.textmode: '\\^{H}', LaTeXMode.mathmode: '\\hat{H}'}, 293: {LaTeXMode.textmode: '\\^{h}', LaTeXMode.mathmode: '\\hat{h}'}, 294: {LaTeXMode.textmode: '{\\fontencoding{LELA}\\selectfont\\char40}'}, 295: {LaTeXMode.textmode: '\\Elzxh '}, 296: {LaTeXMode.textmode: '\\~{I}', LaTeXMode.mathmode: '\\tilde{I}'}, 297: {LaTeXMode.textmode: '\\~{\\i}', LaTeXMode.mathmode: '\\tilde{\\imath}'}, 298: {LaTeXMode.textmode: '\\={I}', LaTeXMode.mathmode: '\\bar{I}'}, 299: {LaTeXMode.textmode: '\\={\\i}', LaTeXMode.mathmode: '\\bar{\\imath}'}, 300: {LaTeXMode.textmode: '\\u{I}', LaTeXMode.mathmode: '\\breve{I}'}, 301: {LaTeXMode.textmode: '\\u{\\i}', LaTeXMode.mathmode: '\\breve{\\imath}'}, 302: {LaTeXMode.textmode: '\\k{I}'}, 303: {LaTeXMode.textmode: '\\k{i}'}, 304: {LaTeXMode.textmode: '\\.{I}', LaTeXMode.mathmode: '\\dot{I}'}, 305: {LaTeXMode.textmode: '\\i ', LaTeXMode.mathmode: '\\imath'}, 306: {LaTeXMode.textmode: 'IJ'}, 307: {LaTeXMode.textmode: 'ij'}, 308: {LaTeXMode.textmode: '\\^{J}', LaTeXMode.mathmode: '\\hat{J}'}, 309: {LaTeXMode.textmode: '\\^{\\j}', LaTeXMode.mathmode: '\\hat{\\jmath}'}, 310: {LaTeXMode.textmode: '\\c{K}', LaTeXMode.mathmode: '\\mbox{\\c{K}}'}, 311: {LaTeXMode.textmode: '\\c{k}', LaTeXMode.mathmode: '\\mbox{\\c{k}}'}, 312: {LaTeXMode.textmode: '{\\fontencoding{LELA}\\selectfont\\char91}'}, 313: {LaTeXMode.textmode: "\\'{L}", LaTeXMode.mathmode: '\\acute{L}'}, 314: {LaTeXMode.textmode: "\\'{l}", LaTeXMode.mathmode: '\\acute{l}'}, 315: {LaTeXMode.textmode: '\\c{L}', LaTeXMode.mathmode: '\\mbox{\\c{L}}'}, 316: {LaTeXMode.textmode: '\\c{l}', LaTeXMode.mathmode: '\\mbox{\\c{l}}'}, 317: 
{LaTeXMode.textmode: '\\v{L}', LaTeXMode.mathmode: '\\check{L}'}, 318: {LaTeXMode.textmode: '\\v{l}', LaTeXMode.mathmode: '\\check{l}'}, 319: {LaTeXMode.textmode: '{\\fontencoding{LELA}\\selectfont\\char201}'}, 320: {LaTeXMode.textmode: '{\\fontencoding{LELA}\\selectfont\\char202}'}, 321: {LaTeXMode.textmode: '\\L ', LaTeXMode.mathmode: '\\L'}, 322: {LaTeXMode.textmode: '\\l ', LaTeXMode.mathmode: '\\l'}, 323: {LaTeXMode.textmode: "\\'{N}", LaTeXMode.mathmode: '\\acute{N}'}, 324: {LaTeXMode.textmode: "\\'{n}", LaTeXMode.mathmode: '\\acute{n}'}, 325: {LaTeXMode.textmode: '\\c{N}', LaTeXMode.mathmode: '\\mbox{\\c{N}}'}, 326: {LaTeXMode.textmode: '\\c{n}', LaTeXMode.mathmode: '\\mbox{\\c{n}}'}, 327: {LaTeXMode.textmode: '\\v{N}', LaTeXMode.mathmode: '\\check{N}'}, 328: {LaTeXMode.textmode: '\\v{n}', LaTeXMode.mathmode: '\\check{n}'}, 329: {LaTeXMode.textmode: "'n"}, 330: {LaTeXMode.textmode: '\\NG '}, 331: {LaTeXMode.textmode: '\\ng '}, 332: {LaTeXMode.textmode: '\\={O}', LaTeXMode.mathmode: '\\bar{O}'}, 333: {LaTeXMode.textmode: '\\={o}', LaTeXMode.mathmode: '\\bar{o}'}, 334: {LaTeXMode.textmode: '\\u{O}', LaTeXMode.mathmode: '\\breve{O}'}, 335: {LaTeXMode.textmode: '\\u{o}', LaTeXMode.mathmode: '\\breve{o}'}, 336: {LaTeXMode.textmode: '\\H{O}', LaTeXMode.mathmode: '\\mbox{\\H{O}}'}, 337: {LaTeXMode.textmode: '\\H{o}', LaTeXMode.mathmode: '\\mbox{\\H{o}}'}, 338: {LaTeXMode.textmode: '\\OE ', LaTeXMode.mathmode: '\\OE'}, 339: {LaTeXMode.textmode: '\\oe ', LaTeXMode.mathmode: '\\oe'}, 340: {LaTeXMode.textmode: "\\'{R}", LaTeXMode.mathmode: '\\acute{R}'}, 341: {LaTeXMode.textmode: "\\'{r}", LaTeXMode.mathmode: '\\acute{r}'}, 342: {LaTeXMode.textmode: '\\c{R}', LaTeXMode.mathmode: '\\mbox{\\c{R}}'}, 343: {LaTeXMode.textmode: '\\c{r}', LaTeXMode.mathmode: '\\mbox{\\c{r}}'}, 344: {LaTeXMode.textmode: '\\v{R}', LaTeXMode.mathmode: '\\check{R}'}, 345: {LaTeXMode.textmode: '\\v{r}', LaTeXMode.mathmode: '\\check{r}'}, 346: {LaTeXMode.textmode: "\\'{S}", LaTeXMode.mathmode: '\\acute{S}'}, 347: {LaTeXMode.textmode: "\\'{s}", LaTeXMode.mathmode: '\\acute{s}'}, 348: {LaTeXMode.textmode: '\\^{S}', LaTeXMode.mathmode: '\\hat{S}'}, 349: {LaTeXMode.textmode: '\\^{s}', LaTeXMode.mathmode: '\\hat{s}'}, 350: {LaTeXMode.textmode: '\\c{S}', LaTeXMode.mathmode: '\\mbox{\\c{S}}'}, 351: {LaTeXMode.textmode: '\\c{s}', LaTeXMode.mathmode: '\\mbox{\\c{s}}'}, 352: {LaTeXMode.textmode: '\\v{S}', LaTeXMode.mathmode: '\\check{S}'}, 353: {LaTeXMode.textmode: '\\v{s}', LaTeXMode.mathmode: '\\check{s}'}, 354: {LaTeXMode.textmode: '\\c{T}', LaTeXMode.mathmode: '\\mbox{\\c{T}}'}, 355: {LaTeXMode.textmode: '\\c{t}', LaTeXMode.mathmode: '\\mbox{\\c{t}}'}, 356: {LaTeXMode.textmode: '\\v{T}', LaTeXMode.mathmode: '\\check{T}'}, 357: {LaTeXMode.textmode: '\\v{t}', LaTeXMode.mathmode: '\\check{t}'}, 358: {LaTeXMode.textmode: '{\\fontencoding{LELA}\\selectfont\\char47}'}, 359: {LaTeXMode.textmode: '{\\fontencoding{LELA}\\selectfont\\char63}'}, 360: {LaTeXMode.textmode: '\\~{U}', LaTeXMode.mathmode: '\\tilde{U}'}, 361: {LaTeXMode.textmode: '\\~{u}', LaTeXMode.mathmode: '\\tilde{u}'}, 362: {LaTeXMode.textmode: '\\={U}', LaTeXMode.mathmode: '\\bar{U}'}, 363: {LaTeXMode.textmode: '\\={u}', LaTeXMode.mathmode: '\\bar{u}'}, 364: {LaTeXMode.textmode: '\\u{U}', LaTeXMode.mathmode: '\\breve{U}'}, 365: {LaTeXMode.textmode: '\\u{u}', LaTeXMode.mathmode: '\\breve{u}'}, 366: {LaTeXMode.textmode: '\\r{U}', LaTeXMode.mathmode: '\\mathring{U}'}, 367: {LaTeXMode.textmode: '\\r{u}', LaTeXMode.mathmode: '\\mathring{u}'}, 368: {LaTeXMode.textmode: 
'\\H{U}', LaTeXMode.mathmode: '\\mbox{\\H{U}}'}, 369: {LaTeXMode.textmode: '\\H{u}', LaTeXMode.mathmode: '\\mbox{\\H{u}}'}, 370: {LaTeXMode.textmode: '\\k{U}', LaTeXMode.mathmode: '\\k{U}'}, 371: {LaTeXMode.textmode: '\\k{u}'}, 372: {LaTeXMode.textmode: '\\^{W}', LaTeXMode.mathmode: '\\hat{W}'}, 373: {LaTeXMode.textmode: '\\^{w}', LaTeXMode.mathmode: '\\hat{w}'}, 374: {LaTeXMode.textmode: '\\^{Y}', LaTeXMode.mathmode: '\\hat{Y}'}, 375: {LaTeXMode.textmode: '\\^{y}', LaTeXMode.mathmode: '\\hat{y}'}, 376: {LaTeXMode.textmode: '\\"{Y}', LaTeXMode.mathmode: '\\ddot{Y}'}, 377: {LaTeXMode.textmode: "\\'{Z}", LaTeXMode.mathmode: '\\acute{Z}'}, 378: {LaTeXMode.textmode: "\\'{z}", LaTeXMode.mathmode: '\\acute{z}'}, 379: {LaTeXMode.textmode: '\\.{Z}', LaTeXMode.mathmode: '\\dot{Z}'}, 380: {LaTeXMode.textmode: '\\.{z}', LaTeXMode.mathmode: '\\dot{z}'}, 381: {LaTeXMode.textmode: '\\v{Z}', LaTeXMode.mathmode: '\\check{Z}'}, 382: {LaTeXMode.textmode: '\\v{z}', LaTeXMode.mathmode: '\\check{z}'}, 402: {LaTeXMode.textmode: 'f'}, 405: {LaTeXMode.textmode: '\\texthvlig '}, 414: {LaTeXMode.textmode: '\\textnrleg '}, 426: {LaTeXMode.textmode: '\\eth '}, 450: {LaTeXMode.textmode: '\\textdoublepipe '}, 501: {LaTeXMode.textmode: "\\'{g}", LaTeXMode.mathmode: '\\acute{g}'}, 592: {LaTeXMode.textmode: '\\Elztrna '}, 594: {LaTeXMode.textmode: '\\Elztrnsa '}, 596: {LaTeXMode.textmode: '\\Elzopeno '}, 598: {LaTeXMode.textmode: '\\Elzrtld '}, 601: {LaTeXMode.textmode: '\\Elzschwa '}, 603: {LaTeXMode.textmode: '\\varepsilon '}, 609: {LaTeXMode.textmode: 'g'}, 611: {LaTeXMode.textmode: '\\Elzpgamma '}, 612: {LaTeXMode.textmode: '\\Elzpbgam '}, 613: {LaTeXMode.textmode: '\\Elztrnh '}, 620: {LaTeXMode.textmode: '\\Elzbtdl '}, 621: {LaTeXMode.textmode: '\\Elzrtll '}, 623: {LaTeXMode.textmode: '\\Elztrnm '}, 624: {LaTeXMode.textmode: '\\Elztrnmlr '}, 625: {LaTeXMode.textmode: '\\Elzltlmr '}, 627: {LaTeXMode.textmode: '\\Elzrtln '}, 631: {LaTeXMode.textmode: '\\Elzclomeg '}, 632: {LaTeXMode.textmode: '\\textphi '}, 633: {LaTeXMode.textmode: '\\Elztrnr '}, 634: {LaTeXMode.textmode: '\\Elztrnrl '}, 635: {LaTeXMode.textmode: '\\Elzrttrnr '}, 636: {LaTeXMode.textmode: '\\Elzrl '}, 637: {LaTeXMode.textmode: '\\Elzrtlr '}, 638: {LaTeXMode.textmode: '\\Elzfhr '}, 642: {LaTeXMode.textmode: '\\Elzrtls '}, 643: {LaTeXMode.textmode: '\\Elzesh '}, 647: {LaTeXMode.textmode: '\\Elztrnt '}, 648: {LaTeXMode.textmode: '\\Elzrtlt '}, 650: {LaTeXMode.textmode: '\\Elzpupsil '}, 651: {LaTeXMode.textmode: '\\Elzpscrv '}, 652: {LaTeXMode.textmode: '\\Elzinvv '}, 653: {LaTeXMode.textmode: '\\Elzinvw '}, 654: {LaTeXMode.textmode: '\\Elztrny '}, 656: {LaTeXMode.textmode: '\\Elzrtlz '}, 658: {LaTeXMode.textmode: '\\Elzyogh '}, 660: {LaTeXMode.textmode: '\\Elzglst '}, 661: {LaTeXMode.textmode: '\\Elzreglst '}, 662: {LaTeXMode.textmode: '\\Elzinglst '}, 670: {LaTeXMode.textmode: '\\textturnk '}, 676: {LaTeXMode.textmode: '\\Elzdyogh '}, 679: {LaTeXMode.textmode: '\\Elztesh '}, 700: {LaTeXMode.textmode: "'", LaTeXMode.mathmode: '\\rasp'}, 711: {LaTeXMode.textmode: '\\textasciicaron '}, 712: {LaTeXMode.textmode: '\\Elzverts '}, 716: {LaTeXMode.textmode: '\\Elzverti '}, 720: {LaTeXMode.textmode: '\\Elzlmrk '}, 721: {LaTeXMode.textmode: '\\Elzhlmrk '}, 722: {LaTeXMode.textmode: '\\Elzsbrhr '}, 723: {LaTeXMode.textmode: '\\Elzsblhr '}, 724: {LaTeXMode.textmode: '\\Elzrais '}, 725: {LaTeXMode.textmode: '\\Elzlow '}, 728: {LaTeXMode.textmode: '\\textasciibreve ', LaTeXMode.mathmode: '\\u'}, 729: {LaTeXMode.textmode: '\\textperiodcentered ', LaTeXMode.mathmode: 
'\\dot{}'}, 730: {LaTeXMode.textmode: '\\r{}', LaTeXMode.mathmode: '\\mathring{}'}, 731: {LaTeXMode.textmode: '\\k{}', LaTeXMode.mathmode: '\\mbox{\\k{}}'}, 732: {LaTeXMode.textmode: '\\texttildelow '}, 733: {LaTeXMode.textmode: '\\H{}', LaTeXMode.mathmode: '\\mbox{\\H{}}'}, 741: {LaTeXMode.textmode: '\\tone{55}'}, 742: {LaTeXMode.textmode: '\\tone{44}'}, 743: {LaTeXMode.textmode: '\\tone{33}'}, 744: {LaTeXMode.textmode: '\\tone{22}'}, 745: {LaTeXMode.textmode: '\\tone{11}'}, 768: {LaTeXMode.textmode: '\\`', LaTeXMode.mathmode: '\\grave'}, 769: {LaTeXMode.textmode: "\\'", LaTeXMode.mathmode: '\\acute'}, 770: {LaTeXMode.textmode: '\\^', LaTeXMode.mathmode: '\\hat'}, 771: {LaTeXMode.textmode: '\\~', LaTeXMode.mathmode: '\\tilde'}, 772: {LaTeXMode.textmode: '\\=', LaTeXMode.mathmode: '\\bar'}, 774: {LaTeXMode.textmode: '\\u', LaTeXMode.mathmode: '\\breve'}, 775: {LaTeXMode.textmode: '\\.', LaTeXMode.mathmode: '\\dot'}, 776: {LaTeXMode.textmode: '\\"', LaTeXMode.mathmode: '\\ddot'}, 778: {LaTeXMode.textmode: '\\r', LaTeXMode.mathmode: '\\ocirc'}, 779: {LaTeXMode.textmode: '\\H', LaTeXMode.mathmode: '\\H'}, 780: {LaTeXMode.textmode: '\\v', LaTeXMode.mathmode: '\\check'}, 783: {LaTeXMode.textmode: '\\cyrchar\\C'}, 801: {LaTeXMode.textmode: '\\Elzpalh '}, 807: {LaTeXMode.textmode: '\\c', LaTeXMode.mathmode: '\\c'}, 810: {LaTeXMode.textmode: '\\Elzsbbrg '}, 824: {LaTeXMode.mathmode: '\\forks'}, 902: {LaTeXMode.textmode: "\\'{A}", LaTeXMode.mathmode: '\\acute{A}'}, 904: {LaTeXMode.textmode: "\\'{E}", LaTeXMode.mathmode: '\\acute{E}'}, 905: {LaTeXMode.textmode: "\\'{H}", LaTeXMode.mathmode: '\\grave{H}'}, 906: {LaTeXMode.textmode: "\\'{}{I}", LaTeXMode.mathmode: "\\mathrm{'I}"}, 908: {LaTeXMode.textmode: "\\'{}O", LaTeXMode.mathmode: "\\mathrm{'O}"}, 910: {LaTeXMode.textmode: "\\mathrm{'Y}"}, 911: {LaTeXMode.textmode: "\\mathrm{'\\Omega}"}, 912: {LaTeXMode.textmode: '\\acute{\\ddot{\\iota}}'}, 913: {LaTeXMode.textmode: '\\Alpha '}, 914: {LaTeXMode.textmode: '\\Beta '}, 915: {LaTeXMode.textmode: '\\Gamma ', LaTeXMode.mathmode: '\\Gamma'}, 916: {LaTeXMode.textmode: '\\Delta ', LaTeXMode.mathmode: '\\Delta'}, 917: {LaTeXMode.textmode: '\\Epsilon '}, 918: {LaTeXMode.textmode: '\\Zeta '}, 919: {LaTeXMode.textmode: '\\Eta '}, 920: {LaTeXMode.textmode: '\\Theta ', LaTeXMode.mathmode: '\\Theta'}, 921: {LaTeXMode.textmode: '\\Iota '}, 922: {LaTeXMode.textmode: '\\Kappa '}, 923: {LaTeXMode.textmode: '\\Lambda ', LaTeXMode.mathmode: '\\Lambda'}, 924: {LaTeXMode.textmode: 'M'}, 925: {LaTeXMode.textmode: 'N'}, 926: {LaTeXMode.textmode: '\\Xi ', LaTeXMode.mathmode: '\\Xi'}, 927: {LaTeXMode.textmode: 'O'}, 928: {LaTeXMode.textmode: '\\Pi ', LaTeXMode.mathmode: '\\Pi'}, 929: {LaTeXMode.textmode: '\\Rho '}, 931: {LaTeXMode.textmode: '\\Sigma ', LaTeXMode.mathmode: '\\Sigma'}, 932: {LaTeXMode.textmode: '\\Tau '}, 933: {LaTeXMode.textmode: '\\Upsilon '}, 934: {LaTeXMode.textmode: '\\Phi ', LaTeXMode.mathmode: '\\Phi'}, 935: {LaTeXMode.textmode: '\\Chi '}, 936: {LaTeXMode.textmode: '\\Psi ', LaTeXMode.mathmode: '\\Psi'}, 937: {LaTeXMode.textmode: '\\Omega ', LaTeXMode.mathmode: '\\Omega'}, 938: {LaTeXMode.textmode: '\\mathrm{\\ddot{I}}'}, 939: {LaTeXMode.textmode: '\\mathrm{\\ddot{Y}}'}, 940: {LaTeXMode.textmode: "\\'{$\\alpha$}", LaTeXMode.mathmode: '\\acute{\\alpha}'}, 941: {LaTeXMode.textmode: '\\acute{\\epsilon}'}, 942: {LaTeXMode.textmode: '\\acute{\\eta}'}, 943: {LaTeXMode.textmode: '\\acute{\\iota}'}, 944: {LaTeXMode.textmode: '\\acute{\\ddot{\\upsilon}}'}, 945: {LaTeXMode.textmode: '\\alpha ', 
LaTeXMode.mathmode: '\\alpha'}, 946: {LaTeXMode.textmode: '\\beta ', LaTeXMode.mathmode: '\\beta'}, 947: {LaTeXMode.textmode: '\\gamma ', LaTeXMode.mathmode: '\\gamma'}, 948: {LaTeXMode.textmode: '\\delta ', LaTeXMode.mathmode: '\\delta'}, 949: {LaTeXMode.textmode: '\\epsilon ', LaTeXMode.mathmode: '\\varepsilon'}, 950: {LaTeXMode.textmode: '\\zeta ', LaTeXMode.mathmode: '\\zeta'}, 951: {LaTeXMode.textmode: '\\eta ', LaTeXMode.mathmode: '\\eta'}, 952: {LaTeXMode.textmode: '\\texttheta ', LaTeXMode.mathmode: '\\theta'}, 953: {LaTeXMode.textmode: '\\iota ', LaTeXMode.mathmode: '\\iota'}, 954: {LaTeXMode.textmode: '\\kappa ', LaTeXMode.mathmode: '\\kappa'}, 955: {LaTeXMode.textmode: '\\lambda ', LaTeXMode.mathmode: '\\lambda'}, 956: {LaTeXMode.textmode: '\\mu ', LaTeXMode.mathmode: '\\mu'}, 957: {LaTeXMode.textmode: '\\nu ', LaTeXMode.mathmode: '\\nu'}, 958: {LaTeXMode.textmode: '\\xi ', LaTeXMode.mathmode: '\\xi'}, 959: {LaTeXMode.textmode: 'o'}, 960: {LaTeXMode.textmode: '\\pi ', LaTeXMode.mathmode: '\\pi'}, 961: {LaTeXMode.textmode: '\\rho ', LaTeXMode.mathmode: '\\rho'}, 962: {LaTeXMode.textmode: '\\varsigma ', LaTeXMode.mathmode: '\\varsigma'}, 963: {LaTeXMode.textmode: '\\sigma ', LaTeXMode.mathmode: '\\sigma'}, 964: {LaTeXMode.textmode: '\\tau ', LaTeXMode.mathmode: '\\tau'}, 965: {LaTeXMode.textmode: '\\upsilon ', LaTeXMode.mathmode: '\\upsilon'}, 966: {LaTeXMode.textmode: '\\varphi ', LaTeXMode.mathmode: '\\varphi'}, 967: {LaTeXMode.textmode: '\\chi ', LaTeXMode.mathmode: '\\chi'}, 968: {LaTeXMode.textmode: '\\psi ', LaTeXMode.mathmode: '\\psi'}, 969: {LaTeXMode.textmode: '\\omega ', LaTeXMode.mathmode: '\\omega'}, 970: {LaTeXMode.textmode: '\\ddot{\\iota}'}, 971: {LaTeXMode.textmode: '\\ddot{\\upsilon}'}, 972: {LaTeXMode.textmode: "\\'{o}", LaTeXMode.mathmode: '\\acute{o}'}, 973: {LaTeXMode.textmode: '\\acute{\\upsilon}'}, 974: {LaTeXMode.textmode: '\\acute{\\omega}'}, 977: {LaTeXMode.textmode: '\\textvartheta ', LaTeXMode.mathmode: '\\vartheta '}, 978: {LaTeXMode.textmode: '\\Upsilon ', LaTeXMode.mathmode: '\\Upsilon'}, 981: {LaTeXMode.textmode: '\\phi ', LaTeXMode.mathmode: '\\phi'}, 982: {LaTeXMode.textmode: '\\varpi ', LaTeXMode.mathmode: '\\varpi'}, 986: {LaTeXMode.textmode: '\\Stigma '}, 988: {LaTeXMode.textmode: '\\Digamma '}, 989: {LaTeXMode.textmode: '\\digamma ', LaTeXMode.mathmode: '\\digamma'}, 990: {LaTeXMode.textmode: '\\Koppa '}, 992: {LaTeXMode.textmode: '\\Sampi '}, 1008: {LaTeXMode.textmode: '\\varkappa ', LaTeXMode.mathmode: '\\varkappa'}, 1009: {LaTeXMode.textmode: '\\varrho ', LaTeXMode.mathmode: '\\varrho'}, 1014: {LaTeXMode.textmode: '\\backepsilon ', LaTeXMode.mathmode: '\\backepsilon'}, 1025: {LaTeXMode.textmode: '\\cyrchar\\CYRYO '}, 1026: {LaTeXMode.textmode: '\\cyrchar\\CYRDJE '}, 1027: {LaTeXMode.textmode: "\\cyrchar{\\'\\CYRG}"}, 1028: {LaTeXMode.textmode: '\\cyrchar\\CYRIE '}, 1029: {LaTeXMode.textmode: '\\cyrchar\\CYRDZE '}, 1030: {LaTeXMode.textmode: '\\cyrchar\\CYRII '}, 1031: {LaTeXMode.textmode: '\\cyrchar\\CYRYI '}, 1032: {LaTeXMode.textmode: '\\cyrchar\\CYRJE '}, 1033: {LaTeXMode.textmode: '\\cyrchar\\CYRLJE '}, 1034: {LaTeXMode.textmode: '\\cyrchar\\CYRNJE '}, 1035: {LaTeXMode.textmode: '\\cyrchar\\CYRTSHE '}, 1036: {LaTeXMode.textmode: "\\cyrchar{\\'\\CYRK}"}, 1038: {LaTeXMode.textmode: '\\cyrchar\\CYRUSHRT '}, 1039: {LaTeXMode.textmode: '\\cyrchar\\CYRDZHE '}, 1040: {LaTeXMode.textmode: '\\cyrchar\\CYRA '}, 1041: {LaTeXMode.textmode: '\\cyrchar\\CYRB '}, 1042: {LaTeXMode.textmode: '\\cyrchar\\CYRV '}, 1043: {LaTeXMode.textmode: 
'\\cyrchar\\CYRG '}, 1044: {LaTeXMode.textmode: '\\cyrchar\\CYRD '}, 1045: {LaTeXMode.textmode: '\\cyrchar\\CYRE '}, 1046: {LaTeXMode.textmode: '\\cyrchar\\CYRZH '}, 1047: {LaTeXMode.textmode: '\\cyrchar\\CYRZ '}, 1048: {LaTeXMode.textmode: '\\cyrchar\\CYRI '}, 1049: {LaTeXMode.textmode: '\\cyrchar\\CYRISHRT '}, 1050: {LaTeXMode.textmode: '\\cyrchar\\CYRK '}, 1051: {LaTeXMode.textmode: '\\cyrchar\\CYRL '}, 1052: {LaTeXMode.textmode: '\\cyrchar\\CYRM '}, 1053: {LaTeXMode.textmode: '\\cyrchar\\CYRN '}, 1054: {LaTeXMode.textmode: '\\cyrchar\\CYRO '}, 1055: {LaTeXMode.textmode: '\\cyrchar\\CYRP '}, 1056: {LaTeXMode.textmode: '\\cyrchar\\CYRR '}, 1057: {LaTeXMode.textmode: '\\cyrchar\\CYRS '}, 1058: {LaTeXMode.textmode: '\\cyrchar\\CYRT '}, 1059: {LaTeXMode.textmode: '\\cyrchar\\CYRU '}, 1060: {LaTeXMode.textmode: '\\cyrchar\\CYRF '}, 1061: {LaTeXMode.textmode: '\\cyrchar\\CYRH '}, 1062: {LaTeXMode.textmode: '\\cyrchar\\CYRC '}, 1063: {LaTeXMode.textmode: '\\cyrchar\\CYRCH '}, 1064: {LaTeXMode.textmode: '\\cyrchar\\CYRSH '}, 1065: {LaTeXMode.textmode: '\\cyrchar\\CYRSHCH '}, 1066: {LaTeXMode.textmode: '\\cyrchar\\CYRHRDSN '}, 1067: {LaTeXMode.textmode: '\\cyrchar\\CYRERY '}, 1068: {LaTeXMode.textmode: '\\cyrchar\\CYRSFTSN '}, 1069: {LaTeXMode.textmode: '\\cyrchar\\CYREREV '}, 1070: {LaTeXMode.textmode: '\\cyrchar\\CYRYU '}, 1071: {LaTeXMode.textmode: '\\cyrchar\\CYRYA '}, 1072: {LaTeXMode.textmode: '\\cyrchar\\cyra '}, 1073: {LaTeXMode.textmode: '\\cyrchar\\cyrb '}, 1074: {LaTeXMode.textmode: '\\cyrchar\\cyrv '}, 1075: {LaTeXMode.textmode: '\\cyrchar\\cyrg '}, 1076: {LaTeXMode.textmode: '\\cyrchar\\cyrd '}, 1077: {LaTeXMode.textmode: '\\cyrchar\\cyre '}, 1078: {LaTeXMode.textmode: '\\cyrchar\\cyrzh '}, 1079: {LaTeXMode.textmode: '\\cyrchar\\cyrz '}, 1080: {LaTeXMode.textmode: '\\cyrchar\\cyri '}, 1081: {LaTeXMode.textmode: '\\cyrchar\\cyrishrt '}, 1082: {LaTeXMode.textmode: '\\cyrchar\\cyrk '}, 1083: {LaTeXMode.textmode: '\\cyrchar\\cyrl '}, 1084: {LaTeXMode.textmode: '\\cyrchar\\cyrm '}, 1085: {LaTeXMode.textmode: '\\cyrchar\\cyrn '}, 1086: {LaTeXMode.textmode: '\\cyrchar\\cyro '}, 1087: {LaTeXMode.textmode: '\\cyrchar\\cyrp '}, 1088: {LaTeXMode.textmode: '\\cyrchar\\cyrr '}, 1089: {LaTeXMode.textmode: '\\cyrchar\\cyrs '}, 1090: {LaTeXMode.textmode: '\\cyrchar\\cyrt '}, 1091: {LaTeXMode.textmode: '\\cyrchar\\cyru '}, 1092: {LaTeXMode.textmode: '\\cyrchar\\cyrf '}, 1093: {LaTeXMode.textmode: '\\cyrchar\\cyrh '}, 1094: {LaTeXMode.textmode: '\\cyrchar\\cyrc '}, 1095: {LaTeXMode.textmode: '\\cyrchar\\cyrch '}, 1096: {LaTeXMode.textmode: '\\cyrchar\\cyrsh '}, 1097: {LaTeXMode.textmode: '\\cyrchar\\cyrshch '}, 1098: {LaTeXMode.textmode: '\\cyrchar\\cyrhrdsn '}, 1099: {LaTeXMode.textmode: '\\cyrchar\\cyrery '}, 1100: {LaTeXMode.textmode: '\\cyrchar\\cyrsftsn '}, 1101: {LaTeXMode.textmode: '\\cyrchar\\cyrerev '}, 1102: {LaTeXMode.textmode: '\\cyrchar\\cyryu '}, 1103: {LaTeXMode.textmode: '\\cyrchar\\cyrya '}, 1105: {LaTeXMode.textmode: '\\cyrchar\\cyryo '}, 1106: {LaTeXMode.textmode: '\\cyrchar\\cyrdje '}, 1107: {LaTeXMode.textmode: "\\cyrchar{\\'\\cyrg}"}, 1108: {LaTeXMode.textmode: '\\cyrchar\\cyrie '}, 1109: {LaTeXMode.textmode: '\\cyrchar\\cyrdze '}, 1110: {LaTeXMode.textmode: '\\cyrchar\\cyrii '}, 1111: {LaTeXMode.textmode: '\\cyrchar\\cyryi '}, 1112: {LaTeXMode.textmode: '\\cyrchar\\cyrje '}, 1113: {LaTeXMode.textmode: '\\cyrchar\\cyrlje '}, 1114: {LaTeXMode.textmode: '\\cyrchar\\cyrnje '}, 1115: {LaTeXMode.textmode: '\\cyrchar\\cyrtshe '}, 1116: {LaTeXMode.textmode: "\\cyrchar{\\'\\cyrk}"}, 
1118: {LaTeXMode.textmode: '\\cyrchar\\cyrushrt '}, 1119: {LaTeXMode.textmode: '\\cyrchar\\cyrdzhe '}, 1120: {LaTeXMode.textmode: '\\cyrchar\\CYROMEGA '}, 1121: {LaTeXMode.textmode: '\\cyrchar\\cyromega '}, 1122: {LaTeXMode.textmode: '\\cyrchar\\CYRYAT '}, 1124: {LaTeXMode.textmode: '\\cyrchar\\CYRIOTE '}, 1125: {LaTeXMode.textmode: '\\cyrchar\\cyriote '}, 1126: {LaTeXMode.textmode: '\\cyrchar\\CYRLYUS '}, 1127: {LaTeXMode.textmode: '\\cyrchar\\cyrlyus '}, 1128: {LaTeXMode.textmode: '\\cyrchar\\CYRIOTLYUS '}, 1129: {LaTeXMode.textmode: '\\cyrchar\\cyriotlyus '}, 1130: {LaTeXMode.textmode: '\\cyrchar\\CYRBYUS '}, 1132: {LaTeXMode.textmode: '\\cyrchar\\CYRIOTBYUS '}, 1133: {LaTeXMode.textmode: '\\cyrchar\\cyriotbyus '}, 1134: {LaTeXMode.textmode: '\\cyrchar\\CYRKSI '}, 1135: {LaTeXMode.textmode: '\\cyrchar\\cyrksi '}, 1136: {LaTeXMode.textmode: '\\cyrchar\\CYRPSI '}, 1137: {LaTeXMode.textmode: '\\cyrchar\\cyrpsi '}, 1138: {LaTeXMode.textmode: '\\cyrchar\\CYRFITA '}, 1140: {LaTeXMode.textmode: '\\cyrchar\\CYRIZH '}, 1144: {LaTeXMode.textmode: '\\cyrchar\\CYRUK '}, 1145: {LaTeXMode.textmode: '\\cyrchar\\cyruk '}, 1146: {LaTeXMode.textmode: '\\cyrchar\\CYROMEGARND '}, 1147: {LaTeXMode.textmode: '\\cyrchar\\cyromegarnd '}, 1148: {LaTeXMode.textmode: '\\cyrchar\\CYROMEGATITLO '}, 1149: {LaTeXMode.textmode: '\\cyrchar\\cyromegatitlo '}, 1150: {LaTeXMode.textmode: '\\cyrchar\\CYROT '}, 1151: {LaTeXMode.textmode: '\\cyrchar\\cyrot '}, 1152: {LaTeXMode.textmode: '\\cyrchar\\CYRKOPPA '}, 1153: {LaTeXMode.textmode: '\\cyrchar\\cyrkoppa '}, 1154: {LaTeXMode.textmode: '\\cyrchar\\cyrthousands '}, 1160: {LaTeXMode.textmode: '\\cyrchar\\cyrhundredthousands '}, 1161: {LaTeXMode.textmode: '\\cyrchar\\cyrmillions '}, 1164: {LaTeXMode.textmode: '\\cyrchar\\CYRSEMISFTSN '}, 1165: {LaTeXMode.textmode: '\\cyrchar\\cyrsemisftsn '}, 1166: {LaTeXMode.textmode: '\\cyrchar\\CYRRTICK '}, 1167: {LaTeXMode.textmode: '\\cyrchar\\cyrrtick '}, 1168: {LaTeXMode.textmode: '\\cyrchar\\CYRGUP '}, 1169: {LaTeXMode.textmode: '\\cyrchar\\cyrgup '}, 1170: {LaTeXMode.textmode: '\\cyrchar\\CYRGHCRS '}, 1171: {LaTeXMode.textmode: '\\cyrchar\\cyrghcrs '}, 1172: {LaTeXMode.textmode: '\\cyrchar\\CYRGHK '}, 1173: {LaTeXMode.textmode: '\\cyrchar\\cyrghk '}, 1174: {LaTeXMode.textmode: '\\cyrchar\\CYRZHDSC '}, 1175: {LaTeXMode.textmode: '\\cyrchar\\cyrzhdsc '}, 1176: {LaTeXMode.textmode: '\\cyrchar\\CYRZDSC '}, 1177: {LaTeXMode.textmode: '\\cyrchar\\cyrzdsc '}, 1178: {LaTeXMode.textmode: '\\cyrchar\\CYRKDSC '}, 1179: {LaTeXMode.textmode: '\\cyrchar\\cyrkdsc '}, 1180: {LaTeXMode.textmode: '\\cyrchar\\CYRKVCRS '}, 1181: {LaTeXMode.textmode: '\\cyrchar\\cyrkvcrs '}, 1182: {LaTeXMode.textmode: '\\cyrchar\\CYRKHCRS '}, 1183: {LaTeXMode.textmode: '\\cyrchar\\cyrkhcrs '}, 1184: {LaTeXMode.textmode: '\\cyrchar\\CYRKBEAK '}, 1185: {LaTeXMode.textmode: '\\cyrchar\\cyrkbeak '}, 1186: {LaTeXMode.textmode: '\\cyrchar\\CYRNDSC '}, 1187: {LaTeXMode.textmode: '\\cyrchar\\cyrndsc '}, 1188: {LaTeXMode.textmode: '\\cyrchar\\CYRNG '}, 1189: {LaTeXMode.textmode: '\\cyrchar\\cyrng '}, 1190: {LaTeXMode.textmode: '\\cyrchar\\CYRPHK '}, 1191: {LaTeXMode.textmode: '\\cyrchar\\cyrphk '}, 1192: {LaTeXMode.textmode: '\\cyrchar\\CYRABHHA '}, 1193: {LaTeXMode.textmode: '\\cyrchar\\cyrabhha '}, 1194: {LaTeXMode.textmode: '\\cyrchar\\CYRSDSC '}, 1195: {LaTeXMode.textmode: '\\cyrchar\\cyrsdsc '}, 1196: {LaTeXMode.textmode: '\\cyrchar\\CYRTDSC '}, 1197: {LaTeXMode.textmode: '\\cyrchar\\cyrtdsc '}, 1198: {LaTeXMode.textmode: '\\cyrchar\\CYRY '}, 1199: {LaTeXMode.textmode: 
'\\cyrchar\\cyry '}, 1200: {LaTeXMode.textmode: '\\cyrchar\\CYRYHCRS '}, 1201: {LaTeXMode.textmode: '\\cyrchar\\cyryhcrs '}, 1202: {LaTeXMode.textmode: '\\cyrchar\\CYRHDSC '}, 1203: {LaTeXMode.textmode: '\\cyrchar\\cyrhdsc '}, 1204: {LaTeXMode.textmode: '\\cyrchar\\CYRTETSE '}, 1205: {LaTeXMode.textmode: '\\cyrchar\\cyrtetse '}, 1206: {LaTeXMode.textmode: '\\cyrchar\\CYRCHRDSC '}, 1207: {LaTeXMode.textmode: '\\cyrchar\\cyrchrdsc '}, 1208: {LaTeXMode.textmode: '\\cyrchar\\CYRCHVCRS '}, 1209: {LaTeXMode.textmode: '\\cyrchar\\cyrchvcrs '}, 1210: {LaTeXMode.textmode: '\\cyrchar\\CYRSHHA '}, 1211: {LaTeXMode.textmode: '\\cyrchar\\cyrshha '}, 1212: {LaTeXMode.textmode: '\\cyrchar\\CYRABHCH '}, 1213: {LaTeXMode.textmode: '\\cyrchar\\cyrabhch '}, 1214: {LaTeXMode.textmode: '\\cyrchar\\CYRABHCHDSC '}, 1215: {LaTeXMode.textmode: '\\cyrchar\\cyrabhchdsc '}, 1216: {LaTeXMode.textmode: '\\cyrchar\\CYRpalochka '}, 1219: {LaTeXMode.textmode: '\\cyrchar\\CYRKHK '}, 1220: {LaTeXMode.textmode: '\\cyrchar\\cyrkhk '}, 1223: {LaTeXMode.textmode: '\\cyrchar\\CYRNHK '}, 1224: {LaTeXMode.textmode: '\\cyrchar\\cyrnhk '}, 1227: {LaTeXMode.textmode: '\\cyrchar\\CYRCHLDSC '}, 1228: {LaTeXMode.textmode: '\\cyrchar\\cyrchldsc '}, 1236: {LaTeXMode.textmode: '\\cyrchar\\CYRAE '}, 1237: {LaTeXMode.textmode: '\\cyrchar\\cyrae '}, 1240: {LaTeXMode.textmode: '\\cyrchar\\CYRSCHWA '}, 1241: {LaTeXMode.textmode: '\\cyrchar\\cyrschwa '}, 1248: {LaTeXMode.textmode: '\\cyrchar\\CYRABHDZE '}, 1249: {LaTeXMode.textmode: '\\cyrchar\\cyrabhdze '}, 1256: {LaTeXMode.textmode: '\\cyrchar\\CYROTLD '}, 1257: {LaTeXMode.textmode: '\\cyrchar\\cyrotld '}, 8194: {LaTeXMode.textmode: '\\hspace{0.6em}', LaTeXMode.mathmode: '\\enspace'}, 8195: {LaTeXMode.textmode: '\\hspace{1em}', LaTeXMode.mathmode: '\\quad'}, 8196: {LaTeXMode.textmode: '\\hspace{0.33em}'}, 8197: {LaTeXMode.textmode: '\\hspace{0.25em}', LaTeXMode.mathmode: '\\thickspace'}, 8198: {LaTeXMode.textmode: '\\hspace{0.166em}'}, 8199: {LaTeXMode.textmode: '\\hphantom{0}'}, 8200: {LaTeXMode.textmode: '\\hphantom{,}'}, 8201: {LaTeXMode.textmode: '\\hspace{0.167em}', LaTeXMode.mathmode: '\\thinspace'}, 8202: {LaTeXMode.textmode: '\\mkern1mu ', LaTeXMode.mathmode: '\\hspace'}, 8208: {LaTeXMode.textmode: '-'}, 8211: {LaTeXMode.textmode: '\\textendash ', LaTeXMode.mathmode: '\\mathrm{\\textendash}'}, 8212: {LaTeXMode.textmode: '\\textemdash ', LaTeXMode.mathmode: '\\emdash'}, 8213: {LaTeXMode.textmode: '\\rule{1em}{1pt}'}, 8214: {LaTeXMode.textmode: '\\Vert ', LaTeXMode.mathmode: '\\Vert'}, 8216: {LaTeXMode.textmode: '`', LaTeXMode.mathmode: '\\lq'}, 8217: {LaTeXMode.textmode: "'", LaTeXMode.mathmode: '\\rq'}, 8218: {LaTeXMode.textmode: ','}, 8219: {LaTeXMode.textmode: '\\Elzreapos '}, 8220: {LaTeXMode.textmode: '\\textquotedblleft ', LaTeXMode.mathmode: '\\textquotedblleft'}, 8221: {LaTeXMode.textmode: '\\textquotedblright ', LaTeXMode.mathmode: '\\textquotedblright'}, 8222: {LaTeXMode.textmode: ',,'}, 8224: {LaTeXMode.textmode: '\\textdagger ', LaTeXMode.mathmode: '\\dagger'}, 8225: {LaTeXMode.textmode: '\\textdaggerdbl ', LaTeXMode.mathmode: '\\ddagger'}, 8226: {LaTeXMode.textmode: '\\textbullet ', LaTeXMode.mathmode: '\\bullet'}, 8228: {LaTeXMode.textmode: '.'}, 8229: {LaTeXMode.textmode: '..'}, 8230: {LaTeXMode.textmode: '\\ldots ', LaTeXMode.mathmode: '\\dots'}, 8240: {LaTeXMode.textmode: '\\textperthousand '}, 8241: {LaTeXMode.textmode: '\\textpertenthousand '}, 8242: {LaTeXMode.textmode: "{'}", LaTeXMode.mathmode: '\\prime'}, 8243: {LaTeXMode.textmode: "{''}"}, 8244: 
{LaTeXMode.textmode: "{'''}"}, 8245: {LaTeXMode.textmode: '\\backprime ', LaTeXMode.mathmode: '\\backprime'}, 8279: {LaTeXMode.textmode: "''''"}, 8411: {LaTeXMode.textmode: '\\dddot ', LaTeXMode.mathmode: '\\dddot'}, 8412: {LaTeXMode.textmode: '\\ddddot ', LaTeXMode.mathmode: '\\ddddot'}, 8421: {LaTeXMode.textmode: '{\\rlap{\\textbackslash}{{/}\\!\\!{/}}}'}, 8450: {LaTeXMode.textmode: '\\mathbb{C}'}, 8459: {LaTeXMode.textmode: '\\mathscr{H}'}, 8460: {LaTeXMode.textmode: '\\mathfrak{H}'}, 8461: {LaTeXMode.textmode: '\\mathbb{H}'}, 8463: {LaTeXMode.textmode: '\\hslash ', LaTeXMode.mathmode: '\\hslash'}, 8464: {LaTeXMode.textmode: '\\mathscr{I}'}, 8465: {LaTeXMode.textmode: '\\mathfrak{I}', LaTeXMode.mathmode: '\\Im'}, 8466: {LaTeXMode.textmode: '\\mathscr{L}'}, 8467: {LaTeXMode.textmode: '\\mathscr{l}', LaTeXMode.mathmode: '\\ell'}, 8469: {LaTeXMode.textmode: '\\mathbb{N}'}, 8470: {LaTeXMode.textmode: '\\cyrchar\\textnumero ', LaTeXMode.mathmode: '\\textnumero'}, 8472: {LaTeXMode.textmode: '\\wp ', LaTeXMode.mathmode: '\\wp'}, 8473: {LaTeXMode.textmode: '\\mathbb{P}'}, 8474: {LaTeXMode.textmode: '\\mathbb{Q}'}, 8475: {LaTeXMode.textmode: '\\mathscr{R}'}, 8476: {LaTeXMode.textmode: '\\mathfrak{R}', LaTeXMode.mathmode: '\\Re'}, 8477: {LaTeXMode.textmode: '\\mathbb{R}'}, 8478: {LaTeXMode.textmode: '\\Elzxrat '}, 8482: {LaTeXMode.textmode: '\\texttrademark '}, 8484: {LaTeXMode.textmode: '\\mathbb{Z}'}, 8486: {LaTeXMode.textmode: '\\Omega '}, 8487: {LaTeXMode.textmode: '\\mho ', LaTeXMode.mathmode: '\\mho'}, 8488: {LaTeXMode.textmode: '\\mathfrak{Z}'}, 8489: {LaTeXMode.textmode: '\\ElsevierGlyph{2129}'}, 8491: {LaTeXMode.textmode: '\\AA ', LaTeXMode.mathmode: '\\AA'}, 8492: {LaTeXMode.textmode: '\\mathscr{B}'}, 8493: {LaTeXMode.textmode: '\\mathfrak{C}'}, 8495: {LaTeXMode.textmode: '\\mathscr{e}'}, 8496: {LaTeXMode.textmode: '\\mathscr{E}'}, 8497: {LaTeXMode.textmode: '\\mathscr{F}'}, 8499: {LaTeXMode.textmode: '\\mathscr{M}'}, 8500: {LaTeXMode.textmode: '\\mathscr{o}'}, 8501: {LaTeXMode.textmode: '\\aleph ', LaTeXMode.mathmode: '\\aleph'}, 8502: {LaTeXMode.textmode: '\\beth ', LaTeXMode.mathmode: '\\beth'}, 8503: {LaTeXMode.textmode: '\\gimel ', LaTeXMode.mathmode: '\\gimel'}, 8504: {LaTeXMode.textmode: '\\daleth ', LaTeXMode.mathmode: '\\daleth'}, 8512: {LaTeXMode.mathmode: '\\bbsum'}, 8531: {LaTeXMode.textmode: '\\textfrac{1}{3}'}, 8532: {LaTeXMode.textmode: '\\textfrac{2}{3}'}, 8533: {LaTeXMode.textmode: '\\textfrac{1}{5}'}, 8534: {LaTeXMode.textmode: '\\textfrac{2}{5}'}, 8535: {LaTeXMode.textmode: '\\textfrac{3}{5}'}, 8536: {LaTeXMode.textmode: '\\textfrac{4}{5}'}, 8537: {LaTeXMode.textmode: '\\textfrac{1}{6}'}, 8538: {LaTeXMode.textmode: '\\textfrac{5}{6}'}, 8539: {LaTeXMode.textmode: '\\textfrac{1}{8}'}, 8540: {LaTeXMode.textmode: '\\textfrac{3}{8}'}, 8541: {LaTeXMode.textmode: '\\textfrac{5}{8}'}, 8542: {LaTeXMode.textmode: '\\textfrac{7}{8}'}, 8592: {LaTeXMode.textmode: '\\leftarrow ', LaTeXMode.mathmode: '\\leftarrow'}, 8593: {LaTeXMode.textmode: '\\uparrow ', LaTeXMode.mathmode: '\\uparrow'}, 8594: {LaTeXMode.textmode: '\\rightarrow ', LaTeXMode.mathmode: '\\rightarrow'}, 8595: {LaTeXMode.textmode: '\\downarrow ', LaTeXMode.mathmode: '\\downarrow'}, 8596: {LaTeXMode.textmode: '\\leftrightarrow ', LaTeXMode.mathmode: '\\leftrightarrow'}, 8597: {LaTeXMode.textmode: '\\updownarrow ', LaTeXMode.mathmode: '\\updownarrow'}, 8598: {LaTeXMode.textmode: '\\nwarrow ', LaTeXMode.mathmode: '\\nwarrow'}, 8599: {LaTeXMode.textmode: '\\nearrow ', LaTeXMode.mathmode: '\\nearrow'}, 8600: 
{LaTeXMode.textmode: '\\searrow ', LaTeXMode.mathmode: '\\searrow'}, 8601: {LaTeXMode.textmode: '\\swarrow ', LaTeXMode.mathmode: '\\swarrow'}, 8602: {LaTeXMode.textmode: '\\nleftarrow ', LaTeXMode.mathmode: '\\nleftarrow'}, 8603: {LaTeXMode.textmode: '\\nrightarrow ', LaTeXMode.mathmode: '\\nrightarrow'}, 8604: {LaTeXMode.textmode: '\\arrowwaveleft ', LaTeXMode.mathmode: '\\leftsquigarrow'}, 8605: {LaTeXMode.textmode: '\\arrowwaveright ', LaTeXMode.mathmode: '\\rightsquigarrow'}, 8606: {LaTeXMode.textmode: '\\twoheadleftarrow ', LaTeXMode.mathmode: '\\twoheadleftarrow'}, 8608: {LaTeXMode.textmode: '\\twoheadrightarrow ', LaTeXMode.mathmode: '\\twoheadrightarrow'}, 8610: {LaTeXMode.textmode: '\\leftarrowtail ', LaTeXMode.mathmode: '\\leftarrowtail'}, 8611: {LaTeXMode.textmode: '\\rightarrowtail ', LaTeXMode.mathmode: '\\rightarrowtail'}, 8614: {LaTeXMode.textmode: '\\mapsto ', LaTeXMode.mathmode: '\\mapsto'}, 8617: {LaTeXMode.textmode: '\\hookleftarrow ', LaTeXMode.mathmode: '\\hookleftarrow'}, 8618: {LaTeXMode.textmode: '\\hookrightarrow ', LaTeXMode.mathmode: '\\hookrightarrow'}, 8619: {LaTeXMode.textmode: '\\looparrowleft ', LaTeXMode.mathmode: '\\looparrowleft'}, 8620: {LaTeXMode.textmode: '\\looparrowright ', LaTeXMode.mathmode: '\\looparrowright'}, 8621: {LaTeXMode.textmode: '\\leftrightsquigarrow ', LaTeXMode.mathmode: '\\leftrightsquigarrow'}, 8622: {LaTeXMode.textmode: '\\nleftrightarrow ', LaTeXMode.mathmode: '\\nleftrightarrow'}, 8624: {LaTeXMode.textmode: '\\Lsh ', LaTeXMode.mathmode: '\\Lsh'}, 8625: {LaTeXMode.textmode: '\\Rsh ', LaTeXMode.mathmode: '\\Rsh'}, 8627: {LaTeXMode.textmode: '\\ElsevierGlyph{21B3}'}, 8630: {LaTeXMode.textmode: '\\curvearrowleft ', LaTeXMode.mathmode: '\\curvearrowleft'}, 8631: {LaTeXMode.textmode: '\\curvearrowright ', LaTeXMode.mathmode: '\\curvearrowright'}, 8634: {LaTeXMode.textmode: '\\circlearrowleft '}, 8635: {LaTeXMode.textmode: '\\circlearrowright '}, 8636: {LaTeXMode.textmode: '\\leftharpoonup ', LaTeXMode.mathmode: '\\leftharpoonup'}, 8637: {LaTeXMode.textmode: '\\leftharpoondown ', LaTeXMode.mathmode: '\\leftharpoondown'}, 8638: {LaTeXMode.textmode: '\\upharpoonright ', LaTeXMode.mathmode: '\\upharpoonright'}, 8639: {LaTeXMode.textmode: '\\upharpoonleft ', LaTeXMode.mathmode: '\\upharpoonleft'}, 8640: {LaTeXMode.textmode: '\\rightharpoonup ', LaTeXMode.mathmode: '\\rightharpoonup'}, 8641: {LaTeXMode.textmode: '\\rightharpoondown ', LaTeXMode.mathmode: '\\rightharpoondown'}, 8642: {LaTeXMode.textmode: '\\downharpoonright ', LaTeXMode.mathmode: '\\downharpoonright'}, 8643: {LaTeXMode.textmode: '\\downharpoonleft ', LaTeXMode.mathmode: '\\downharpoonleft'}, 8644: {LaTeXMode.textmode: '\\rightleftarrows ', LaTeXMode.mathmode: '\\rightleftarrows'}, 8645: {LaTeXMode.textmode: '\\dblarrowupdown '}, 8646: {LaTeXMode.textmode: '\\leftrightarrows ', LaTeXMode.mathmode: '\\leftrightarrows'}, 8647: {LaTeXMode.textmode: '\\leftleftarrows ', LaTeXMode.mathmode: '\\leftleftarrows'}, 8648: {LaTeXMode.textmode: '\\upuparrows ', LaTeXMode.mathmode: '\\upuparrows'}, 8649: {LaTeXMode.textmode: '\\rightrightarrows ', LaTeXMode.mathmode: '\\rightrightarrows'}, 8650: {LaTeXMode.textmode: '\\downdownarrows ', LaTeXMode.mathmode: '\\downdownarrows'}, 8651: {LaTeXMode.textmode: '\\leftrightharpoons ', LaTeXMode.mathmode: '\\leftrightharpoons'}, 8652: {LaTeXMode.textmode: '\\rightleftharpoons ', LaTeXMode.mathmode: '\\rightleftharpoons'}, 8653: {LaTeXMode.textmode: '\\nLeftarrow ', LaTeXMode.mathmode: '\\nLeftarrow'}, 8654: {LaTeXMode.textmode: '\\nLeftrightarrow 
', LaTeXMode.mathmode: '\\nLeftrightarrow'}, 8655: {LaTeXMode.textmode: '\\nRightarrow ', LaTeXMode.mathmode: '\\nRightarrow'}, 8656: {LaTeXMode.textmode: '\\Leftarrow ', LaTeXMode.mathmode: '\\Leftarrow'}, 8657: {LaTeXMode.textmode: '\\Uparrow ', LaTeXMode.mathmode: '\\Uparrow'}, 8658: {LaTeXMode.textmode: '\\Rightarrow ', LaTeXMode.mathmode: '\\Rightarrow'}, 8659: {LaTeXMode.textmode: '\\Downarrow ', LaTeXMode.mathmode: '\\Downarrow'}, 8660: {LaTeXMode.textmode: '\\Leftrightarrow ', LaTeXMode.mathmode: '\\Leftrightarrow'}, 8661: {LaTeXMode.textmode: '\\Updownarrow ', LaTeXMode.mathmode: '\\Updownarrow'}, 8666: {LaTeXMode.textmode: '\\Lleftarrow ', LaTeXMode.mathmode: '\\Lleftarrow'}, 8667: {LaTeXMode.textmode: '\\Rrightarrow ', LaTeXMode.mathmode: '\\Rrightarrow'}, 8669: {LaTeXMode.textmode: '\\rightsquigarrow '}, 8693: {LaTeXMode.textmode: '\\DownArrowUpArrow '}, 8701: {LaTeXMode.mathmode: '\\leftarrowtriangle'}, 8702: {LaTeXMode.mathmode: '\\rightarrowtriangle'}, 8703: {LaTeXMode.mathmode: '\\leftrightarrowtriangle'}, 8704: {LaTeXMode.textmode: '\\forall ', LaTeXMode.mathmode: '\\forall'}, 8705: {LaTeXMode.textmode: '\\complement ', LaTeXMode.mathmode: '\\complement'}, 8706: {LaTeXMode.textmode: '\\partial ', LaTeXMode.mathmode: '\\partial'}, 8707: {LaTeXMode.textmode: '\\exists ', LaTeXMode.mathmode: '\\exists'}, 8708: {LaTeXMode.textmode: '\\nexists ', LaTeXMode.mathmode: '\\nexists'}, 8709: {LaTeXMode.textmode: '\\varnothing ', LaTeXMode.mathmode: '\\varnothing'}, 8711: {LaTeXMode.textmode: '\\nabla ', LaTeXMode.mathmode: '\\nabla'}, 8712: {LaTeXMode.textmode: '\\in '}, 8713: {LaTeXMode.textmode: '\\not\\in ', LaTeXMode.mathmode: '\\notin'}, 8714: {LaTeXMode.mathmode: '\\in'}, 8715: {LaTeXMode.textmode: '\\ni ', LaTeXMode.mathmode: '\\ni'}, 8716: {LaTeXMode.textmode: '\\not\\ni '}, 8719: {LaTeXMode.textmode: '\\prod ', LaTeXMode.mathmode: '\\prod'}, 8720: {LaTeXMode.textmode: '\\coprod ', LaTeXMode.mathmode: '\\coprod'}, 8721: {LaTeXMode.textmode: '\\sum ', LaTeXMode.mathmode: '\\sum'}, 8722: {LaTeXMode.textmode: '-', LaTeXMode.mathmode: '-'}, 8723: {LaTeXMode.textmode: '\\mp ', LaTeXMode.mathmode: '\\mp'}, 8724: {LaTeXMode.textmode: '\\dotplus ', LaTeXMode.mathmode: '\\dotplus'}, 8726: {LaTeXMode.textmode: '\\setminus ', LaTeXMode.mathmode: '\\setminus'}, 8727: {LaTeXMode.textmode: '{_\\ast}', LaTeXMode.mathmode: '\\ast'}, 8728: {LaTeXMode.textmode: '\\circ ', LaTeXMode.mathmode: '\\circ'}, 8729: {LaTeXMode.textmode: '\\bullet '}, 8730: {LaTeXMode.textmode: '\\surd ', LaTeXMode.mathmode: '\\surd'}, 8733: {LaTeXMode.textmode: '\\propto ', LaTeXMode.mathmode: '\\propto'}, 8734: {LaTeXMode.textmode: '\\infty ', LaTeXMode.mathmode: '\\infty'}, 8735: {LaTeXMode.textmode: '\\rightangle '}, 8736: {LaTeXMode.textmode: '\\angle ', LaTeXMode.mathmode: '\\angle'}, 8737: {LaTeXMode.textmode: '\\measuredangle ', LaTeXMode.mathmode: '\\measuredangle'}, 8738: {LaTeXMode.textmode: '\\sphericalangle ', LaTeXMode.mathmode: '\\sphericalangle'}, 8739: {LaTeXMode.textmode: '\\mid ', LaTeXMode.mathmode: '\\mid'}, 8740: {LaTeXMode.textmode: '\\nmid ', LaTeXMode.mathmode: '\\nmid'}, 8741: {LaTeXMode.textmode: '\\parallel ', LaTeXMode.mathmode: '\\parallel'}, 8742: {LaTeXMode.textmode: '\\nparallel ', LaTeXMode.mathmode: '\\nparallel'}, 8743: {LaTeXMode.textmode: '\\wedge ', LaTeXMode.mathmode: '\\wedge'}, 8744: {LaTeXMode.textmode: '\\vee ', LaTeXMode.mathmode: '\\vee'}, 8745: {LaTeXMode.textmode: '\\cap ', LaTeXMode.mathmode: '\\cap'}, 8746: {LaTeXMode.textmode: '\\cup ', LaTeXMode.mathmode: '\\cup'}, 8747: 
{LaTeXMode.textmode: '\\int ', LaTeXMode.mathmode: '\\int'}, 8748: {LaTeXMode.textmode: '\\int\\!\\int ', LaTeXMode.mathmode: '\\iint'}, 8749: {LaTeXMode.textmode: '\\int\\!\\int\\!\\int ', LaTeXMode.mathmode: '\\iiint'}, 8750: {LaTeXMode.textmode: '\\oint ', LaTeXMode.mathmode: '\\oint'}, 8751: {LaTeXMode.textmode: '\\surfintegral ', LaTeXMode.mathmode: '\\oiint'}, 8752: {LaTeXMode.textmode: '\\volintegral ', LaTeXMode.mathmode: '\\oiiint'}, 8753: {LaTeXMode.textmode: '\\clwintegral '}, 8754: {LaTeXMode.textmode: '\\ElsevierGlyph{2232}'}, 8755: {LaTeXMode.textmode: '\\ElsevierGlyph{2233}'}, 8756: {LaTeXMode.textmode: '\\therefore ', LaTeXMode.mathmode: '\\therefore'}, 8757: {LaTeXMode.textmode: '\\because ', LaTeXMode.mathmode: '\\because'}, 8759: {LaTeXMode.textmode: '\\Colon ', LaTeXMode.mathmode: '\\Colon'}, 8760: {LaTeXMode.textmode: '\\ElsevierGlyph{2238}', LaTeXMode.mathmode: '\\dotminus'}, 8762: {LaTeXMode.textmode: '\\mathbin{{:}\\!\\!{-}\\!\\!{:}}'}, 8763: {LaTeXMode.textmode: '\\homothetic ', LaTeXMode.mathmode: '\\kernelcontraction'}, 8764: {LaTeXMode.textmode: '\\sim ', LaTeXMode.mathmode: '\\sim'}, 8765: {LaTeXMode.textmode: '\\backsim ', LaTeXMode.mathmode: '\\backsim'}, 8766: {LaTeXMode.textmode: '\\lazysinv '}, 8768: {LaTeXMode.textmode: '\\wr ', LaTeXMode.mathmode: '\\wr'}, 8769: {LaTeXMode.textmode: '\\not\\sim ', LaTeXMode.mathmode: '\\nsim'}, 8770: {LaTeXMode.textmode: '\\NotEqualTilde ', LaTeXMode.mathmode: '\\neqsim'}, 8771: {LaTeXMode.textmode: '\\simeq ', LaTeXMode.mathmode: '\\simeq'}, 8772: {LaTeXMode.textmode: '\\not\\simeq ', LaTeXMode.mathmode: '\\nsime'}, 8773: {LaTeXMode.textmode: '\\cong ', LaTeXMode.mathmode: '\\cong'}, 8774: {LaTeXMode.textmode: '\\approxnotequal '}, 8775: {LaTeXMode.textmode: '\\not\\cong ', LaTeXMode.mathmode: '\\ncong'}, 8776: {LaTeXMode.textmode: '\\approx ', LaTeXMode.mathmode: '\\approx'}, 8777: {LaTeXMode.textmode: '\\not\\approx ', LaTeXMode.mathmode: '\\napprox'}, 8778: {LaTeXMode.textmode: '\\approxeq ', LaTeXMode.mathmode: '\\approxeq'}, 8779: {LaTeXMode.textmode: '\\not\\apid '}, 8780: {LaTeXMode.textmode: '\\allequal '}, 8781: {LaTeXMode.textmode: '\\asymp ', LaTeXMode.mathmode: '\\asymp'}, 8782: {LaTeXMode.textmode: '\\NotHumpDownHump ', LaTeXMode.mathmode: '\\nBumpeq'}, 8783: {LaTeXMode.textmode: '\\NotHumpEqual ', LaTeXMode.mathmode: '\\nbumpeq'}, 8784: {LaTeXMode.textmode: '\\not\\doteq'}, 8785: {LaTeXMode.textmode: '\\doteqdot ', LaTeXMode.mathmode: '\\Doteq'}, 8786: {LaTeXMode.textmode: '\\fallingdotseq ', LaTeXMode.mathmode: '\\fallingdotseq'}, 8787: {LaTeXMode.textmode: '\\risingdotseq ', LaTeXMode.mathmode: '\\risingdotseq'}, 8788: {LaTeXMode.textmode: ':=', LaTeXMode.mathmode: '\\coloneq'}, 8789: {LaTeXMode.textmode: '=:', LaTeXMode.mathmode: '\\eqcolon'}, 8790: {LaTeXMode.textmode: '\\eqcirc ', LaTeXMode.mathmode: '\\eqcirc'}, 8791: {LaTeXMode.textmode: '\\circeq ', LaTeXMode.mathmode: '\\circeq'}, 8793: {LaTeXMode.textmode: '\\estimates ', LaTeXMode.mathmode: '\\wedgeq'}, 8794: {LaTeXMode.textmode: '\\ElsevierGlyph{225A}'}, 8795: {LaTeXMode.textmode: '\\starequal '}, 8796: {LaTeXMode.textmode: '\\triangleq ', LaTeXMode.mathmode: '\\triangleq'}, 8799: {LaTeXMode.textmode: '\\ElsevierGlyph{225F}', LaTeXMode.mathmode: '\\questeq'}, 8800: {LaTeXMode.textmode: '\\not =', LaTeXMode.mathmode: '\\ne'}, 8801: {LaTeXMode.textmode: '\\equiv ', LaTeXMode.mathmode: '\\equiv'}, 8802: {LaTeXMode.textmode: '\\not\\equiv ', LaTeXMode.mathmode: '\\nequiv'}, 8804: {LaTeXMode.textmode: '\\leq ', LaTeXMode.mathmode: '\\le'}, 8805: 
{LaTeXMode.textmode: '\\geq ', LaTeXMode.mathmode: '\\ge'}, 8806: {LaTeXMode.textmode: '\\leqq ', LaTeXMode.mathmode: '\\leqq'}, 8807: {LaTeXMode.textmode: '\\geqq ', LaTeXMode.mathmode: '\\geqq'}, 8808: {LaTeXMode.textmode: '\\lvertneqq ', LaTeXMode.mathmode: '\\lvertneqq'}, 8809: {LaTeXMode.textmode: '\\gvertneqq ', LaTeXMode.mathmode: '\\gvertneqq'}, 8810: {LaTeXMode.textmode: '\\NotLessLess '}, 8811: {LaTeXMode.textmode: '\\NotGreaterGreater '}, 8812: {LaTeXMode.textmode: '\\between ', LaTeXMode.mathmode: '\\between'}, 8813: {LaTeXMode.textmode: '\\not\\kern-0.3em\\times '}, 8814: {LaTeXMode.textmode: '\\not<', LaTeXMode.mathmode: '\\nless'}, 8815: {LaTeXMode.textmode: '\\not>', LaTeXMode.mathmode: '\\ngtr'}, 8816: {LaTeXMode.textmode: '\\not\\leq ', LaTeXMode.mathmode: '\\nleq'}, 8817: {LaTeXMode.textmode: '\\not\\geq ', LaTeXMode.mathmode: '\\ngeq'}, 8818: {LaTeXMode.textmode: '\\lessequivlnt ', LaTeXMode.mathmode: '\\lesssim'}, 8819: {LaTeXMode.textmode: '\\greaterequivlnt ', LaTeXMode.mathmode: '\\gtrsim'}, 8820: {LaTeXMode.textmode: '\\ElsevierGlyph{2274}'}, 8821: {LaTeXMode.textmode: '\\ElsevierGlyph{2275}'}, 8822: {LaTeXMode.textmode: '\\lessgtr ', LaTeXMode.mathmode: '\\lessgtr'}, 8823: {LaTeXMode.textmode: '\\gtrless ', LaTeXMode.mathmode: '\\gtrless'}, 8824: {LaTeXMode.textmode: '\\notlessgreater '}, 8825: {LaTeXMode.textmode: '\\notgreaterless '}, 8826: {LaTeXMode.textmode: '\\prec ', LaTeXMode.mathmode: '\\prec'}, 8827: {LaTeXMode.textmode: '\\succ ', LaTeXMode.mathmode: '\\succ'}, 8828: {LaTeXMode.textmode: '\\preccurlyeq ', LaTeXMode.mathmode: '\\preccurlyeq'}, 8829: {LaTeXMode.textmode: '\\succcurlyeq ', LaTeXMode.mathmode: '\\succcurlyeq'}, 8830: {LaTeXMode.textmode: '\\NotPrecedesTilde ', LaTeXMode.mathmode: '\\nprecsim'}, 8831: {LaTeXMode.textmode: '\\NotSucceedsTilde ', LaTeXMode.mathmode: '\\nsuccsim'}, 8832: {LaTeXMode.textmode: '\\not\\prec ', LaTeXMode.mathmode: '\\nprec'}, 8833: {LaTeXMode.textmode: '\\not\\succ ', LaTeXMode.mathmode: '\\nsucc'}, 8834: {LaTeXMode.textmode: '\\subset ', LaTeXMode.mathmode: '\\subset'}, 8835: {LaTeXMode.textmode: '\\supset ', LaTeXMode.mathmode: '\\supset'}, 8836: {LaTeXMode.textmode: '\\not\\subset ', LaTeXMode.mathmode: '\\nsubset'}, 8837: {LaTeXMode.textmode: '\\not\\supset ', LaTeXMode.mathmode: '\\nsupset'}, 8838: {LaTeXMode.textmode: '\\subseteq ', LaTeXMode.mathmode: '\\subseteq'}, 8839: {LaTeXMode.textmode: '\\supseteq ', LaTeXMode.mathmode: '\\supseteq'}, 8840: {LaTeXMode.textmode: '\\not\\subseteq ', LaTeXMode.mathmode: '\\nsubseteq'}, 8841: {LaTeXMode.textmode: '\\not\\supseteq ', LaTeXMode.mathmode: '\\nsupseteq'}, 8842: {LaTeXMode.textmode: '\\varsubsetneqq ', LaTeXMode.mathmode: '\\varsubsetneqq'}, 8843: {LaTeXMode.textmode: '\\varsupsetneq ', LaTeXMode.mathmode: '\\varsupsetneq'}, 8846: {LaTeXMode.textmode: '\\uplus ', LaTeXMode.mathmode: '\\uplus'}, 8847: {LaTeXMode.textmode: '\\NotSquareSubset '}, 8848: {LaTeXMode.textmode: '\\NotSquareSuperset '}, 8849: {LaTeXMode.textmode: '\\sqsubseteq ', LaTeXMode.mathmode: '\\sqsubseteq'}, 8850: {LaTeXMode.textmode: '\\sqsupseteq ', LaTeXMode.mathmode: '\\sqsupseteq'}, 8851: {LaTeXMode.textmode: '\\sqcap ', LaTeXMode.mathmode: '\\sqcap'}, 8852: {LaTeXMode.textmode: '\\sqcup ', LaTeXMode.mathmode: '\\sqcup'}, 8853: {LaTeXMode.textmode: '\\oplus ', LaTeXMode.mathmode: '\\oplus'}, 8854: {LaTeXMode.textmode: '\\ominus ', LaTeXMode.mathmode: '\\ominus'}, 8855: {LaTeXMode.textmode: '\\otimes ', LaTeXMode.mathmode: '\\otimes'}, 8856: {LaTeXMode.textmode: '\\oslash ', 
LaTeXMode.mathmode: '\\oslash'}, 8857: {LaTeXMode.textmode: '\\odot ', LaTeXMode.mathmode: '\\odot'}, 8858: {LaTeXMode.textmode: '\\circledcirc ', LaTeXMode.mathmode: '\\circledcirc'}, 8859: {LaTeXMode.textmode: '\\circledast ', LaTeXMode.mathmode: '\\circledast'}, 8861: {LaTeXMode.textmode: '\\circleddash ', LaTeXMode.mathmode: '\\circleddash'}, 8862: {LaTeXMode.textmode: '\\boxplus ', LaTeXMode.mathmode: '\\boxplus'}, 8863: {LaTeXMode.textmode: '\\boxminus ', LaTeXMode.mathmode: '\\boxminus'}, 8864: {LaTeXMode.textmode: '\\boxtimes ', LaTeXMode.mathmode: '\\boxtimes'}, 8865: {LaTeXMode.textmode: '\\boxdot ', LaTeXMode.mathmode: '\\boxdot'}, 8866: {LaTeXMode.textmode: '\\vdash ', LaTeXMode.mathmode: '\\vdash'}, 8867: {LaTeXMode.textmode: '\\dashv ', LaTeXMode.mathmode: '\\dashv'}, 8868: {LaTeXMode.textmode: '\\top ', LaTeXMode.mathmode: '\\top'}, 8869: {LaTeXMode.textmode: '\\perp ', LaTeXMode.mathmode: '\\perp'}, 8871: {LaTeXMode.textmode: '\\truestate ', LaTeXMode.mathmode: '\\models'}, 8872: {LaTeXMode.textmode: '\\forcesextra ', LaTeXMode.mathmode: '\\vDash'}, 8873: {LaTeXMode.textmode: '\\Vdash ', LaTeXMode.mathmode: '\\Vdash'}, 8874: {LaTeXMode.textmode: '\\Vvdash ', LaTeXMode.mathmode: '\\Vvdash'}, 8875: {LaTeXMode.textmode: '\\VDash '}, 8876: {LaTeXMode.textmode: '\\nvdash ', LaTeXMode.mathmode: '\\nvdash'}, 8877: {LaTeXMode.textmode: '\\nvDash ', LaTeXMode.mathmode: '\\nvDash'}, 8878: {LaTeXMode.textmode: '\\nVdash ', LaTeXMode.mathmode: '\\nVdash'}, 8879: {LaTeXMode.textmode: '\\nVDash ', LaTeXMode.mathmode: '\\nVDash'}, 8882: {LaTeXMode.textmode: '\\vartriangleleft ', LaTeXMode.mathmode: '\\vartriangleleft'}, 8883: {LaTeXMode.textmode: '\\vartriangleright ', LaTeXMode.mathmode: '\\vartriangleright'}, 8884: {LaTeXMode.textmode: '\\trianglelefteq ', LaTeXMode.mathmode: '\\trianglelefteq'}, 8885: {LaTeXMode.textmode: '\\trianglerighteq ', LaTeXMode.mathmode: '\\trianglerighteq'}, 8886: {LaTeXMode.textmode: '\\original '}, 8887: {LaTeXMode.textmode: '\\image '}, 8888: {LaTeXMode.textmode: '\\multimap ', LaTeXMode.mathmode: '\\multimap'}, 8889: {LaTeXMode.textmode: '\\hermitconjmatrix '}, 8890: {LaTeXMode.textmode: '\\intercal ', LaTeXMode.mathmode: '\\intercal'}, 8891: {LaTeXMode.textmode: '\\veebar ', LaTeXMode.mathmode: '\\veebar'}, 8894: {LaTeXMode.textmode: '\\rightanglearc '}, 8896: {LaTeXMode.textmode: '\\ElsevierGlyph{22C0}', LaTeXMode.mathmode: '\\bigwedge'}, 8897: {LaTeXMode.textmode: '\\ElsevierGlyph{22C1}', LaTeXMode.mathmode: '\\bigvee'}, 8898: {LaTeXMode.textmode: '\\bigcap ', LaTeXMode.mathmode: '\\bigcap'}, 8899: {LaTeXMode.textmode: '\\bigcup ', LaTeXMode.mathmode: '\\bigcup'}, 8900: {LaTeXMode.textmode: '\\diamond ', LaTeXMode.mathmode: '\\diamond'}, 8901: {LaTeXMode.textmode: '\\cdot ', LaTeXMode.mathmode: '\\cdot'}, 8902: {LaTeXMode.textmode: '\\star ', LaTeXMode.mathmode: '\\star'}, 8903: {LaTeXMode.textmode: '\\divideontimes ', LaTeXMode.mathmode: '\\divideontimes'}, 8904: {LaTeXMode.textmode: '\\bowtie ', LaTeXMode.mathmode: '\\bowtie'}, 8905: {LaTeXMode.textmode: '\\ltimes ', LaTeXMode.mathmode: '\\ltimes'}, 8906: {LaTeXMode.textmode: '\\rtimes ', LaTeXMode.mathmode: '\\rtimes'}, 8907: {LaTeXMode.textmode: '\\leftthreetimes ', LaTeXMode.mathmode: '\\leftthreetimes'}, 8908: {LaTeXMode.textmode: '\\rightthreetimes ', LaTeXMode.mathmode: '\\rightthreetimes'}, 8909: {LaTeXMode.textmode: '\\backsimeq ', LaTeXMode.mathmode: '\\backsimeq'}, 8910: {LaTeXMode.textmode: '\\curlyvee ', LaTeXMode.mathmode: '\\curlyvee'}, 8911: {LaTeXMode.textmode: '\\curlywedge ', 
LaTeXMode.mathmode: '\\curlywedge'}, 8912: {LaTeXMode.textmode: '\\Subset ', LaTeXMode.mathmode: '\\Subset'}, 8913: {LaTeXMode.textmode: '\\Supset ', LaTeXMode.mathmode: '\\Supset'}, 8914: {LaTeXMode.textmode: '\\Cap ', LaTeXMode.mathmode: '\\Cap'}, 8915: {LaTeXMode.textmode: '\\Cup ', LaTeXMode.mathmode: '\\Cup'}, 8916: {LaTeXMode.textmode: '\\pitchfork ', LaTeXMode.mathmode: '\\pitchfork'}, 8918: {LaTeXMode.textmode: '\\lessdot ', LaTeXMode.mathmode: '\\lessdot'}, 8919: {LaTeXMode.textmode: '\\gtrdot ', LaTeXMode.mathmode: '\\gtrdot'}, 8920: {LaTeXMode.textmode: '\\verymuchless '}, 8921: {LaTeXMode.textmode: '\\verymuchgreater ', LaTeXMode.mathmode: '\\ggg'}, 8922: {LaTeXMode.textmode: '\\lesseqgtr ', LaTeXMode.mathmode: '\\lesseqgtr'}, 8923: {LaTeXMode.textmode: '\\gtreqless ', LaTeXMode.mathmode: '\\gtreqless'}, 8926: {LaTeXMode.textmode: '\\curlyeqprec ', LaTeXMode.mathmode: '\\curlyeqprec'}, 8927: {LaTeXMode.textmode: '\\curlyeqsucc ', LaTeXMode.mathmode: '\\curlyeqsucc'}, 8930: {LaTeXMode.textmode: '\\not\\sqsubseteq '}, 8931: {LaTeXMode.textmode: '\\not\\sqsupseteq '}, 8933: {LaTeXMode.textmode: '\\Elzsqspne '}, 8934: {LaTeXMode.textmode: '\\lnsim ', LaTeXMode.mathmode: '\\lnsim'}, 8935: {LaTeXMode.textmode: '\\gnsim ', LaTeXMode.mathmode: '\\gnsim'}, 8936: {LaTeXMode.textmode: '\\precedesnotsimilar ', LaTeXMode.mathmode: '\\precnsim'}, 8937: {LaTeXMode.textmode: '\\succnsim ', LaTeXMode.mathmode: '\\succnsim'}, 8938: {LaTeXMode.textmode: '\\ntriangleleft ', LaTeXMode.mathmode: '\\ntriangleleft'}, 8939: {LaTeXMode.textmode: '\\ntriangleright ', LaTeXMode.mathmode: '\\ntriangleright'}, 8940: {LaTeXMode.textmode: '\\ntrianglelefteq ', LaTeXMode.mathmode: '\\ntrianglelefteq'}, 8941: {LaTeXMode.textmode: '\\ntrianglerighteq ', LaTeXMode.mathmode: '\\ntrianglerighteq'}, 8942: {LaTeXMode.textmode: '\\vdots ', LaTeXMode.mathmode: '\\vdots'}, 8943: {LaTeXMode.textmode: '\\cdots ', LaTeXMode.mathmode: '\\cdots'}, 8944: {LaTeXMode.textmode: '\\upslopeellipsis ', LaTeXMode.mathmode: '\\adots'}, 8945: {LaTeXMode.textmode: '\\downslopeellipsis ', LaTeXMode.mathmode: '\\ddots'}, 8966: {LaTeXMode.textmode: '\\varperspcorrespond '}, 8968: {LaTeXMode.textmode: '\\lceil ', LaTeXMode.mathmode: '\\lceil'}, 8969: {LaTeXMode.textmode: '\\rceil ', LaTeXMode.mathmode: '\\rceil'}, 8970: {LaTeXMode.textmode: '\\lfloor ', LaTeXMode.mathmode: '\\lfloor'}, 8971: {LaTeXMode.textmode: '\\rfloor ', LaTeXMode.mathmode: '\\rfloor'}, 8981: {LaTeXMode.textmode: '\\recorder '}, 8982: {LaTeXMode.textmode: '\\mathchar"2208'}, 8988: {LaTeXMode.textmode: '\\ulcorner ', LaTeXMode.mathmode: '\\ulcorner'}, 8989: {LaTeXMode.textmode: '\\urcorner ', LaTeXMode.mathmode: '\\urcorner'}, 8990: {LaTeXMode.textmode: '\\llcorner ', LaTeXMode.mathmode: '\\llcorner'}, 8991: {LaTeXMode.textmode: '\\lrcorner ', LaTeXMode.mathmode: '\\lrcorner'}, 8994: {LaTeXMode.textmode: '\\frown ', LaTeXMode.mathmode: '\\frown'}, 8995: {LaTeXMode.textmode: '\\smile ', LaTeXMode.mathmode: '\\smile'}, 9021: {LaTeXMode.textmode: '\\ElsevierGlyph{E838}', LaTeXMode.mathmode: '\\obar'}, 9123: {LaTeXMode.textmode: '\\Elzdlcorn '}, 9136: {LaTeXMode.textmode: '\\lmoustache ', LaTeXMode.mathmode: '\\lmoustache'}, 9137: {LaTeXMode.textmode: '\\rmoustache ', LaTeXMode.mathmode: '\\rmoustache'}, 9251: {LaTeXMode.textmode: '\\textvisiblespace '}, 9312: {LaTeXMode.textmode: '\\ding{172}'}, 9313: {LaTeXMode.textmode: '\\ding{173}'}, 9314: {LaTeXMode.textmode: '\\ding{174}'}, 9315: {LaTeXMode.textmode: '\\ding{175}'}, 9316: {LaTeXMode.textmode: '\\ding{176}'}, 9317: 
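# Circled digits (U+2460-2469) and the other Zapf Dingbats below map to pifont's
# \ding{...} commands, so the rendered output needs \usepackage{pifont}.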
{LaTeXMode.textmode: '\\ding{177}'}, 9318: {LaTeXMode.textmode: '\\ding{178}'}, 9319: {LaTeXMode.textmode: '\\ding{179}'}, 9320: {LaTeXMode.textmode: '\\ding{180}'}, 9321: {LaTeXMode.textmode: '\\ding{181}'}, 9416: {LaTeXMode.textmode: '\\circledS ', LaTeXMode.mathmode: '\\circledS'}, 9478: {LaTeXMode.textmode: '\\Elzdshfnc '}, 9497: {LaTeXMode.textmode: '\\Elzsqfnw '}, 9585: {LaTeXMode.textmode: '\\diagup ', LaTeXMode.mathmode: '\\diagup'}, 9586: {LaTeXMode.mathmode: '\\diagdown'}, 9632: {LaTeXMode.textmode: '\\ding{110}', LaTeXMode.mathmode: '\\blacksquare'}, 9633: {LaTeXMode.textmode: '\\square ', LaTeXMode.mathmode: '\\square'}, 9642: {LaTeXMode.textmode: '\\blacksquare '}, 9645: {LaTeXMode.textmode: '\\fbox{~~}'}, 9647: {LaTeXMode.textmode: '\\Elzvrecto '}, 9649: {LaTeXMode.textmode: '\\ElsevierGlyph{E381}'}, 9650: {LaTeXMode.textmode: '\\ding{115}'}, 9651: {LaTeXMode.textmode: '\\bigtriangleup ', LaTeXMode.mathmode: '\\bigtriangleup'}, 9652: {LaTeXMode.textmode: '\\blacktriangle ', LaTeXMode.mathmode: '\\blacktriangle'}, 9653: {LaTeXMode.textmode: '\\vartriangle ', LaTeXMode.mathmode: '\\vartriangle'}, 9656: {LaTeXMode.textmode: '\\blacktriangleright ', LaTeXMode.mathmode: '\\blacktriangleright'}, 9657: {LaTeXMode.textmode: '\\triangleright ', LaTeXMode.mathmode: '\\triangleright'}, 9660: {LaTeXMode.textmode: '\\ding{116}'}, 9661: {LaTeXMode.textmode: '\\bigtriangledown ', LaTeXMode.mathmode: '\\bigtriangledown'}, 9662: {LaTeXMode.textmode: '\\blacktriangledown ', LaTeXMode.mathmode: '\\blacktriangledown'}, 9663: {LaTeXMode.textmode: '\\triangledown ', LaTeXMode.mathmode: '\\triangledown'}, 9666: {LaTeXMode.textmode: '\\blacktriangleleft ', LaTeXMode.mathmode: '\\blacktriangleleft'}, 9667: {LaTeXMode.textmode: '\\triangleleft ', LaTeXMode.mathmode: '\\triangleleft'}, 9670: {LaTeXMode.textmode: '\\ding{117}'}, 9674: {LaTeXMode.textmode: '\\lozenge ', LaTeXMode.mathmode: '\\lozenge'}, 9675: {LaTeXMode.textmode: '\\bigcirc ', LaTeXMode.mathmode: '\\bigcirc'}, 9679: {LaTeXMode.textmode: '\\ding{108}'}, 9680: {LaTeXMode.textmode: '\\Elzcirfl '}, 9681: {LaTeXMode.textmode: '\\Elzcirfr '}, 9682: {LaTeXMode.textmode: '\\Elzcirfb '}, 9687: {LaTeXMode.textmode: '\\ding{119}'}, 9688: {LaTeXMode.textmode: '\\Elzrvbull '}, 9703: {LaTeXMode.textmode: '\\Elzsqfl '}, 9704: {LaTeXMode.textmode: '\\Elzsqfr '}, 9706: {LaTeXMode.textmode: '\\Elzsqfse '}, 9711: {LaTeXMode.textmode: '\\bigcirc '}, 9733: {LaTeXMode.textmode: '\\ding{72}', LaTeXMode.mathmode: '\\bigstar'}, 9742: {LaTeXMode.textmode: '\\ding{37}'}, 9755: {LaTeXMode.textmode: '\\ding{42}'}, 9758: {LaTeXMode.textmode: '\\ding{43}'}, 9792: {LaTeXMode.textmode: '\\venus '}, 9794: {LaTeXMode.textmode: '\\male '}, 9799: {LaTeXMode.textmode: '\\pluto '}, 9800: {LaTeXMode.textmode: '\\aries '}, 9801: {LaTeXMode.textmode: '\\taurus '}, 9802: {LaTeXMode.textmode: '\\gemini '}, 9803: {LaTeXMode.textmode: '\\cancer '}, 9804: {LaTeXMode.textmode: '\\leo '}, 9805: {LaTeXMode.textmode: '\\virgo '}, 9806: {LaTeXMode.textmode: '\\libra '}, 9807: {LaTeXMode.textmode: '\\scorpio '}, 9808: {LaTeXMode.textmode: '\\sagittarius '}, 9809: {LaTeXMode.textmode: '\\capricornus '}, 9810: {LaTeXMode.textmode: '\\aquarius '}, 9811: {LaTeXMode.textmode: '\\pisces '}, 9824: {LaTeXMode.textmode: '\\ding{171}', LaTeXMode.mathmode: '\\spadesuit'}, 9826: {LaTeXMode.textmode: '\\diamond ', LaTeXMode.mathmode: '\\diamondsuit'}, 9827: {LaTeXMode.textmode: '\\ding{168}', LaTeXMode.mathmode: '\\clubsuit'}, 9829: {LaTeXMode.textmode: '\\ding{170}'}, 9830: {LaTeXMode.textmode: 
'\\ding{169}'}, 9833: {LaTeXMode.textmode: '\\quarternote '}, 9834: {LaTeXMode.textmode: '\\eighthnote '}, 9837: {LaTeXMode.textmode: '\\flat ', LaTeXMode.mathmode: '\\flat'}, 9838: {LaTeXMode.textmode: '\\natural ', LaTeXMode.mathmode: '\\natural'}, 9839: {LaTeXMode.textmode: '\\sharp ', LaTeXMode.mathmode: '\\sharp'}, 9985: {LaTeXMode.textmode: '\\ding{33}'}, 9986: {LaTeXMode.textmode: '\\ding{34}'}, 9987: {LaTeXMode.textmode: '\\ding{35}'}, 9988: {LaTeXMode.textmode: '\\ding{36}'}, 9990: {LaTeXMode.textmode: '\\ding{38}'}, 9991: {LaTeXMode.textmode: '\\ding{39}'}, 9992: {LaTeXMode.textmode: '\\ding{40}'}, 9993: {LaTeXMode.textmode: '\\ding{41}'}, 9996: {LaTeXMode.textmode: '\\ding{44}'}, 9997: {LaTeXMode.textmode: '\\ding{45}'}, 9998: {LaTeXMode.textmode: '\\ding{46}'}, 9999: {LaTeXMode.textmode: '\\ding{47}'}, 10000: {LaTeXMode.textmode: '\\ding{48}'}, 10001: {LaTeXMode.textmode: '\\ding{49}'}, 10002: {LaTeXMode.textmode: '\\ding{50}'}, 10003: {LaTeXMode.textmode: '\\ding{51}', LaTeXMode.mathmode: '\\checkmark'}, 10004: {LaTeXMode.textmode: '\\ding{52}'}, 10005: {LaTeXMode.textmode: '\\ding{53}'}, 10006: {LaTeXMode.textmode: '\\ding{54}'}, 10007: {LaTeXMode.textmode: '\\ding{55}'}, 10008: {LaTeXMode.textmode: '\\ding{56}'}, 10009: {LaTeXMode.textmode: '\\ding{57}'}, 10010: {LaTeXMode.textmode: '\\ding{58}'}, 10011: {LaTeXMode.textmode: '\\ding{59}'}, 10012: {LaTeXMode.textmode: '\\ding{60}'}, 10013: {LaTeXMode.textmode: '\\ding{61}'}, 10014: {LaTeXMode.textmode: '\\ding{62}'}, 10015: {LaTeXMode.textmode: '\\ding{63}'}, 10016: {LaTeXMode.textmode: '\\ding{64}', LaTeXMode.mathmode: '\\maltese'}, 10017: {LaTeXMode.textmode: '\\ding{65}'}, 10018: {LaTeXMode.textmode: '\\ding{66}'}, 10019: {LaTeXMode.textmode: '\\ding{67}'}, 10020: {LaTeXMode.textmode: '\\ding{68}'}, 10021: {LaTeXMode.textmode: '\\ding{69}'}, 10022: {LaTeXMode.textmode: '\\ding{70}'}, 10023: {LaTeXMode.textmode: '\\ding{71}'}, 10025: {LaTeXMode.textmode: '\\ding{73}'}, 10026: {LaTeXMode.textmode: '\\ding{74}'}, 10027: {LaTeXMode.textmode: '\\ding{75}'}, 10028: {LaTeXMode.textmode: '\\ding{76}'}, 10029: {LaTeXMode.textmode: '\\ding{77}'}, 10030: {LaTeXMode.textmode: '\\ding{78}'}, 10031: {LaTeXMode.textmode: '\\ding{79}'}, 10032: {LaTeXMode.textmode: '\\ding{80}'}, 10033: {LaTeXMode.textmode: '\\ding{81}'}, 10034: {LaTeXMode.textmode: '\\ding{82}'}, 10035: {LaTeXMode.textmode: '\\ding{83}'}, 10036: {LaTeXMode.textmode: '\\ding{84}'}, 10037: {LaTeXMode.textmode: '\\ding{85}'}, 10038: {LaTeXMode.textmode: '\\ding{86}'}, 10039: {LaTeXMode.textmode: '\\ding{87}'}, 10040: {LaTeXMode.textmode: '\\ding{88}'}, 10041: {LaTeXMode.textmode: '\\ding{89}'}, 10042: {LaTeXMode.textmode: '\\ding{90}'}, 10043: {LaTeXMode.textmode: '\\ding{91}'}, 10044: {LaTeXMode.textmode: '\\ding{92}'}, 10045: {LaTeXMode.textmode: '\\ding{93}'}, 10046: {LaTeXMode.textmode: '\\ding{94}'}, 10047: {LaTeXMode.textmode: '\\ding{95}'}, 10048: {LaTeXMode.textmode: '\\ding{96}'}, 10049: {LaTeXMode.textmode: '\\ding{97}'}, 10050: {LaTeXMode.textmode: '\\ding{98}'}, 10051: {LaTeXMode.textmode: '\\ding{99}'}, 10052: {LaTeXMode.textmode: '\\ding{100}'}, 10053: {LaTeXMode.textmode: '\\ding{101}'}, 10054: {LaTeXMode.textmode: '\\ding{102}'}, 10055: {LaTeXMode.textmode: '\\ding{103}'}, 10056: {LaTeXMode.textmode: '\\ding{104}'}, 10057: {LaTeXMode.textmode: '\\ding{105}'}, 10058: {LaTeXMode.textmode: '\\ding{106}'}, 10059: {LaTeXMode.textmode: '\\ding{107}'}, 10061: {LaTeXMode.textmode: '\\ding{109}'}, 10063: {LaTeXMode.textmode: '\\ding{111}'}, 10064: {LaTeXMode.textmode: 
'\\ding{112}'}, 10065: {LaTeXMode.textmode: '\\ding{113}'}, 10066: {LaTeXMode.textmode: '\\ding{114}'}, 10070: {LaTeXMode.textmode: '\\ding{118}'}, 10072: {LaTeXMode.textmode: '\\ding{120}'}, 10073: {LaTeXMode.textmode: '\\ding{121}'}, 10074: {LaTeXMode.textmode: '\\ding{122}'}, 10075: {LaTeXMode.textmode: '\\ding{123}'}, 10076: {LaTeXMode.textmode: '\\ding{124}'}, 10077: {LaTeXMode.textmode: '\\ding{125}'}, 10078: {LaTeXMode.textmode: '\\ding{126}'}, 10081: {LaTeXMode.textmode: '\\ding{161}'}, 10082: {LaTeXMode.textmode: '\\ding{162}'}, 10083: {LaTeXMode.textmode: '\\ding{163}'}, 10084: {LaTeXMode.textmode: '\\ding{164}'}, 10085: {LaTeXMode.textmode: '\\ding{165}'}, 10086: {LaTeXMode.textmode: '\\ding{166}'}, 10087: {LaTeXMode.textmode: '\\ding{167}'}, 10102: {LaTeXMode.textmode: '\\ding{182}'}, 10103: {LaTeXMode.textmode: '\\ding{183}'}, 10104: {LaTeXMode.textmode: '\\ding{184}'}, 10105: {LaTeXMode.textmode: '\\ding{185}'}, 10106: {LaTeXMode.textmode: '\\ding{186}'}, 10107: {LaTeXMode.textmode: '\\ding{187}'}, 10108: {LaTeXMode.textmode: '\\ding{188}'}, 10109: {LaTeXMode.textmode: '\\ding{189}'}, 10110: {LaTeXMode.textmode: '\\ding{190}'}, 10111: {LaTeXMode.textmode: '\\ding{191}'}, 10112: {LaTeXMode.textmode: '\\ding{192}'}, 10113: {LaTeXMode.textmode: '\\ding{193}'}, 10114: {LaTeXMode.textmode: '\\ding{194}'}, 10115: {LaTeXMode.textmode: '\\ding{195}'}, 10116: {LaTeXMode.textmode: '\\ding{196}'}, 10117: {LaTeXMode.textmode: '\\ding{197}'}, 10118: {LaTeXMode.textmode: '\\ding{198}'}, 10119: {LaTeXMode.textmode: '\\ding{199}'}, 10120: {LaTeXMode.textmode: '\\ding{200}'}, 10121: {LaTeXMode.textmode: '\\ding{201}'}, 10122: {LaTeXMode.textmode: '\\ding{202}'}, 10123: {LaTeXMode.textmode: '\\ding{203}'}, 10124: {LaTeXMode.textmode: '\\ding{204}'}, 10125: {LaTeXMode.textmode: '\\ding{205}'}, 10126: {LaTeXMode.textmode: '\\ding{206}'}, 10127: {LaTeXMode.textmode: '\\ding{207}'}, 10128: {LaTeXMode.textmode: '\\ding{208}'}, 10129: {LaTeXMode.textmode: '\\ding{209}'}, 10130: {LaTeXMode.textmode: '\\ding{210}'}, 10131: {LaTeXMode.textmode: '\\ding{211}'}, 10132: {LaTeXMode.textmode: '\\ding{212}'}, 10136: {LaTeXMode.textmode: '\\ding{216}'}, 10137: {LaTeXMode.textmode: '\\ding{217}'}, 10138: {LaTeXMode.textmode: '\\ding{218}'}, 10139: {LaTeXMode.textmode: '\\ding{219}'}, 10140: {LaTeXMode.textmode: '\\ding{220}'}, 10141: {LaTeXMode.textmode: '\\ding{221}'}, 10142: {LaTeXMode.textmode: '\\ding{222}'}, 10143: {LaTeXMode.textmode: '\\ding{223}'}, 10144: {LaTeXMode.textmode: '\\ding{224}'}, 10145: {LaTeXMode.textmode: '\\ding{225}'}, 10146: {LaTeXMode.textmode: '\\ding{226}'}, 10147: {LaTeXMode.textmode: '\\ding{227}'}, 10148: {LaTeXMode.textmode: '\\ding{228}'}, 10149: {LaTeXMode.textmode: '\\ding{229}'}, 10150: {LaTeXMode.textmode: '\\ding{230}'}, 10151: {LaTeXMode.textmode: '\\ding{231}'}, 10152: {LaTeXMode.textmode: '\\ding{232}'}, 10153: {LaTeXMode.textmode: '\\ding{233}'}, 10154: {LaTeXMode.textmode: '\\ding{234}'}, 10155: {LaTeXMode.textmode: '\\ding{235}'}, 10156: {LaTeXMode.textmode: '\\ding{236}'}, 10157: {LaTeXMode.textmode: '\\ding{237}'}, 10158: {LaTeXMode.textmode: '\\ding{238}'}, 10159: {LaTeXMode.textmode: '\\ding{239}'}, 10161: {LaTeXMode.textmode: '\\ding{241}'}, 10162: {LaTeXMode.textmode: '\\ding{242}'}, 10163: {LaTeXMode.textmode: '\\ding{243}'}, 10164: {LaTeXMode.textmode: '\\ding{244}'}, 10165: {LaTeXMode.textmode: '\\ding{245}'}, 10166: {LaTeXMode.textmode: '\\ding{246}'}, 10167: {LaTeXMode.textmode: '\\ding{247}'}, 10168: {LaTeXMode.textmode: '\\ding{248}'}, 10169: 
{LaTeXMode.textmode: '\\ding{249}'}, 10170: {LaTeXMode.textmode: '\\ding{250}'}, 10171: {LaTeXMode.textmode: '\\ding{251}'}, 10172: {LaTeXMode.textmode: '\\ding{252}'}, 10173: {LaTeXMode.textmode: '\\ding{253}'}, 10174: {LaTeXMode.textmode: '\\ding{254}'}, 10229: {LaTeXMode.textmode: '\\longleftarrow ', LaTeXMode.mathmode: '\\longleftarrow'}, 10230: {LaTeXMode.textmode: '\\longrightarrow ', LaTeXMode.mathmode: '\\longrightarrow'}, 10231: {LaTeXMode.textmode: '\\longleftrightarrow ', LaTeXMode.mathmode: '\\longleftrightarrow'}, 10232: {LaTeXMode.textmode: '\\Longleftarrow ', LaTeXMode.mathmode: '\\Longleftarrow'}, 10233: {LaTeXMode.textmode: '\\Longrightarrow ', LaTeXMode.mathmode: '\\Longrightarrow'}, 10234: {LaTeXMode.textmode: '\\Longleftrightarrow ', LaTeXMode.mathmode: '\\Longleftrightarrow'}, 10236: {LaTeXMode.textmode: '\\longmapsto ', LaTeXMode.mathmode: '\\longmapsto'}, 10239: {LaTeXMode.textmode: '\\sim\\joinrel\\leadsto'}, 10501: {LaTeXMode.textmode: '\\ElsevierGlyph{E212}'}, 10502: {LaTeXMode.mathmode: '\\Mapsfrom'}, 10503: {LaTeXMode.mathmode: '\\Mapsto'}, 10506: {LaTeXMode.mathmode: '\\Uuparrow'}, 10507: {LaTeXMode.mathmode: '\\Ddownarrow'}, 10511: {LaTeXMode.mathmode: '\\dbkarow'}, 10512: {LaTeXMode.mathmode: '\\drbkarrow'}, 10514: {LaTeXMode.textmode: '\\UpArrowBar '}, 10515: {LaTeXMode.textmode: '\\DownArrowBar '}, 10518: {LaTeXMode.mathmode: '\\twoheadrightarrowtail'}, 10531: {LaTeXMode.textmode: '\\ElsevierGlyph{E20C}'}, 10532: {LaTeXMode.textmode: '\\ElsevierGlyph{E20D}'}, 10533: {LaTeXMode.textmode: '\\ElsevierGlyph{E20B}', LaTeXMode.mathmode: '\\hksearow'}, 10534: {LaTeXMode.textmode: '\\ElsevierGlyph{E20A}', LaTeXMode.mathmode: '\\hkswarow'}, 10535: {LaTeXMode.textmode: '\\ElsevierGlyph{E211}', LaTeXMode.mathmode: '\\tona'}, 10536: {LaTeXMode.textmode: '\\ElsevierGlyph{E20E}', LaTeXMode.mathmode: '\\toea'}, 10537: {LaTeXMode.textmode: '\\ElsevierGlyph{E20F}', LaTeXMode.mathmode: '\\tosa'}, 10538: {LaTeXMode.textmode: '\\ElsevierGlyph{E210}', LaTeXMode.mathmode: '\\towa'}, 10539: {LaTeXMode.mathmode: '\\rdiagovfdiag'}, 10540: {LaTeXMode.mathmode: '\\fdiagovrdiag'}, 10541: {LaTeXMode.mathmode: '\\seovnearrow'}, 10542: {LaTeXMode.mathmode: '\\neovsearrow'}, 10543: {LaTeXMode.mathmode: '\\fdiagovnearrow'}, 10544: {LaTeXMode.mathmode: '\\rdiagovsearrow'}, 10545: {LaTeXMode.mathmode: '\\neovnwarrow'}, 10546: {LaTeXMode.mathmode: '\\nwovnearrow'}, 10547: {LaTeXMode.textmode: '\\ElsevierGlyph{E21D}'}, 10550: {LaTeXMode.textmode: '\\ElsevierGlyph{E21A}'}, 10551: {LaTeXMode.textmode: '\\ElsevierGlyph{E219}'}, 10560: {LaTeXMode.textmode: '\\Elolarr ', LaTeXMode.mathmode: '\\circlearrowleft'}, 10561: {LaTeXMode.textmode: '\\Elorarr ', LaTeXMode.mathmode: '\\circlearrowright'}, 10562: {LaTeXMode.textmode: '\\ElzRlarr '}, 10564: {LaTeXMode.textmode: '\\ElzrLarr '}, 10567: {LaTeXMode.textmode: '\\Elzrarrx '}, 10574: {LaTeXMode.textmode: '\\LeftRightVector '}, 10575: {LaTeXMode.textmode: '\\RightUpDownVector '}, 10576: {LaTeXMode.textmode: '\\DownLeftRightVector '}, 10577: {LaTeXMode.textmode: '\\LeftUpDownVector '}, 10578: {LaTeXMode.textmode: '\\LeftVectorBar '}, 10579: {LaTeXMode.textmode: '\\RightVectorBar '}, 10580: {LaTeXMode.textmode: '\\RightUpVectorBar '}, 10581: {LaTeXMode.textmode: '\\RightDownVectorBar '}, 10582: {LaTeXMode.textmode: '\\DownLeftVectorBar '}, 10583: {LaTeXMode.textmode: '\\DownRightVectorBar '}, 10584: {LaTeXMode.textmode: '\\LeftUpVectorBar '}, 10585: {LaTeXMode.textmode: '\\LeftDownVectorBar '}, 10586: {LaTeXMode.textmode: '\\LeftTeeVector '}, 10587: 
{LaTeXMode.textmode: '\\RightTeeVector '}, 10588: {LaTeXMode.textmode: '\\RightUpTeeVector '}, 10589: {LaTeXMode.textmode: '\\RightDownTeeVector '}, 10590: {LaTeXMode.textmode: '\\DownLeftTeeVector '}, 10591: {LaTeXMode.textmode: '\\DownRightTeeVector '}, 10592: {LaTeXMode.textmode: '\\LeftUpTeeVector '}, 10593: {LaTeXMode.textmode: '\\LeftDownTeeVector '}, 10606: {LaTeXMode.textmode: '\\UpEquilibrium '}, 10607: {LaTeXMode.textmode: '\\ReverseUpEquilibrium '}, 10608: {LaTeXMode.textmode: '\\RoundImplies '}, 10620: {LaTeXMode.textmode: '\\ElsevierGlyph{E214}'}, 10621: {LaTeXMode.textmode: '\\ElsevierGlyph{E215}'}, 10624: {LaTeXMode.textmode: '\\Elztfnc ', LaTeXMode.mathmode: '\\Vvert'}, 10629: {LaTeXMode.textmode: '\\ElsevierGlyph{3018}'}, 10630: {LaTeXMode.textmode: '\\Elroang '}, 10643: {LaTeXMode.textmode: '<\\kern-0.58em('}, 10644: {LaTeXMode.textmode: '\\ElsevierGlyph{E291}'}, 10649: {LaTeXMode.textmode: '\\Elzddfnc '}, 10652: {LaTeXMode.textmode: '\\Angle '}, 10656: {LaTeXMode.textmode: '\\Elzlpargt '}, 10677: {LaTeXMode.textmode: '\\ElsevierGlyph{E260}'}, 10678: {LaTeXMode.textmode: '\\ElsevierGlyph{E61B}'}, 10680: {LaTeXMode.mathmode: '\\obslash'}, 10692: {LaTeXMode.mathmode: '\\boxdiag'}, 10693: {LaTeXMode.mathmode: '\\boxbslash'}, 10694: {LaTeXMode.mathmode: '\\boxast'}, 10695: {LaTeXMode.mathmode: '\\boxcircle'}, 10698: {LaTeXMode.textmode: '\\ElzLap '}, 10699: {LaTeXMode.textmode: '\\Elzdefas '}, 10703: {LaTeXMode.textmode: '\\NotLeftTriangleBar '}, 10704: {LaTeXMode.textmode: '\\NotRightTriangleBar '}, 10716: {LaTeXMode.textmode: '\\ElsevierGlyph{E372}'}, 10719: {LaTeXMode.mathmode: '\\dualmap'}, 10722: {LaTeXMode.mathmode: '\\shuffle'}, 10731: {LaTeXMode.textmode: '\\blacklozenge ', LaTeXMode.mathmode: '\\blacklozenge'}, 10740: {LaTeXMode.textmode: '\\RuleDelayed '}, 10752: {LaTeXMode.mathmode: '\\bigodot'}, 10753: {LaTeXMode.mathmode: '\\bigoplus'}, 10754: {LaTeXMode.mathmode: '\\bigotimes'}, 10755: {LaTeXMode.mathmode: '\\bigcupdot'}, 10756: {LaTeXMode.textmode: '\\Elxuplus ', LaTeXMode.mathmode: '\\biguplus'}, 10757: {LaTeXMode.textmode: '\\ElzThr ', LaTeXMode.mathmode: '\\bigsqcap'}, 10758: {LaTeXMode.textmode: '\\Elxsqcup ', LaTeXMode.mathmode: '\\bigsqcup'}, 10759: {LaTeXMode.textmode: '\\ElzInf ', LaTeXMode.mathmode: '\\conjquant'}, 10760: {LaTeXMode.textmode: '\\ElzSup ', LaTeXMode.mathmode: '\\disjquant'}, 10761: {LaTeXMode.mathmode: '\\bigtimes'}, 10765: {LaTeXMode.textmode: '\\ElzCint ', LaTeXMode.mathmode: '\\intbar'}, 10766: {LaTeXMode.mathmode: '\\intBar'}, 10767: {LaTeXMode.textmode: '\\clockoint '}, 10768: {LaTeXMode.textmode: '\\ElsevierGlyph{E395}'}, 10774: {LaTeXMode.textmode: '\\sqrint '}, 10776: {LaTeXMode.mathmode: '\\intx'}, 10777: {LaTeXMode.mathmode: '\\intcap'}, 10778: {LaTeXMode.mathmode: '\\intcup'}, 10779: {LaTeXMode.mathmode: '\\upint'}, 10780: {LaTeXMode.mathmode: '\\lowint'}, 10789: {LaTeXMode.textmode: '\\ElsevierGlyph{E25A}', LaTeXMode.mathmode: '\\plusdot'}, 10794: {LaTeXMode.textmode: '\\ElsevierGlyph{E25B}', LaTeXMode.mathmode: '\\minusdot'}, 10797: {LaTeXMode.textmode: '\\ElsevierGlyph{E25C}'}, 10798: {LaTeXMode.textmode: '\\ElsevierGlyph{E25D}'}, 10799: {LaTeXMode.textmode: '\\ElzTimes '}, 10802: {LaTeXMode.mathmode: '\\btimes'}, 10804: {LaTeXMode.textmode: '\\ElsevierGlyph{E25E}'}, 10805: {LaTeXMode.textmode: '\\ElsevierGlyph{E25E}'}, 10812: {LaTeXMode.textmode: '\\ElsevierGlyph{E259}', LaTeXMode.mathmode: '\\intprod'}, 10813: {LaTeXMode.mathmode: '\\intprodr'}, 10815: {LaTeXMode.textmode: '\\amalg ', LaTeXMode.mathmode: '\\amalg'}, 
10835: {LaTeXMode.textmode: '\\ElzAnd '}, 10836: {LaTeXMode.textmode: '\\ElzOr '}, 10837: {LaTeXMode.textmode: '\\ElsevierGlyph{E36E}'}, 10838: {LaTeXMode.textmode: '\\ElOr '}, 10846: {LaTeXMode.textmode: '\\perspcorrespond ', LaTeXMode.mathmode: '\\doublebarwedge'}, 10847: {LaTeXMode.textmode: '\\Elzminhat '}, 10851: {LaTeXMode.textmode: '\\ElsevierGlyph{225A}'}, 10862: {LaTeXMode.textmode: '\\stackrel{*}{=}'}, 10869: {LaTeXMode.textmode: '\\Equal '}, 10871: {LaTeXMode.mathmode: '\\ddotseq'}, 10877: {LaTeXMode.textmode: '\\nleqslant ', LaTeXMode.mathmode: '\\nleqslant'}, 10878: {LaTeXMode.textmode: '\\ngeqslant ', LaTeXMode.mathmode: '\\ngeqslant'}, 10885: {LaTeXMode.textmode: '\\lessapprox ', LaTeXMode.mathmode: '\\lessapprox'}, 10886: {LaTeXMode.textmode: '\\gtrapprox ', LaTeXMode.mathmode: '\\gtrapprox'}, 10887: {LaTeXMode.textmode: '\\lneq ', LaTeXMode.mathmode: '\\lneq'}, 10888: {LaTeXMode.textmode: '\\gneq ', LaTeXMode.mathmode: '\\gneq'}, 10889: {LaTeXMode.textmode: '\\lnapprox ', LaTeXMode.mathmode: '\\lnapprox'}, 10890: {LaTeXMode.textmode: '\\gnapprox ', LaTeXMode.mathmode: '\\gnapprox'}, 10891: {LaTeXMode.textmode: '\\lesseqqgtr ', LaTeXMode.mathmode: '\\lesseqqgtr'}, 10892: {LaTeXMode.textmode: '\\gtreqqless ', LaTeXMode.mathmode: '\\gtreqqless'}, 10901: {LaTeXMode.textmode: '\\eqslantless ', LaTeXMode.mathmode: '\\eqslantless'}, 10902: {LaTeXMode.textmode: '\\eqslantgtr ', LaTeXMode.mathmode: '\\eqslantgtr'}, 10909: {LaTeXMode.textmode: '\\Pisymbol{ppi020}{117}'}, 10910: {LaTeXMode.textmode: '\\Pisymbol{ppi020}{105}'}, 10913: {LaTeXMode.textmode: '\\NotNestedLessLess '}, 10914: {LaTeXMode.textmode: '\\NotNestedGreaterGreater '}, 10915: {LaTeXMode.mathmode: '\\partialmeetcontraction'}, 10926: {LaTeXMode.mathmode: '\\bumpeqq'}, 10927: {LaTeXMode.textmode: '\\not\\preceq ', LaTeXMode.mathmode: '\\npreceq'}, 10928: {LaTeXMode.textmode: '\\not\\succeq ', LaTeXMode.mathmode: '\\nsucceq'}, 10933: {LaTeXMode.textmode: '\\precneqq ', LaTeXMode.mathmode: '\\precneqq'}, 10934: {LaTeXMode.textmode: '\\succneqq ', LaTeXMode.mathmode: '\\succneqq'}, 10935: {LaTeXMode.textmode: '\\precapprox ', LaTeXMode.mathmode: '\\precapprox'}, 10936: {LaTeXMode.textmode: '\\succapprox ', LaTeXMode.mathmode: '\\succapprox'}, 10937: {LaTeXMode.textmode: '\\precnapprox ', LaTeXMode.mathmode: '\\precnapprox'}, 10938: {LaTeXMode.textmode: '\\succnapprox ', LaTeXMode.mathmode: '\\succnapprox'}, 10949: {LaTeXMode.textmode: '\\nsubseteqq ', LaTeXMode.mathmode: '\\nsubseteqq'}, 10950: {LaTeXMode.textmode: '\\nsupseteqq'}, 10955: {LaTeXMode.textmode: '\\subsetneqq ', LaTeXMode.mathmode: '\\subsetneqq'}, 10956: {LaTeXMode.textmode: '\\supsetneqq ', LaTeXMode.mathmode: '\\supsetneqq'}, 10971: {LaTeXMode.mathmode: '\\mlcp'}, 10973: {LaTeXMode.mathmode: '\\forks'}, 10979: {LaTeXMode.mathmode: '\\dashV'}, 10980: {LaTeXMode.mathmode: '\\Dashv'}, 10987: {LaTeXMode.textmode: '\\ElsevierGlyph{E30D}'}, 10996: {LaTeXMode.mathmode: '\\interleave'}, 10998: {LaTeXMode.textmode: '\\Elztdcol '}, 11005: {LaTeXMode.textmode: '{\\rlap{\\textbackslash}{{/}\\!\\!{/}}}'}, 12298: {LaTeXMode.textmode: '\\ElsevierGlyph{300A}'}, 12299: {LaTeXMode.textmode: '\\ElsevierGlyph{300B}'}, 12312: {LaTeXMode.textmode: '\\ElsevierGlyph{3018}'}, 12313: {LaTeXMode.textmode: '\\ElsevierGlyph{3019}'}, 12314: {LaTeXMode.textmode: '\\openbracketleft '}, 12315: {LaTeXMode.textmode: '\\openbracketright '}, 64256: {LaTeXMode.textmode: 'ff'}, 64257: {LaTeXMode.textmode: 'fi'}, 64258: {LaTeXMode.textmode: 'fl'}, 64259: {LaTeXMode.textmode: 'ffi'}, 64260: 
{LaTeXMode.textmode: 'ffl'}, 65024: {LaTeXMode.textmode: '\\varsupsetneq ', LaTeXMode.mathmode: '\\varsupsetneq'}, 119808: {LaTeXMode.textmode: '\\mathbf{A}'}, 119809: {LaTeXMode.textmode: '\\mathbf{B}'}, 119810: {LaTeXMode.textmode: '\\mathbf{C}'}, 119811: {LaTeXMode.textmode: '\\mathbf{D}'}, 119812: {LaTeXMode.textmode: '\\mathbf{E}'}, 119813: {LaTeXMode.textmode: '\\mathbf{F}'}, 119814: {LaTeXMode.textmode: '\\mathbf{G}'}, 119815: {LaTeXMode.textmode: '\\mathbf{H}'}, 119816: {LaTeXMode.textmode: '\\mathbf{I}'}, 119817: {LaTeXMode.textmode: '\\mathbf{J}'}, 119818: {LaTeXMode.textmode: '\\mathbf{K}'}, 119819: {LaTeXMode.textmode: '\\mathbf{L}'}, 119820: {LaTeXMode.textmode: '\\mathbf{M}'}, 119821: {LaTeXMode.textmode: '\\mathbf{N}'}, 119822: {LaTeXMode.textmode: '\\mathbf{O}'}, 119823: {LaTeXMode.textmode: '\\mathbf{P}'}, 119824: {LaTeXMode.textmode: '\\mathbf{Q}'}, 119825: {LaTeXMode.textmode: '\\mathbf{R}'}, 119826: {LaTeXMode.textmode: '\\mathbf{S}'}, 119827: {LaTeXMode.textmode: '\\mathbf{T}'}, 119828: {LaTeXMode.textmode: '\\mathbf{U}'}, 119829: {LaTeXMode.textmode: '\\mathbf{V}'}, 119830: {LaTeXMode.textmode: '\\mathbf{W}'}, 119831: {LaTeXMode.textmode: '\\mathbf{X}'}, 119832: {LaTeXMode.textmode: '\\mathbf{Y}'}, 119833: {LaTeXMode.textmode: '\\mathbf{Z}'}, 119834: {LaTeXMode.textmode: '\\mathbf{a}'}, 119835: {LaTeXMode.textmode: '\\mathbf{b}'}, 119836: {LaTeXMode.textmode: '\\mathbf{c}'}, 119837: {LaTeXMode.textmode: '\\mathbf{d}'}, 119838: {LaTeXMode.textmode: '\\mathbf{e}'}, 119839: {LaTeXMode.textmode: '\\mathbf{f}'}, 119840: {LaTeXMode.textmode: '\\mathbf{g}'}, 119841: {LaTeXMode.textmode: '\\mathbf{h}'}, 119842: {LaTeXMode.textmode: '\\mathbf{i}'}, 119843: {LaTeXMode.textmode: '\\mathbf{j}'}, 119844: {LaTeXMode.textmode: '\\mathbf{k}'}, 119845: {LaTeXMode.textmode: '\\mathbf{l}'}, 119846: {LaTeXMode.textmode: '\\mathbf{m}'}, 119847: {LaTeXMode.textmode: '\\mathbf{n}'}, 119848: {LaTeXMode.textmode: '\\mathbf{o}'}, 119849: {LaTeXMode.textmode: '\\mathbf{p}'}, 119850: {LaTeXMode.textmode: '\\mathbf{q}'}, 119851: {LaTeXMode.textmode: '\\mathbf{r}'}, 119852: {LaTeXMode.textmode: '\\mathbf{s}'}, 119853: {LaTeXMode.textmode: '\\mathbf{t}'}, 119854: {LaTeXMode.textmode: '\\mathbf{u}'}, 119855: {LaTeXMode.textmode: '\\mathbf{v}'}, 119856: {LaTeXMode.textmode: '\\mathbf{w}'}, 119857: {LaTeXMode.textmode: '\\mathbf{x}'}, 119858: {LaTeXMode.textmode: '\\mathbf{y}'}, 119859: {LaTeXMode.textmode: '\\mathbf{z}'}, 119860: {LaTeXMode.textmode: '\\mathmit{A}'}, 119861: {LaTeXMode.textmode: '\\mathmit{B}'}, 119862: {LaTeXMode.textmode: '\\mathmit{C}'}, 119863: {LaTeXMode.textmode: '\\mathmit{D}'}, 119864: {LaTeXMode.textmode: '\\mathmit{E}'}, 119865: {LaTeXMode.textmode: '\\mathmit{F}'}, 119866: {LaTeXMode.textmode: '\\mathmit{G}'}, 119867: {LaTeXMode.textmode: '\\mathmit{H}'}, 119868: {LaTeXMode.textmode: '\\mathmit{I}'}, 119869: {LaTeXMode.textmode: '\\mathmit{J}'}, 119870: {LaTeXMode.textmode: '\\mathmit{K}'}, 119871: {LaTeXMode.textmode: '\\mathmit{L}'}, 119872: {LaTeXMode.textmode: '\\mathmit{M}'}, 119873: {LaTeXMode.textmode: '\\mathmit{N}'}, 119874: {LaTeXMode.textmode: '\\mathmit{O}'}, 119875: {LaTeXMode.textmode: '\\mathmit{P}'}, 119876: {LaTeXMode.textmode: '\\mathmit{Q}'}, 119877: {LaTeXMode.textmode: '\\mathmit{R}'}, 119878: {LaTeXMode.textmode: '\\mathmit{S}'}, 119879: {LaTeXMode.textmode: '\\mathmit{T}'}, 119880: {LaTeXMode.textmode: '\\mathmit{U}'}, 119881: {LaTeXMode.textmode: '\\mathmit{V}'}, 119882: {LaTeXMode.textmode: '\\mathmit{W}'}, 119883: {LaTeXMode.textmode: 
'\\mathmit{X}'}, 119884: {LaTeXMode.textmode: '\\mathmit{Y}'}, 119885: {LaTeXMode.textmode: '\\mathmit{Z}'}, 119886: {LaTeXMode.textmode: '\\mathmit{a}'}, 119887: {LaTeXMode.textmode: '\\mathmit{b}'}, 119888: {LaTeXMode.textmode: '\\mathmit{c}'}, 119889: {LaTeXMode.textmode: '\\mathmit{d}'}, 119890: {LaTeXMode.textmode: '\\mathmit{e}'}, 119891: {LaTeXMode.textmode: '\\mathmit{f}'}, 119892: {LaTeXMode.textmode: '\\mathmit{g}'}, 119894: {LaTeXMode.textmode: '\\mathmit{i}'}, 119895: {LaTeXMode.textmode: '\\mathmit{j}'}, 119896: {LaTeXMode.textmode: '\\mathmit{k}'}, 119897: {LaTeXMode.textmode: '\\mathmit{l}'}, 119898: {LaTeXMode.textmode: '\\mathmit{m}'}, 119899: {LaTeXMode.textmode: '\\mathmit{n}'}, 119900: {LaTeXMode.textmode: '\\mathmit{o}'}, 119901: {LaTeXMode.textmode: '\\mathmit{p}'}, 119902: {LaTeXMode.textmode: '\\mathmit{q}'}, 119903: {LaTeXMode.textmode: '\\mathmit{r}'}, 119904: {LaTeXMode.textmode: '\\mathmit{s}'}, 119905: {LaTeXMode.textmode: '\\mathmit{t}'}, 119906: {LaTeXMode.textmode: '\\mathmit{u}'}, 119907: {LaTeXMode.textmode: '\\mathmit{v}'}, 119908: {LaTeXMode.textmode: '\\mathmit{w}'}, 119909: {LaTeXMode.textmode: '\\mathmit{x}'}, 119910: {LaTeXMode.textmode: '\\mathmit{y}'}, 119911: {LaTeXMode.textmode: '\\mathmit{z}'}, 119912: {LaTeXMode.textmode: '\\mathbit{A}'}, 119913: {LaTeXMode.textmode: '\\mathbit{B}'}, 119914: {LaTeXMode.textmode: '\\mathbit{C}'}, 119915: {LaTeXMode.textmode: '\\mathbit{D}'}, 119916: {LaTeXMode.textmode: '\\mathbit{E}'}, 119917: {LaTeXMode.textmode: '\\mathbit{F}'}, 119918: {LaTeXMode.textmode: '\\mathbit{G}'}, 119919: {LaTeXMode.textmode: '\\mathbit{H}'}, 119920: {LaTeXMode.textmode: '\\mathbit{I}'}, 119921: {LaTeXMode.textmode: '\\mathbit{J}'}, 119922: {LaTeXMode.textmode: '\\mathbit{K}'}, 119923: {LaTeXMode.textmode: '\\mathbit{L}'}, 119924: {LaTeXMode.textmode: '\\mathbit{M}'}, 119925: {LaTeXMode.textmode: '\\mathbit{N}'}, 119926: {LaTeXMode.textmode: '\\mathbit{O}'}, 119927: {LaTeXMode.textmode: '\\mathbit{P}'}, 119928: {LaTeXMode.textmode: '\\mathbit{Q}'}, 119929: {LaTeXMode.textmode: '\\mathbit{R}'}, 119930: {LaTeXMode.textmode: '\\mathbit{S}'}, 119931: {LaTeXMode.textmode: '\\mathbit{T}'}, 119932: {LaTeXMode.textmode: '\\mathbit{U}'}, 119933: {LaTeXMode.textmode: '\\mathbit{V}'}, 119934: {LaTeXMode.textmode: '\\mathbit{W}'}, 119935: {LaTeXMode.textmode: '\\mathbit{X}'}, 119936: {LaTeXMode.textmode: '\\mathbit{Y}'}, 119937: {LaTeXMode.textmode: '\\mathbit{Z}'}, 119938: {LaTeXMode.textmode: '\\mathbit{a}'}, 119939: {LaTeXMode.textmode: '\\mathbit{b}'}, 119940: {LaTeXMode.textmode: '\\mathbit{c}'}, 119941: {LaTeXMode.textmode: '\\mathbit{d}'}, 119942: {LaTeXMode.textmode: '\\mathbit{e}'}, 119943: {LaTeXMode.textmode: '\\mathbit{f}'}, 119944: {LaTeXMode.textmode: '\\mathbit{g}'}, 119945: {LaTeXMode.textmode: '\\mathbit{h}'}, 119946: {LaTeXMode.textmode: '\\mathbit{i}'}, 119947: {LaTeXMode.textmode: '\\mathbit{j}'}, 119948: {LaTeXMode.textmode: '\\mathbit{k}'}, 119949: {LaTeXMode.textmode: '\\mathbit{l}'}, 119950: {LaTeXMode.textmode: '\\mathbit{m}'}, 119951: {LaTeXMode.textmode: '\\mathbit{n}'}, 119952: {LaTeXMode.textmode: '\\mathbit{o}'}, 119953: {LaTeXMode.textmode: '\\mathbit{p}'}, 119954: {LaTeXMode.textmode: '\\mathbit{q}'}, 119955: {LaTeXMode.textmode: '\\mathbit{r}'}, 119956: {LaTeXMode.textmode: '\\mathbit{s}'}, 119957: {LaTeXMode.textmode: '\\mathbit{t}'}, 119958: {LaTeXMode.textmode: '\\mathbit{u}'}, 119959: {LaTeXMode.textmode: '\\mathbit{v}'}, 119960: {LaTeXMode.textmode: '\\mathbit{w}'}, 119961: {LaTeXMode.textmode: 
'\\mathbit{x}'}, 119962: {LaTeXMode.textmode: '\\mathbit{y}'}, 119963: {LaTeXMode.textmode: '\\mathbit{z}'}, 119964: {LaTeXMode.textmode: '\\mathscr{A}'}, 119966: {LaTeXMode.textmode: '\\mathscr{C}'}, 119967: {LaTeXMode.textmode: '\\mathscr{D}'}, 119970: {LaTeXMode.textmode: '\\mathscr{G}'}, 119973: {LaTeXMode.textmode: '\\mathscr{J}'}, 119974: {LaTeXMode.textmode: '\\mathscr{K}'}, 119977: {LaTeXMode.textmode: '\\mathscr{N}'}, 119978: {LaTeXMode.textmode: '\\mathscr{O}'}, 119979: {LaTeXMode.textmode: '\\mathscr{P}'}, 119980: {LaTeXMode.textmode: '\\mathscr{Q}'}, 119982: {LaTeXMode.textmode: '\\mathscr{S}'}, 119983: {LaTeXMode.textmode: '\\mathscr{T}'}, 119984: {LaTeXMode.textmode: '\\mathscr{U}'}, 119985: {LaTeXMode.textmode: '\\mathscr{V}'}, 119986: {LaTeXMode.textmode: '\\mathscr{W}'}, 119987: {LaTeXMode.textmode: '\\mathscr{X}'}, 119988: {LaTeXMode.textmode: '\\mathscr{Y}'}, 119989: {LaTeXMode.textmode: '\\mathscr{Z}'}, 119990: {LaTeXMode.textmode: '\\mathscr{a}'}, 119991: {LaTeXMode.textmode: '\\mathscr{b}'}, 119992: {LaTeXMode.textmode: '\\mathscr{c}'}, 119993: {LaTeXMode.textmode: '\\mathscr{d}'}, 119995: {LaTeXMode.textmode: '\\mathscr{f}'}, 119997: {LaTeXMode.textmode: '\\mathscr{h}'}, 119998: {LaTeXMode.textmode: '\\mathscr{i}'}, 119999: {LaTeXMode.textmode: '\\mathscr{j}'}, 120000: {LaTeXMode.textmode: '\\mathscr{k}'}, 120001: {LaTeXMode.textmode: '\\mathscr{l}'}, 120002: {LaTeXMode.textmode: '\\mathscr{m}'}, 120003: {LaTeXMode.textmode: '\\mathscr{n}'}, 120005: {LaTeXMode.textmode: '\\mathscr{p}'}, 120006: {LaTeXMode.textmode: '\\mathscr{q}'}, 120007: {LaTeXMode.textmode: '\\mathscr{r}'}, 120008: {LaTeXMode.textmode: '\\mathscr{s}'}, 120009: {LaTeXMode.textmode: '\\mathscr{t}'}, 120010: {LaTeXMode.textmode: '\\mathscr{u}'}, 120011: {LaTeXMode.textmode: '\\mathscr{v}'}, 120012: {LaTeXMode.textmode: '\\mathscr{w}'}, 120013: {LaTeXMode.textmode: '\\mathscr{x}'}, 120014: {LaTeXMode.textmode: '\\mathscr{y}'}, 120015: {LaTeXMode.textmode: '\\mathscr{z}'}, 120016: {LaTeXMode.textmode: '\\mathbcal{A}'}, 120017: {LaTeXMode.textmode: '\\mathbcal{B}'}, 120018: {LaTeXMode.textmode: '\\mathbcal{C}'}, 120019: {LaTeXMode.textmode: '\\mathbcal{D}'}, 120020: {LaTeXMode.textmode: '\\mathbcal{E}'}, 120021: {LaTeXMode.textmode: '\\mathbcal{F}'}, 120022: {LaTeXMode.textmode: '\\mathbcal{G}'}, 120023: {LaTeXMode.textmode: '\\mathbcal{H}'}, 120024: {LaTeXMode.textmode: '\\mathbcal{I}'}, 120025: {LaTeXMode.textmode: '\\mathbcal{J}'}, 120026: {LaTeXMode.textmode: '\\mathbcal{K}'}, 120027: {LaTeXMode.textmode: '\\mathbcal{L}'}, 120028: {LaTeXMode.textmode: '\\mathbcal{M}'}, 120029: {LaTeXMode.textmode: '\\mathbcal{N}'}, 120030: {LaTeXMode.textmode: '\\mathbcal{O}'}, 120031: {LaTeXMode.textmode: '\\mathbcal{P}'}, 120032: {LaTeXMode.textmode: '\\mathbcal{Q}'}, 120033: {LaTeXMode.textmode: '\\mathbcal{R}'}, 120034: {LaTeXMode.textmode: '\\mathbcal{S}'}, 120035: {LaTeXMode.textmode: '\\mathbcal{T}'}, 120036: {LaTeXMode.textmode: '\\mathbcal{U}'}, 120037: {LaTeXMode.textmode: '\\mathbcal{V}'}, 120038: {LaTeXMode.textmode: '\\mathbcal{W}'}, 120039: {LaTeXMode.textmode: '\\mathbcal{X}'}, 120040: {LaTeXMode.textmode: '\\mathbcal{Y}'}, 120041: {LaTeXMode.textmode: '\\mathbcal{Z}'}, 120042: {LaTeXMode.textmode: '\\mathbcal{a}'}, 120043: {LaTeXMode.textmode: '\\mathbcal{b}'}, 120044: {LaTeXMode.textmode: '\\mathbcal{c}'}, 120045: {LaTeXMode.textmode: '\\mathbcal{d}'}, 120046: {LaTeXMode.textmode: '\\mathbcal{e}'}, 120047: {LaTeXMode.textmode: '\\mathbcal{f}'}, 120048: {LaTeXMode.textmode: '\\mathbcal{g}'}, 120049: 
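# Of the math alphabets used in this block, only \mathbf, \mathscr, \mathfrak, \mathbb,
# \mathsf and \mathtt are widely available (via amsfonts, mathrsfs, etc.); \mathmit and
# \mathbit above, and \mathbcal, \mathbfrak and the \mathsfbf/\mathsfsl/\mathsfbfsl
# variants below, must be declared by the consuming document, e.g. with
# \DeclareMathAlphabet.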
{LaTeXMode.textmode: '\\mathbcal{h}'}, 120050: {LaTeXMode.textmode: '\\mathbcal{i}'}, 120051: {LaTeXMode.textmode: '\\mathbcal{j}'}, 120052: {LaTeXMode.textmode: '\\mathbcal{k}'}, 120053: {LaTeXMode.textmode: '\\mathbcal{l}'}, 120054: {LaTeXMode.textmode: '\\mathbcal{m}'}, 120055: {LaTeXMode.textmode: '\\mathbcal{n}'}, 120056: {LaTeXMode.textmode: '\\mathbcal{o}'}, 120057: {LaTeXMode.textmode: '\\mathbcal{p}'}, 120058: {LaTeXMode.textmode: '\\mathbcal{q}'}, 120059: {LaTeXMode.textmode: '\\mathbcal{r}'}, 120060: {LaTeXMode.textmode: '\\mathbcal{s}'}, 120061: {LaTeXMode.textmode: '\\mathbcal{t}'}, 120062: {LaTeXMode.textmode: '\\mathbcal{u}'}, 120063: {LaTeXMode.textmode: '\\mathbcal{v}'}, 120064: {LaTeXMode.textmode: '\\mathbcal{w}'}, 120065: {LaTeXMode.textmode: '\\mathbcal{x}'}, 120066: {LaTeXMode.textmode: '\\mathbcal{y}'}, 120067: {LaTeXMode.textmode: '\\mathbcal{z}'}, 120068: {LaTeXMode.textmode: '\\mathfrak{A}'}, 120069: {LaTeXMode.textmode: '\\mathfrak{B}'}, 120071: {LaTeXMode.textmode: '\\mathfrak{D}'}, 120072: {LaTeXMode.textmode: '\\mathfrak{E}'}, 120073: {LaTeXMode.textmode: '\\mathfrak{F}'}, 120074: {LaTeXMode.textmode: '\\mathfrak{G}'}, 120077: {LaTeXMode.textmode: '\\mathfrak{J}'}, 120078: {LaTeXMode.textmode: '\\mathfrak{K}'}, 120079: {LaTeXMode.textmode: '\\mathfrak{L}'}, 120080: {LaTeXMode.textmode: '\\mathfrak{M}'}, 120081: {LaTeXMode.textmode: '\\mathfrak{N}'}, 120082: {LaTeXMode.textmode: '\\mathfrak{O}'}, 120083: {LaTeXMode.textmode: '\\mathfrak{P}'}, 120084: {LaTeXMode.textmode: '\\mathfrak{Q}'}, 120086: {LaTeXMode.textmode: '\\mathfrak{S}'}, 120087: {LaTeXMode.textmode: '\\mathfrak{T}'}, 120088: {LaTeXMode.textmode: '\\mathfrak{U}'}, 120089: {LaTeXMode.textmode: '\\mathfrak{V}'}, 120090: {LaTeXMode.textmode: '\\mathfrak{W}'}, 120091: {LaTeXMode.textmode: '\\mathfrak{X}'}, 120092: {LaTeXMode.textmode: '\\mathfrak{Y}'}, 120094: {LaTeXMode.textmode: '\\mathfrak{a}'}, 120095: {LaTeXMode.textmode: '\\mathfrak{b}'}, 120096: {LaTeXMode.textmode: '\\mathfrak{c}'}, 120097: {LaTeXMode.textmode: '\\mathfrak{d}'}, 120098: {LaTeXMode.textmode: '\\mathfrak{e}'}, 120099: {LaTeXMode.textmode: '\\mathfrak{f}'}, 120100: {LaTeXMode.textmode: '\\mathfrak{g}'}, 120101: {LaTeXMode.textmode: '\\mathfrak{h}'}, 120102: {LaTeXMode.textmode: '\\mathfrak{i}'}, 120103: {LaTeXMode.textmode: '\\mathfrak{j}'}, 120104: {LaTeXMode.textmode: '\\mathfrak{k}'}, 120105: {LaTeXMode.textmode: '\\mathfrak{l}'}, 120106: {LaTeXMode.textmode: '\\mathfrak{m}'}, 120107: {LaTeXMode.textmode: '\\mathfrak{n}'}, 120108: {LaTeXMode.textmode: '\\mathfrak{o}'}, 120109: {LaTeXMode.textmode: '\\mathfrak{p}'}, 120110: {LaTeXMode.textmode: '\\mathfrak{q}'}, 120111: {LaTeXMode.textmode: '\\mathfrak{r}'}, 120112: {LaTeXMode.textmode: '\\mathfrak{s}'}, 120113: {LaTeXMode.textmode: '\\mathfrak{t}'}, 120114: {LaTeXMode.textmode: '\\mathfrak{u}'}, 120115: {LaTeXMode.textmode: '\\mathfrak{v}'}, 120116: {LaTeXMode.textmode: '\\mathfrak{w}'}, 120117: {LaTeXMode.textmode: '\\mathfrak{x}'}, 120118: {LaTeXMode.textmode: '\\mathfrak{y}'}, 120119: {LaTeXMode.textmode: '\\mathfrak{z}'}, 120120: {LaTeXMode.textmode: '\\mathbb{A}'}, 120121: {LaTeXMode.textmode: '\\mathbb{B}'}, 120123: {LaTeXMode.textmode: '\\mathbb{D}'}, 120124: {LaTeXMode.textmode: '\\mathbb{E}'}, 120125: {LaTeXMode.textmode: '\\mathbb{F}'}, 120126: {LaTeXMode.textmode: '\\mathbb{G}'}, 120128: {LaTeXMode.textmode: '\\mathbb{I}'}, 120129: {LaTeXMode.textmode: '\\mathbb{J}'}, 120130: {LaTeXMode.textmode: '\\mathbb{K}'}, 120131: {LaTeXMode.textmode: '\\mathbb{L}'}, 120132: 
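# Blackboard-bold lowercase (\mathbb{a}...) is also nonstandard: amsfonts provides only
# uppercase \mathbb, so the lowercase entries assume a font package such as bbold.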
{LaTeXMode.textmode: '\\mathbb{M}'}, 120134: {LaTeXMode.textmode: '\\mathbb{O}'}, 120138: {LaTeXMode.textmode: '\\mathbb{S}'}, 120139: {LaTeXMode.textmode: '\\mathbb{T}'}, 120140: {LaTeXMode.textmode: '\\mathbb{U}'}, 120141: {LaTeXMode.textmode: '\\mathbb{V}'}, 120142: {LaTeXMode.textmode: '\\mathbb{W}'}, 120143: {LaTeXMode.textmode: '\\mathbb{X}'}, 120144: {LaTeXMode.textmode: '\\mathbb{Y}'}, 120146: {LaTeXMode.textmode: '\\mathbb{a}'}, 120147: {LaTeXMode.textmode: '\\mathbb{b}'}, 120148: {LaTeXMode.textmode: '\\mathbb{c}'}, 120149: {LaTeXMode.textmode: '\\mathbb{d}'}, 120150: {LaTeXMode.textmode: '\\mathbb{e}'}, 120151: {LaTeXMode.textmode: '\\mathbb{f}'}, 120152: {LaTeXMode.textmode: '\\mathbb{g}'}, 120153: {LaTeXMode.textmode: '\\mathbb{h}'}, 120154: {LaTeXMode.textmode: '\\mathbb{i}'}, 120155: {LaTeXMode.textmode: '\\mathbb{j}'}, 120156: {LaTeXMode.textmode: '\\mathbb{k}'}, 120157: {LaTeXMode.textmode: '\\mathbb{l}'}, 120158: {LaTeXMode.textmode: '\\mathbb{m}'}, 120159: {LaTeXMode.textmode: '\\mathbb{n}'}, 120160: {LaTeXMode.textmode: '\\mathbb{o}'}, 120161: {LaTeXMode.textmode: '\\mathbb{p}'}, 120162: {LaTeXMode.textmode: '\\mathbb{q}'}, 120163: {LaTeXMode.textmode: '\\mathbb{r}'}, 120164: {LaTeXMode.textmode: '\\mathbb{s}'}, 120165: {LaTeXMode.textmode: '\\mathbb{t}'}, 120166: {LaTeXMode.textmode: '\\mathbb{u}'}, 120167: {LaTeXMode.textmode: '\\mathbb{v}'}, 120168: {LaTeXMode.textmode: '\\mathbb{w}'}, 120169: {LaTeXMode.textmode: '\\mathbb{x}'}, 120170: {LaTeXMode.textmode: '\\mathbb{y}'}, 120171: {LaTeXMode.textmode: '\\mathbb{z}'}, 120172: {LaTeXMode.textmode: '\\mathbfrak{A}'}, 120173: {LaTeXMode.textmode: '\\mathbfrak{B}'}, 120174: {LaTeXMode.textmode: '\\mathbfrak{C}'}, 120175: {LaTeXMode.textmode: '\\mathbfrak{D}'}, 120176: {LaTeXMode.textmode: '\\mathbfrak{E}'}, 120177: {LaTeXMode.textmode: '\\mathbfrak{F}'}, 120178: {LaTeXMode.textmode: '\\mathbfrak{G}'}, 120179: {LaTeXMode.textmode: '\\mathbfrak{H}'}, 120180: {LaTeXMode.textmode: '\\mathbfrak{I}'}, 120181: {LaTeXMode.textmode: '\\mathbfrak{J}'}, 120182: {LaTeXMode.textmode: '\\mathbfrak{K}'}, 120183: {LaTeXMode.textmode: '\\mathbfrak{L}'}, 120184: {LaTeXMode.textmode: '\\mathbfrak{M}'}, 120185: {LaTeXMode.textmode: '\\mathbfrak{N}'}, 120186: {LaTeXMode.textmode: '\\mathbfrak{O}'}, 120187: {LaTeXMode.textmode: '\\mathbfrak{P}'}, 120188: {LaTeXMode.textmode: '\\mathbfrak{Q}'}, 120189: {LaTeXMode.textmode: '\\mathbfrak{R}'}, 120190: {LaTeXMode.textmode: '\\mathbfrak{S}'}, 120191: {LaTeXMode.textmode: '\\mathbfrak{T}'}, 120192: {LaTeXMode.textmode: '\\mathbfrak{U}'}, 120193: {LaTeXMode.textmode: '\\mathbfrak{V}'}, 120194: {LaTeXMode.textmode: '\\mathbfrak{W}'}, 120195: {LaTeXMode.textmode: '\\mathbfrak{X}'}, 120196: {LaTeXMode.textmode: '\\mathbfrak{Y}'}, 120197: {LaTeXMode.textmode: '\\mathbfrak{Z}'}, 120198: {LaTeXMode.textmode: '\\mathbfrak{a}'}, 120199: {LaTeXMode.textmode: '\\mathbfrak{b}'}, 120200: {LaTeXMode.textmode: '\\mathbfrak{c}'}, 120201: {LaTeXMode.textmode: '\\mathbfrak{d}'}, 120202: {LaTeXMode.textmode: '\\mathbfrak{e}'}, 120203: {LaTeXMode.textmode: '\\mathbfrak{f}'}, 120204: {LaTeXMode.textmode: '\\mathbfrak{g}'}, 120205: {LaTeXMode.textmode: '\\mathbfrak{h}'}, 120206: {LaTeXMode.textmode: '\\mathbfrak{i}'}, 120207: {LaTeXMode.textmode: '\\mathbfrak{j}'}, 120208: {LaTeXMode.textmode: '\\mathbfrak{k}'}, 120209: {LaTeXMode.textmode: '\\mathbfrak{l}'}, 120210: {LaTeXMode.textmode: '\\mathbfrak{m}'}, 120211: {LaTeXMode.textmode: '\\mathbfrak{n}'}, 120212: {LaTeXMode.textmode: '\\mathbfrak{o}'}, 120213: 
{LaTeXMode.textmode: '\\mathbfrak{p}'}, 120214: {LaTeXMode.textmode: '\\mathbfrak{q}'}, 120215: {LaTeXMode.textmode: '\\mathbfrak{r}'}, 120216: {LaTeXMode.textmode: '\\mathbfrak{s}'}, 120217: {LaTeXMode.textmode: '\\mathbfrak{t}'}, 120218: {LaTeXMode.textmode: '\\mathbfrak{u}'}, 120219: {LaTeXMode.textmode: '\\mathbfrak{v}'}, 120220: {LaTeXMode.textmode: '\\mathbfrak{w}'}, 120221: {LaTeXMode.textmode: '\\mathbfrak{x}'}, 120222: {LaTeXMode.textmode: '\\mathbfrak{y}'}, 120223: {LaTeXMode.textmode: '\\mathbfrak{z}'}, 120224: {LaTeXMode.textmode: '\\mathsf{A}'}, 120225: {LaTeXMode.textmode: '\\mathsf{B}'}, 120226: {LaTeXMode.textmode: '\\mathsf{C}'}, 120227: {LaTeXMode.textmode: '\\mathsf{D}'}, 120228: {LaTeXMode.textmode: '\\mathsf{E}'}, 120229: {LaTeXMode.textmode: '\\mathsf{F}'}, 120230: {LaTeXMode.textmode: '\\mathsf{G}'}, 120231: {LaTeXMode.textmode: '\\mathsf{H}'}, 120232: {LaTeXMode.textmode: '\\mathsf{I}'}, 120233: {LaTeXMode.textmode: '\\mathsf{J}'}, 120234: {LaTeXMode.textmode: '\\mathsf{K}'}, 120235: {LaTeXMode.textmode: '\\mathsf{L}'}, 120236: {LaTeXMode.textmode: '\\mathsf{M}'}, 120237: {LaTeXMode.textmode: '\\mathsf{N}'}, 120238: {LaTeXMode.textmode: '\\mathsf{O}'}, 120239: {LaTeXMode.textmode: '\\mathsf{P}'}, 120240: {LaTeXMode.textmode: '\\mathsf{Q}'}, 120241: {LaTeXMode.textmode: '\\mathsf{R}'}, 120242: {LaTeXMode.textmode: '\\mathsf{S}'}, 120243: {LaTeXMode.textmode: '\\mathsf{T}'}, 120244: {LaTeXMode.textmode: '\\mathsf{U}'}, 120245: {LaTeXMode.textmode: '\\mathsf{V}'}, 120246: {LaTeXMode.textmode: '\\mathsf{W}'}, 120247: {LaTeXMode.textmode: '\\mathsf{X}'}, 120248: {LaTeXMode.textmode: '\\mathsf{Y}'}, 120249: {LaTeXMode.textmode: '\\mathsf{Z}'}, 120250: {LaTeXMode.textmode: '\\mathsf{a}'}, 120251: {LaTeXMode.textmode: '\\mathsf{b}'}, 120252: {LaTeXMode.textmode: '\\mathsf{c}'}, 120253: {LaTeXMode.textmode: '\\mathsf{d}'}, 120254: {LaTeXMode.textmode: '\\mathsf{e}'}, 120255: {LaTeXMode.textmode: '\\mathsf{f}'}, 120256: {LaTeXMode.textmode: '\\mathsf{g}'}, 120257: {LaTeXMode.textmode: '\\mathsf{h}'}, 120258: {LaTeXMode.textmode: '\\mathsf{i}'}, 120259: {LaTeXMode.textmode: '\\mathsf{j}'}, 120260: {LaTeXMode.textmode: '\\mathsf{k}'}, 120261: {LaTeXMode.textmode: '\\mathsf{l}'}, 120262: {LaTeXMode.textmode: '\\mathsf{m}'}, 120263: {LaTeXMode.textmode: '\\mathsf{n}'}, 120264: {LaTeXMode.textmode: '\\mathsf{o}'}, 120265: {LaTeXMode.textmode: '\\mathsf{p}'}, 120266: {LaTeXMode.textmode: '\\mathsf{q}'}, 120267: {LaTeXMode.textmode: '\\mathsf{r}'}, 120268: {LaTeXMode.textmode: '\\mathsf{s}'}, 120269: {LaTeXMode.textmode: '\\mathsf{t}'}, 120270: {LaTeXMode.textmode: '\\mathsf{u}'}, 120271: {LaTeXMode.textmode: '\\mathsf{v}'}, 120272: {LaTeXMode.textmode: '\\mathsf{w}'}, 120273: {LaTeXMode.textmode: '\\mathsf{x}'}, 120274: {LaTeXMode.textmode: '\\mathsf{y}'}, 120275: {LaTeXMode.textmode: '\\mathsf{z}'}, 120276: {LaTeXMode.textmode: '\\mathsfbf{A}'}, 120277: {LaTeXMode.textmode: '\\mathsfbf{B}'}, 120278: {LaTeXMode.textmode: '\\mathsfbf{C}'}, 120279: {LaTeXMode.textmode: '\\mathsfbf{D}'}, 120280: {LaTeXMode.textmode: '\\mathsfbf{E}'}, 120281: {LaTeXMode.textmode: '\\mathsfbf{F}'}, 120282: {LaTeXMode.textmode: '\\mathsfbf{G}'}, 120283: {LaTeXMode.textmode: '\\mathsfbf{H}'}, 120284: {LaTeXMode.textmode: '\\mathsfbf{I}'}, 120285: {LaTeXMode.textmode: '\\mathsfbf{J}'}, 120286: {LaTeXMode.textmode: '\\mathsfbf{K}'}, 120287: {LaTeXMode.textmode: '\\mathsfbf{L}'}, 120288: {LaTeXMode.textmode: '\\mathsfbf{M}'}, 120289: {LaTeXMode.textmode: '\\mathsfbf{N}'}, 120290: {LaTeXMode.textmode: 
'\\mathsfbf{O}'}, 120291: {LaTeXMode.textmode: '\\mathsfbf{P}'}, 120292: {LaTeXMode.textmode: '\\mathsfbf{Q}'}, 120293: {LaTeXMode.textmode: '\\mathsfbf{R}'}, 120294: {LaTeXMode.textmode: '\\mathsfbf{S}'}, 120295: {LaTeXMode.textmode: '\\mathsfbf{T}'}, 120296: {LaTeXMode.textmode: '\\mathsfbf{U}'}, 120297: {LaTeXMode.textmode: '\\mathsfbf{V}'}, 120298: {LaTeXMode.textmode: '\\mathsfbf{W}'}, 120299: {LaTeXMode.textmode: '\\mathsfbf{X}'}, 120300: {LaTeXMode.textmode: '\\mathsfbf{Y}'}, 120301: {LaTeXMode.textmode: '\\mathsfbf{Z}'}, 120302: {LaTeXMode.textmode: '\\mathsfbf{a}'}, 120303: {LaTeXMode.textmode: '\\mathsfbf{b}'}, 120304: {LaTeXMode.textmode: '\\mathsfbf{c}'}, 120305: {LaTeXMode.textmode: '\\mathsfbf{d}'}, 120306: {LaTeXMode.textmode: '\\mathsfbf{e}'}, 120307: {LaTeXMode.textmode: '\\mathsfbf{f}'}, 120308: {LaTeXMode.textmode: '\\mathsfbf{g}'}, 120309: {LaTeXMode.textmode: '\\mathsfbf{h}'}, 120310: {LaTeXMode.textmode: '\\mathsfbf{i}'}, 120311: {LaTeXMode.textmode: '\\mathsfbf{j}'}, 120312: {LaTeXMode.textmode: '\\mathsfbf{k}'}, 120313: {LaTeXMode.textmode: '\\mathsfbf{l}'}, 120314: {LaTeXMode.textmode: '\\mathsfbf{m}'}, 120315: {LaTeXMode.textmode: '\\mathsfbf{n}'}, 120316: {LaTeXMode.textmode: '\\mathsfbf{o}'}, 120317: {LaTeXMode.textmode: '\\mathsfbf{p}'}, 120318: {LaTeXMode.textmode: '\\mathsfbf{q}'}, 120319: {LaTeXMode.textmode: '\\mathsfbf{r}'}, 120320: {LaTeXMode.textmode: '\\mathsfbf{s}'}, 120321: {LaTeXMode.textmode: '\\mathsfbf{t}'}, 120322: {LaTeXMode.textmode: '\\mathsfbf{u}'}, 120323: {LaTeXMode.textmode: '\\mathsfbf{v}'}, 120324: {LaTeXMode.textmode: '\\mathsfbf{w}'}, 120325: {LaTeXMode.textmode: '\\mathsfbf{x}'}, 120326: {LaTeXMode.textmode: '\\mathsfbf{y}'}, 120327: {LaTeXMode.textmode: '\\mathsfbf{z}'}, 120328: {LaTeXMode.textmode: '\\mathsfsl{A}'}, 120329: {LaTeXMode.textmode: '\\mathsfsl{B}'}, 120330: {LaTeXMode.textmode: '\\mathsfsl{C}'}, 120331: {LaTeXMode.textmode: '\\mathsfsl{D}'}, 120332: {LaTeXMode.textmode: '\\mathsfsl{E}'}, 120333: {LaTeXMode.textmode: '\\mathsfsl{F}'}, 120334: {LaTeXMode.textmode: '\\mathsfsl{G}'}, 120335: {LaTeXMode.textmode: '\\mathsfsl{H}'}, 120336: {LaTeXMode.textmode: '\\mathsfsl{I}'}, 120337: {LaTeXMode.textmode: '\\mathsfsl{J}'}, 120338: {LaTeXMode.textmode: '\\mathsfsl{K}'}, 120339: {LaTeXMode.textmode: '\\mathsfsl{L}'}, 120340: {LaTeXMode.textmode: '\\mathsfsl{M}'}, 120341: {LaTeXMode.textmode: '\\mathsfsl{N}'}, 120342: {LaTeXMode.textmode: '\\mathsfsl{O}'}, 120343: {LaTeXMode.textmode: '\\mathsfsl{P}'}, 120344: {LaTeXMode.textmode: '\\mathsfsl{Q}'}, 120345: {LaTeXMode.textmode: '\\mathsfsl{R}'}, 120346: {LaTeXMode.textmode: '\\mathsfsl{S}'}, 120347: {LaTeXMode.textmode: '\\mathsfsl{T}'}, 120348: {LaTeXMode.textmode: '\\mathsfsl{U}'}, 120349: {LaTeXMode.textmode: '\\mathsfsl{V}'}, 120350: {LaTeXMode.textmode: '\\mathsfsl{W}'}, 120351: {LaTeXMode.textmode: '\\mathsfsl{X}'}, 120352: {LaTeXMode.textmode: '\\mathsfsl{Y}'}, 120353: {LaTeXMode.textmode: '\\mathsfsl{Z}'}, 120354: {LaTeXMode.textmode: '\\mathsfsl{a}'}, 120355: {LaTeXMode.textmode: '\\mathsfsl{b}'}, 120356: {LaTeXMode.textmode: '\\mathsfsl{c}'}, 120357: {LaTeXMode.textmode: '\\mathsfsl{d}'}, 120358: {LaTeXMode.textmode: '\\mathsfsl{e}'}, 120359: {LaTeXMode.textmode: '\\mathsfsl{f}'}, 120360: {LaTeXMode.textmode: '\\mathsfsl{g}'}, 120361: {LaTeXMode.textmode: '\\mathsfsl{h}'}, 120362: {LaTeXMode.textmode: '\\mathsfsl{i}'}, 120363: {LaTeXMode.textmode: '\\mathsfsl{j}'}, 120364: {LaTeXMode.textmode: '\\mathsfsl{k}'}, 120365: {LaTeXMode.textmode: '\\mathsfsl{l}'}, 120366: 
{LaTeXMode.textmode: '\\mathsfsl{m}'}, 120367: {LaTeXMode.textmode: '\\mathsfsl{n}'}, 120368: {LaTeXMode.textmode: '\\mathsfsl{o}'}, 120369: {LaTeXMode.textmode: '\\mathsfsl{p}'}, 120370: {LaTeXMode.textmode: '\\mathsfsl{q}'}, 120371: {LaTeXMode.textmode: '\\mathsfsl{r}'}, 120372: {LaTeXMode.textmode: '\\mathsfsl{s}'}, 120373: {LaTeXMode.textmode: '\\mathsfsl{t}'}, 120374: {LaTeXMode.textmode: '\\mathsfsl{u}'}, 120375: {LaTeXMode.textmode: '\\mathsfsl{v}'}, 120376: {LaTeXMode.textmode: '\\mathsfsl{w}'}, 120377: {LaTeXMode.textmode: '\\mathsfsl{x}'}, 120378: {LaTeXMode.textmode: '\\mathsfsl{y}'}, 120379: {LaTeXMode.textmode: '\\mathsfsl{z}'}, 120380: {LaTeXMode.textmode: '\\mathsfbfsl{A}'}, 120381: {LaTeXMode.textmode: '\\mathsfbfsl{B}'}, 120382: {LaTeXMode.textmode: '\\mathsfbfsl{C}'}, 120383: {LaTeXMode.textmode: '\\mathsfbfsl{D}'}, 120384: {LaTeXMode.textmode: '\\mathsfbfsl{E}'}, 120385: {LaTeXMode.textmode: '\\mathsfbfsl{F}'}, 120386: {LaTeXMode.textmode: '\\mathsfbfsl{G}'}, 120387: {LaTeXMode.textmode: '\\mathsfbfsl{H}'}, 120388: {LaTeXMode.textmode: '\\mathsfbfsl{I}'}, 120389: {LaTeXMode.textmode: '\\mathsfbfsl{J}'}, 120390: {LaTeXMode.textmode: '\\mathsfbfsl{K}'}, 120391: {LaTeXMode.textmode: '\\mathsfbfsl{L}'}, 120392: {LaTeXMode.textmode: '\\mathsfbfsl{M}'}, 120393: {LaTeXMode.textmode: '\\mathsfbfsl{N}'}, 120394: {LaTeXMode.textmode: '\\mathsfbfsl{O}'}, 120395: {LaTeXMode.textmode: '\\mathsfbfsl{P}'}, 120396: {LaTeXMode.textmode: '\\mathsfbfsl{Q}'}, 120397: {LaTeXMode.textmode: '\\mathsfbfsl{R}'}, 120398: {LaTeXMode.textmode: '\\mathsfbfsl{S}'}, 120399: {LaTeXMode.textmode: '\\mathsfbfsl{T}'}, 120400: {LaTeXMode.textmode: '\\mathsfbfsl{U}'}, 120401: {LaTeXMode.textmode: '\\mathsfbfsl{V}'}, 120402: {LaTeXMode.textmode: '\\mathsfbfsl{W}'}, 120403: {LaTeXMode.textmode: '\\mathsfbfsl{X}'}, 120404: {LaTeXMode.textmode: '\\mathsfbfsl{Y}'}, 120405: {LaTeXMode.textmode: '\\mathsfbfsl{Z}'}, 120406: {LaTeXMode.textmode: '\\mathsfbfsl{a}'}, 120407: {LaTeXMode.textmode: '\\mathsfbfsl{b}'}, 120408: {LaTeXMode.textmode: '\\mathsfbfsl{c}'}, 120409: {LaTeXMode.textmode: '\\mathsfbfsl{d}'}, 120410: {LaTeXMode.textmode: '\\mathsfbfsl{e}'}, 120411: {LaTeXMode.textmode: '\\mathsfbfsl{f}'}, 120412: {LaTeXMode.textmode: '\\mathsfbfsl{g}'}, 120413: {LaTeXMode.textmode: '\\mathsfbfsl{h}'}, 120414: {LaTeXMode.textmode: '\\mathsfbfsl{i}'}, 120415: {LaTeXMode.textmode: '\\mathsfbfsl{j}'}, 120416: {LaTeXMode.textmode: '\\mathsfbfsl{k}'}, 120417: {LaTeXMode.textmode: '\\mathsfbfsl{l}'}, 120418: {LaTeXMode.textmode: '\\mathsfbfsl{m}'}, 120419: {LaTeXMode.textmode: '\\mathsfbfsl{n}'}, 120420: {LaTeXMode.textmode: '\\mathsfbfsl{o}'}, 120421: {LaTeXMode.textmode: '\\mathsfbfsl{p}'}, 120422: {LaTeXMode.textmode: '\\mathsfbfsl{q}'}, 120423: {LaTeXMode.textmode: '\\mathsfbfsl{r}'}, 120424: {LaTeXMode.textmode: '\\mathsfbfsl{s}'}, 120425: {LaTeXMode.textmode: '\\mathsfbfsl{t}'}, 120426: {LaTeXMode.textmode: '\\mathsfbfsl{u}'}, 120427: {LaTeXMode.textmode: '\\mathsfbfsl{v}'}, 120428: {LaTeXMode.textmode: '\\mathsfbfsl{w}'}, 120429: {LaTeXMode.textmode: '\\mathsfbfsl{x}'}, 120430: {LaTeXMode.textmode: '\\mathsfbfsl{y}'}, 120431: {LaTeXMode.textmode: '\\mathsfbfsl{z}'}, 120432: {LaTeXMode.textmode: '\\mathtt{A}'}, 120433: {LaTeXMode.textmode: '\\mathtt{B}'}, 120434: {LaTeXMode.textmode: '\\mathtt{C}'}, 120435: {LaTeXMode.textmode: '\\mathtt{D}'}, 120436: {LaTeXMode.textmode: '\\mathtt{E}'}, 120437: {LaTeXMode.textmode: '\\mathtt{F}'}, 120438: {LaTeXMode.textmode: '\\mathtt{G}'}, 120439: {LaTeXMode.textmode: 
'\\mathtt{H}'}, 120440: {LaTeXMode.textmode: '\\mathtt{I}'}, 120441: {LaTeXMode.textmode: '\\mathtt{J}'}, 120442: {LaTeXMode.textmode: '\\mathtt{K}'}, 120443: {LaTeXMode.textmode: '\\mathtt{L}'}, 120444: {LaTeXMode.textmode: '\\mathtt{M}'}, 120445: {LaTeXMode.textmode: '\\mathtt{N}'}, 120446: {LaTeXMode.textmode: '\\mathtt{O}'}, 120447: {LaTeXMode.textmode: '\\mathtt{P}'}, 120448: {LaTeXMode.textmode: '\\mathtt{Q}'}, 120449: {LaTeXMode.textmode: '\\mathtt{R}'}, 120450: {LaTeXMode.textmode: '\\mathtt{S}'}, 120451: {LaTeXMode.textmode: '\\mathtt{T}'}, 120452: {LaTeXMode.textmode: '\\mathtt{U}'}, 120453: {LaTeXMode.textmode: '\\mathtt{V}'}, 120454: {LaTeXMode.textmode: '\\mathtt{W}'}, 120455: {LaTeXMode.textmode: '\\mathtt{X}'}, 120456: {LaTeXMode.textmode: '\\mathtt{Y}'}, 120457: {LaTeXMode.textmode: '\\mathtt{Z}'}, 120458: {LaTeXMode.textmode: '\\mathtt{a}'}, 120459: {LaTeXMode.textmode: '\\mathtt{b}'}, 120460: {LaTeXMode.textmode: '\\mathtt{c}'}, 120461: {LaTeXMode.textmode: '\\mathtt{d}'}, 120462: {LaTeXMode.textmode: '\\mathtt{e}'}, 120463: {LaTeXMode.textmode: '\\mathtt{f}'}, 120464: {LaTeXMode.textmode: '\\mathtt{g}'}, 120465: {LaTeXMode.textmode: '\\mathtt{h}'}, 120466: {LaTeXMode.textmode: '\\mathtt{i}'}, 120467: {LaTeXMode.textmode: '\\mathtt{j}'}, 120468: {LaTeXMode.textmode: '\\mathtt{k}'}, 120469: {LaTeXMode.textmode: '\\mathtt{l}'}, 120470: {LaTeXMode.textmode: '\\mathtt{m}'}, 120471: {LaTeXMode.textmode: '\\mathtt{n}'}, 120472: {LaTeXMode.textmode: '\\mathtt{o}'}, 120473: {LaTeXMode.textmode: '\\mathtt{p}'}, 120474: {LaTeXMode.textmode: '\\mathtt{q}'}, 120475: {LaTeXMode.textmode: '\\mathtt{r}'}, 120476: {LaTeXMode.textmode: '\\mathtt{s}'}, 120477: {LaTeXMode.textmode: '\\mathtt{t}'}, 120478: {LaTeXMode.textmode: '\\mathtt{u}'}, 120479: {LaTeXMode.textmode: '\\mathtt{v}'}, 120480: {LaTeXMode.textmode: '\\mathtt{w}'}, 120481: {LaTeXMode.textmode: '\\mathtt{x}'}, 120482: {LaTeXMode.textmode: '\\mathtt{y}'}, 120483: {LaTeXMode.textmode: '\\mathtt{z}'}, 120488: {LaTeXMode.textmode: '\\mathbf{\\Alpha}'}, 120489: {LaTeXMode.textmode: '\\mathbf{\\Beta}'}, 120490: {LaTeXMode.textmode: '\\mathbf{\\Gamma}'}, 120491: {LaTeXMode.textmode: '\\mathbf{\\Delta}'}, 120492: {LaTeXMode.textmode: '\\mathbf{\\Epsilon}'}, 120493: {LaTeXMode.textmode: '\\mathbf{\\Zeta}'}, 120494: {LaTeXMode.textmode: '\\mathbf{\\Eta}'}, 120495: {LaTeXMode.textmode: '\\mathbf{\\Theta}'}, 120496: {LaTeXMode.textmode: '\\mathbf{\\Iota}'}, 120497: {LaTeXMode.textmode: '\\mathbf{\\Kappa}'}, 120498: {LaTeXMode.textmode: '\\mathbf{\\Lambda}'}, 120499: {LaTeXMode.textmode: '\\mathbf{M}'}, 120500: {LaTeXMode.textmode: 'N'}, 120501: {LaTeXMode.textmode: '\\mathbf{\\Xi}'}, 120502: {LaTeXMode.textmode: 'O'}, 120503: {LaTeXMode.textmode: '\\mathbf{\\Pi}'}, 120504: {LaTeXMode.textmode: '\\mathbf{\\Rho}'}, 120505: {LaTeXMode.textmode: '\\mathbf{\\vartheta}'}, 120506: {LaTeXMode.textmode: '\\mathbf{\\Sigma}'}, 120507: {LaTeXMode.textmode: '\\mathbf{\\Tau}'}, 120508: {LaTeXMode.textmode: '\\mathbf{\\Upsilon}'}, 120509: {LaTeXMode.textmode: '\\mathbf{\\Phi}'}, 120510: {LaTeXMode.textmode: '\\mathbf{\\Chi}'}, 120511: {LaTeXMode.textmode: '\\mathbf{\\Psi}'}, 120512: {LaTeXMode.textmode: '\\mathbf{\\Omega}'}, 120513: {LaTeXMode.textmode: '\\mathbf{\\nabla}'}, 120514: {LaTeXMode.textmode: '\\mathbf{\\alpha}'}, 120515: {LaTeXMode.textmode: '\\mathbf{\\beta}'}, 120516: {LaTeXMode.textmode: '\\mathbf{\\gamma}'}, 120517: {LaTeXMode.textmode: '\\mathbf{\\delta}'}, 120518: {LaTeXMode.textmode: '\\mathbf{\\epsilon}'}, 120519: 
{LaTeXMode.textmode: '\\mathbf{\\zeta}'}, 120520: {LaTeXMode.textmode: '\\mathbf{\\eta}'}, 120521: {LaTeXMode.textmode: '\\mathbf{\\theta}'}, 120522: {LaTeXMode.textmode: '\\mathbf{\\iota}'}, 120523: {LaTeXMode.textmode: '\\mathbf{\\kappa}'}, 120524: {LaTeXMode.textmode: '\\mathbf{\\lambda}'}, 120525: {LaTeXMode.textmode: '\\mathbf{\\mu}'}, 120526: {LaTeXMode.textmode: '\\mathbf{\\nu}'}, 120527: {LaTeXMode.textmode: '\\mathbf{\\xi}'}, 120528: {LaTeXMode.textmode: '\\mathbf{o}'}, 120529: {LaTeXMode.textmode: '\\mathbf{\\pi}'}, 120530: {LaTeXMode.textmode: '\\mathbf{\\rho}'}, 120531: {LaTeXMode.textmode: '\\mathbf{\\varsigma}'}, 120532: {LaTeXMode.textmode: '\\mathbf{\\sigma}'}, 120533: {LaTeXMode.textmode: '\\mathbf{\\tau}'}, 120534: {LaTeXMode.textmode: '\\mathbf{\\upsilon}'}, 120535: {LaTeXMode.textmode: '\\mathbf{\\phi}'}, 120536: {LaTeXMode.textmode: '\\mathbf{\\chi}'}, 120537: {LaTeXMode.textmode: '\\mathbf{\\psi}'}, 120538: {LaTeXMode.textmode: '\\mathbf{\\omega}'}, 120539: {LaTeXMode.textmode: '\\partial '}, 120540: {LaTeXMode.textmode: '\\mathbf{\\varepsilon}'}, 120541: {LaTeXMode.textmode: '\\mathbf{\\vartheta}'}, 120542: {LaTeXMode.textmode: '\\mathbf{\\varkappa}'}, 120543: {LaTeXMode.textmode: '\\mathbf{\\phi}'}, 120544: {LaTeXMode.textmode: '\\mathbf{\\varrho}'}, 120545: {LaTeXMode.textmode: '\\mathbf{\\varpi}'}, 120546: {LaTeXMode.textmode: '\\mathmit{\\Alpha}'}, 120547: {LaTeXMode.textmode: '\\mathmit{\\Beta}'}, 120548: {LaTeXMode.textmode: '\\mathmit{\\Gamma}'}, 120549: {LaTeXMode.textmode: '\\mathmit{\\Delta}'}, 120550: {LaTeXMode.textmode: '\\mathmit{\\Epsilon}'}, 120551: {LaTeXMode.textmode: '\\mathmit{\\Zeta}'}, 120552: {LaTeXMode.textmode: '\\mathmit{\\Eta}'}, 120553: {LaTeXMode.textmode: '\\mathmit{\\Theta}'}, 120554: {LaTeXMode.textmode: '\\mathmit{\\Iota}'}, 120555: {LaTeXMode.textmode: '\\mathmit{\\Kappa}'}, 120556: {LaTeXMode.textmode: '\\mathmit{\\Lambda}'}, 120557: {LaTeXMode.textmode: '\\mathmit{M}'}, 120558: {LaTeXMode.textmode: 'N'}, 120559: {LaTeXMode.textmode: '\\mathmit{\\Xi}'}, 120560: {LaTeXMode.textmode: 'O'}, 120561: {LaTeXMode.textmode: '\\mathmit{\\Pi}'}, 120562: {LaTeXMode.textmode: '\\mathmit{\\Rho}'}, 120563: {LaTeXMode.textmode: '\\mathmit{\\vartheta}'}, 120564: {LaTeXMode.textmode: '\\mathmit{\\Sigma}'}, 120565: {LaTeXMode.textmode: '\\mathmit{\\Tau}'}, 120566: {LaTeXMode.textmode: '\\mathmit{\\Upsilon}'}, 120567: {LaTeXMode.textmode: '\\mathmit{\\Phi}'}, 120568: {LaTeXMode.textmode: '\\mathmit{\\Chi}'}, 120569: {LaTeXMode.textmode: '\\mathmit{\\Psi}'}, 120570: {LaTeXMode.textmode: '\\mathmit{\\Omega}'}, 120571: {LaTeXMode.textmode: '\\mathmit{\\nabla}'}, 120572: {LaTeXMode.textmode: '\\mathmit{\\alpha}'}, 120573: {LaTeXMode.textmode: '\\mathmit{\\beta}'}, 120574: {LaTeXMode.textmode: '\\mathmit{\\gamma}'}, 120575: {LaTeXMode.textmode: '\\mathmit{\\delta}'}, 120576: {LaTeXMode.textmode: '\\mathmit{\\epsilon}'}, 120577: {LaTeXMode.textmode: '\\mathmit{\\zeta}'}, 120578: {LaTeXMode.textmode: '\\mathmit{\\eta}'}, 120579: {LaTeXMode.textmode: '\\mathmit{\\theta}'}, 120580: {LaTeXMode.textmode: '\\mathmit{\\iota}'}, 120581: {LaTeXMode.textmode: '\\mathmit{\\kappa}'}, 120582: {LaTeXMode.textmode: '\\mathmit{\\lambda}'}, 120583: {LaTeXMode.textmode: '\\mathmit{\\mu}'}, 120584: {LaTeXMode.textmode: '\\mathmit{\\nu}'}, 120585: {LaTeXMode.textmode: '\\mathmit{\\xi}'}, 120586: {LaTeXMode.textmode: '\\mathmit{o}'}, 120587: {LaTeXMode.textmode: '\\mathmit{\\pi}'}, 120588: {LaTeXMode.textmode: '\\mathmit{\\rho}'}, 120589: {LaTeXMode.textmode: 
'\\mathmit{\\varsigma}'}, 120590: {LaTeXMode.textmode: '\\mathmit{\\sigma}'}, 120591: {LaTeXMode.textmode: '\\mathmit{\\tau}'}, 120592: {LaTeXMode.textmode: '\\mathmit{\\upsilon}'}, 120593: {LaTeXMode.textmode: '\\mathmit{\\phi}'}, 120594: {LaTeXMode.textmode: '\\mathmit{\\chi}'}, 120595: {LaTeXMode.textmode: '\\mathmit{\\psi}'}, 120596: {LaTeXMode.textmode: '\\mathmit{\\omega}'}, 120597: {LaTeXMode.textmode: '\\partial '}, 120598: {LaTeXMode.textmode: '\\in'}, 120599: {LaTeXMode.textmode: '\\mathmit{\\vartheta}'}, 120600: {LaTeXMode.textmode: '\\mathmit{\\varkappa}'}, 120601: {LaTeXMode.textmode: '\\mathmit{\\phi}'}, 120602: {LaTeXMode.textmode: '\\mathmit{\\varrho}'}, 120603: {LaTeXMode.textmode: '\\mathmit{\\varpi}'}, 120604: {LaTeXMode.textmode: '\\mathbit{\\Alpha}'}, 120605: {LaTeXMode.textmode: '\\mathbit{\\Beta}'}, 120606: {LaTeXMode.textmode: '\\mathbit{\\Gamma}'}, 120607: {LaTeXMode.textmode: '\\mathbit{\\Delta}'}, 120608: {LaTeXMode.textmode: '\\mathbit{\\Epsilon}'}, 120609: {LaTeXMode.textmode: '\\mathbit{\\Zeta}'}, 120610: {LaTeXMode.textmode: '\\mathbit{\\Eta}'}, 120611: {LaTeXMode.textmode: '\\mathbit{\\Theta}'}, 120612: {LaTeXMode.textmode: '\\mathbit{\\Iota}'}, 120613: {LaTeXMode.textmode: '\\mathbit{\\Kappa}'}, 120614: {LaTeXMode.textmode: '\\mathbit{\\Lambda}'}, 120615: {LaTeXMode.textmode: '\\mathbit{M}'}, 120616: {LaTeXMode.textmode: '\\mathbit{N}'}, 120617: {LaTeXMode.textmode: '\\mathbit{\\Xi}'}, 120618: {LaTeXMode.textmode: 'O'}, 120619: {LaTeXMode.textmode: '\\mathbit{\\Pi}'}, 120620: {LaTeXMode.textmode: '\\mathbit{\\Rho}'}, 120621: {LaTeXMode.textmode: '\\mathbit{O}'}, 120622: {LaTeXMode.textmode: '\\mathbit{\\Sigma}'}, 120623: {LaTeXMode.textmode: '\\mathbit{\\Tau}'}, 120624: {LaTeXMode.textmode: '\\mathbit{\\Upsilon}'}, 120625: {LaTeXMode.textmode: '\\mathbit{\\Phi}'}, 120626: {LaTeXMode.textmode: '\\mathbit{\\Chi}'}, 120627: {LaTeXMode.textmode: '\\mathbit{\\Psi}'}, 120628: {LaTeXMode.textmode: '\\mathbit{\\Omega}'}, 120629: {LaTeXMode.textmode: '\\mathbit{\\nabla}'}, 120630: {LaTeXMode.textmode: '\\mathbit{\\alpha}'}, 120631: {LaTeXMode.textmode: '\\mathbit{\\beta}'}, 120632: {LaTeXMode.textmode: '\\mathbit{\\gamma}'}, 120633: {LaTeXMode.textmode: '\\mathbit{\\delta}'}, 120634: {LaTeXMode.textmode: '\\mathbit{\\epsilon}'}, 120635: {LaTeXMode.textmode: '\\mathbit{\\zeta}'}, 120636: {LaTeXMode.textmode: '\\mathbit{\\eta}'}, 120637: {LaTeXMode.textmode: '\\mathbit{\\theta}'}, 120638: {LaTeXMode.textmode: '\\mathbit{\\iota}'}, 120639: {LaTeXMode.textmode: '\\mathbit{\\kappa}'}, 120640: {LaTeXMode.textmode: '\\mathbit{\\lambda}'}, 120641: {LaTeXMode.textmode: '\\mathbit{\\mu}'}, 120642: {LaTeXMode.textmode: '\\mathbit{\\nu}'}, 120643: {LaTeXMode.textmode: '\\mathbit{\\xi}'}, 120644: {LaTeXMode.textmode: '\\mathbit{o}'}, 120645: {LaTeXMode.textmode: '\\mathbit{\\pi}'}, 120646: {LaTeXMode.textmode: '\\mathbit{\\rho}'}, 120647: {LaTeXMode.textmode: '\\mathbit{\\varsigma}'}, 120648: {LaTeXMode.textmode: '\\mathbit{\\sigma}'}, 120649: {LaTeXMode.textmode: '\\mathbit{\\tau}'}, 120650: {LaTeXMode.textmode: '\\mathbit{\\upsilon}'}, 120651: {LaTeXMode.textmode: '\\mathbit{\\phi}'}, 120652: {LaTeXMode.textmode: '\\mathbit{\\chi}'}, 120653: {LaTeXMode.textmode: '\\mathbit{\\psi}'}, 120654: {LaTeXMode.textmode: '\\mathbit{\\omega}'}, 120655: {LaTeXMode.textmode: '\\partial '}, 120656: {LaTeXMode.textmode: '\\in'}, 120657: {LaTeXMode.textmode: '\\mathbit{\\vartheta}'}, 120658: {LaTeXMode.textmode: '\\mathbit{\\varkappa}'}, 120659: {LaTeXMode.textmode: '\\mathbit{\\phi}'}, 
120660: {LaTeXMode.textmode: '\\mathbit{\\varrho}'}, 120661: {LaTeXMode.textmode: '\\mathbit{\\varpi}'}, 120662: {LaTeXMode.textmode: '\\mathsfbf{\\Alpha}'}, 120663: {LaTeXMode.textmode: '\\mathsfbf{\\Beta}'}, 120664: {LaTeXMode.textmode: '\\mathsfbf{\\Gamma}'}, 120665: {LaTeXMode.textmode: '\\mathsfbf{\\Delta}'}, 120666: {LaTeXMode.textmode: '\\mathsfbf{\\Epsilon}'}, 120667: {LaTeXMode.textmode: '\\mathsfbf{\\Zeta}'}, 120668: {LaTeXMode.textmode: '\\mathsfbf{\\Eta}'}, 120669: {LaTeXMode.textmode: '\\mathsfbf{\\Theta}'}, 120670: {LaTeXMode.textmode: '\\mathsfbf{\\Iota}'}, 120671: {LaTeXMode.textmode: '\\mathsfbf{\\Kappa}'}, 120672: {LaTeXMode.textmode: '\\mathsfbf{\\Lambda}'}, 120673: {LaTeXMode.textmode: '\\mathsfbf{M}'}, 120674: {LaTeXMode.textmode: '\\mathsfbf{N}'}, 120675: {LaTeXMode.textmode: '\\mathsfbf{\\Xi}'}, 120676: {LaTeXMode.textmode: 'O'}, 120677: {LaTeXMode.textmode: '\\mathsfbf{\\Pi}'}, 120678: {LaTeXMode.textmode: '\\mathsfbf{\\Rho}'}, 120679: {LaTeXMode.textmode: '\\mathsfbf{\\vartheta}'}, 120680: {LaTeXMode.textmode: '\\mathsfbf{\\Sigma}'}, 120681: {LaTeXMode.textmode: '\\mathsfbf{\\Tau}'}, 120682: {LaTeXMode.textmode: '\\mathsfbf{\\Upsilon}'}, 120683: {LaTeXMode.textmode: '\\mathsfbf{\\Phi}'}, 120684: {LaTeXMode.textmode: '\\mathsfbf{\\Chi}'}, 120685: {LaTeXMode.textmode: '\\mathsfbf{\\Psi}'}, 120686: {LaTeXMode.textmode: '\\mathsfbf{\\Omega}'}, 120687: {LaTeXMode.textmode: '\\mathsfbf{\\nabla}'}, 120688: {LaTeXMode.textmode: '\\mathsfbf{\\alpha}'}, 120689: {LaTeXMode.textmode: '\\mathsfbf{\\beta}'}, 120690: {LaTeXMode.textmode: '\\mathsfbf{\\gamma}'}, 120691: {LaTeXMode.textmode: '\\mathsfbf{\\delta}'}, 120692: {LaTeXMode.textmode: '\\mathsfbf{\\epsilon}'}, 120693: {LaTeXMode.textmode: '\\mathsfbf{\\zeta}'}, 120694: {LaTeXMode.textmode: '\\mathsfbf{\\eta}'}, 120695: {LaTeXMode.textmode: '\\mathsfbf{\\theta}'}, 120696: {LaTeXMode.textmode: '\\mathsfbf{\\iota}'}, 120697: {LaTeXMode.textmode: '\\mathsfbf{\\kappa}'}, 120698: {LaTeXMode.textmode: '\\mathsfbf{\\lambda}'}, 120699: {LaTeXMode.textmode: '\\mathsfbf{\\mu}'}, 120700: {LaTeXMode.textmode: '\\mathsfbf{\\nu}'}, 120701: {LaTeXMode.textmode: '\\mathsfbf{\\xi}'}, 120702: {LaTeXMode.textmode: '\\mathsfbf{o}'}, 120703: {LaTeXMode.textmode: '\\mathsfbf{\\pi}'}, 120704: {LaTeXMode.textmode: '\\mathsfbf{\\rho}'}, 120705: {LaTeXMode.textmode: '\\mathsfbf{\\varsigma}'}, 120706: {LaTeXMode.textmode: '\\mathsfbf{\\sigma}'}, 120707: {LaTeXMode.textmode: '\\mathsfbf{\\tau}'}, 120708: {LaTeXMode.textmode: '\\mathsfbf{\\upsilon}'}, 120709: {LaTeXMode.textmode: '\\mathsfbf{\\phi}'}, 120710: {LaTeXMode.textmode: '\\mathsfbf{\\chi}'}, 120711: {LaTeXMode.textmode: '\\mathsfbf{\\psi}'}, 120712: {LaTeXMode.textmode: '\\mathsfbf{\\omega}'}, 120713: {LaTeXMode.textmode: '\\partial '}, 120714: {LaTeXMode.textmode: '\\mathsfbf{\\varepsilon}'}, 120715: {LaTeXMode.textmode: '\\mathsfbf{\\vartheta}'}, 120716: {LaTeXMode.textmode: '\\mathsfbf{\\varkappa}'}, 120717: {LaTeXMode.textmode: '\\mathsfbf{\\phi}'}, 120718: {LaTeXMode.textmode: '\\mathsfbf{\\varrho}'}, 120719: {LaTeXMode.textmode: '\\mathsfbf{\\varpi}'}, 120720: {LaTeXMode.textmode: '\\mathsfbfsl{\\Alpha}'}, 120721: {LaTeXMode.textmode: '\\mathsfbfsl{\\Beta}'}, 120722: {LaTeXMode.textmode: '\\mathsfbfsl{\\Gamma}'}, 120723: {LaTeXMode.textmode: '\\mathsfbfsl{\\Delta}'}, 120724: {LaTeXMode.textmode: '\\mathsfbfsl{\\Epsilon}'}, 120725: {LaTeXMode.textmode: '\\mathsfbfsl{\\Zeta}'}, 120726: {LaTeXMode.textmode: '\\mathsfbfsl{\\Eta}'}, 120727: {LaTeXMode.textmode: '\\mathsfbfsl{\\vartheta}'}, 
120728: {LaTeXMode.textmode: '\\mathsfbfsl{\\Iota}'}, 120729: {LaTeXMode.textmode: '\\mathsfbfsl{\\Kappa}'}, 120730: {LaTeXMode.textmode: '\\mathsfbfsl{\\Lambda}'}, 120731: {LaTeXMode.textmode: '\\mathsfbfsl{M}'}, 120732: {LaTeXMode.textmode: '\\mathsfbfsl{N}'}, 120733: {LaTeXMode.textmode: '\\mathsfbfsl{\\Xi}'}, 120734: {LaTeXMode.textmode: 'O'}, 120735: {LaTeXMode.textmode: '\\mathsfbfsl{\\Pi}'}, 120736: {LaTeXMode.textmode: '\\mathsfbfsl{\\Rho}'}, 120737: {LaTeXMode.textmode: '\\mathsfbfsl{\\vartheta}'}, 120738: {LaTeXMode.textmode: '\\mathsfbfsl{\\Sigma}'}, 120739: {LaTeXMode.textmode: '\\mathsfbfsl{\\Tau}'}, 120740: {LaTeXMode.textmode: '\\mathsfbfsl{\\Upsilon}'}, 120741: {LaTeXMode.textmode: '\\mathsfbfsl{\\Phi}'}, 120742: {LaTeXMode.textmode: '\\mathsfbfsl{\\Chi}'}, 120743: {LaTeXMode.textmode: '\\mathsfbfsl{\\Psi}'}, 120744: {LaTeXMode.textmode: '\\mathsfbfsl{\\Omega}'}, 120745: {LaTeXMode.textmode: '\\mathsfbfsl{\\nabla}'}, 120746: {LaTeXMode.textmode: '\\mathsfbfsl{\\alpha}'}, 120747: {LaTeXMode.textmode: '\\mathsfbfsl{\\beta}'}, 120748: {LaTeXMode.textmode: '\\mathsfbfsl{\\gamma}'}, 120749: {LaTeXMode.textmode: '\\mathsfbfsl{\\delta}'}, 120750: {LaTeXMode.textmode: '\\mathsfbfsl{\\epsilon}'}, 120751: {LaTeXMode.textmode: '\\mathsfbfsl{\\zeta}'}, 120752: {LaTeXMode.textmode: '\\mathsfbfsl{\\eta}'}, 120753: {LaTeXMode.textmode: '\\mathsfbfsl{\\vartheta}'}, 120754: {LaTeXMode.textmode: '\\mathsfbfsl{\\iota}'}, 120755: {LaTeXMode.textmode: '\\mathsfbfsl{\\kappa}'}, 120756: {LaTeXMode.textmode: '\\mathsfbfsl{\\lambda}'}, 120757: {LaTeXMode.textmode: '\\mathsfbfsl{\\mu}'}, 120758: {LaTeXMode.textmode: '\\mathsfbfsl{\\nu}'}, 120759: {LaTeXMode.textmode: '\\mathsfbfsl{\\xi}'}, 120760: {LaTeXMode.textmode: '\\mathsfbfsl{o}'}, 120761: {LaTeXMode.textmode: '\\mathsfbfsl{\\pi}'}, 120762: {LaTeXMode.textmode: '\\mathsfbfsl{\\rho}'}, 120763: {LaTeXMode.textmode: '\\mathsfbfsl{\\varsigma}'}, 120764: {LaTeXMode.textmode: '\\mathsfbfsl{\\sigma}'}, 120765: {LaTeXMode.textmode: '\\mathsfbfsl{\\tau}'}, 120766: {LaTeXMode.textmode: '\\mathsfbfsl{\\upsilon}'}, 120767: {LaTeXMode.textmode: '\\mathsfbfsl{\\phi}'}, 120768: {LaTeXMode.textmode: '\\mathsfbfsl{\\chi}'}, 120769: {LaTeXMode.textmode: '\\mathsfbfsl{\\psi}'}, 120770: {LaTeXMode.textmode: '\\mathsfbfsl{\\omega}'}, 120771: {LaTeXMode.textmode: '\\partial '}, 120772: {LaTeXMode.textmode: '\\in'}, 120773: {LaTeXMode.textmode: '\\mathsfbfsl{\\vartheta}'}, 120774: {LaTeXMode.textmode: '\\mathsfbfsl{\\varkappa}'}, 120775: {LaTeXMode.textmode: '\\mathsfbfsl{\\phi}'}, 120776: {LaTeXMode.textmode: '\\mathsfbfsl{\\varrho}'}, 120777: {LaTeXMode.textmode: '\\mathsfbfsl{\\varpi}'}, 120782: {LaTeXMode.textmode: '\\mathbf{0}'}, 120783: {LaTeXMode.textmode: '\\mathbf{1}'}, 120784: {LaTeXMode.textmode: '\\mathbf{2}'}, 120785: {LaTeXMode.textmode: '\\mathbf{3}'}, 120786: {LaTeXMode.textmode: '\\mathbf{4}'}, 120787: {LaTeXMode.textmode: '\\mathbf{5}'}, 120788: {LaTeXMode.textmode: '\\mathbf{6}'}, 120789: {LaTeXMode.textmode: '\\mathbf{7}'}, 120790: {LaTeXMode.textmode: '\\mathbf{8}'}, 120791: {LaTeXMode.textmode: '\\mathbf{9}'}, 120792: {LaTeXMode.textmode: '\\mathbb{0}'}, 120793: {LaTeXMode.textmode: '\\mathbb{1}'}, 120794: {LaTeXMode.textmode: '\\mathbb{2}'}, 120795: {LaTeXMode.textmode: '\\mathbb{3}'}, 120796: {LaTeXMode.textmode: '\\mathbb{4}'}, 120797: {LaTeXMode.textmode: '\\mathbb{5}'}, 120798: {LaTeXMode.textmode: '\\mathbb{6}'}, 120799: {LaTeXMode.textmode: '\\mathbb{7}'}, 120800: {LaTeXMode.textmode: '\\mathbb{8}'}, 120801: {LaTeXMode.textmode: 
'\\mathbb{9}'}, 120802: {LaTeXMode.textmode: '\\mathsf{0}'}, 120803: {LaTeXMode.textmode: '\\mathsf{1}'}, 120804: {LaTeXMode.textmode: '\\mathsf{2}'}, 120805: {LaTeXMode.textmode: '\\mathsf{3}'}, 120806: {LaTeXMode.textmode: '\\mathsf{4}'}, 120807: {LaTeXMode.textmode: '\\mathsf{5}'}, 120808: {LaTeXMode.textmode: '\\mathsf{6}'}, 120809: {LaTeXMode.textmode: '\\mathsf{7}'}, 120810: {LaTeXMode.textmode: '\\mathsf{8}'}, 120811: {LaTeXMode.textmode: '\\mathsf{9}'}, 120812: {LaTeXMode.textmode: '\\mathsfbf{0}'}, 120813: {LaTeXMode.textmode: '\\mathsfbf{1}'}, 120814: {LaTeXMode.textmode: '\\mathsfbf{2}'}, 120815: {LaTeXMode.textmode: '\\mathsfbf{3}'}, 120816: {LaTeXMode.textmode: '\\mathsfbf{4}'}, 120817: {LaTeXMode.textmode: '\\mathsfbf{5}'}, 120818: {LaTeXMode.textmode: '\\mathsfbf{6}'}, 120819: {LaTeXMode.textmode: '\\mathsfbf{7}'}, 120820: {LaTeXMode.textmode: '\\mathsfbf{8}'}, 120821: {LaTeXMode.textmode: '\\mathsfbf{9}'}, 120822: {LaTeXMode.textmode: '\\mathtt{0}'}, 120823: {LaTeXMode.textmode: '\\mathtt{1}'}, 120824: {LaTeXMode.textmode: '\\mathtt{2}'}, 120825: {LaTeXMode.textmode: '\\mathtt{3}'}, 120826: {LaTeXMode.textmode: '\\mathtt{4}'}, 120827: {LaTeXMode.textmode: '\\mathtt{5}'}, 120828: {LaTeXMode.textmode: '\\mathtt{6}'}, 120829: {LaTeXMode.textmode: '\\mathtt{7}'}, 120830: {LaTeXMode.textmode: '\\mathtt{8}'}, 120831: {LaTeXMode.textmode: '\\mathtt{9}'}, }
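
# A minimal usage sketch (not part of the original module): assuming the table
# above is a codepoint -> {LaTeXMode: macro} mapping (its binding name is not
# visible here, so it is passed in as a parameter), a string can be converted
# one character at a time, keeping characters that have no textmode entry.
def _to_latex(text, table):
    pieces = []
    for ch in text:
        entry = table.get(ord(ch))  # per-codepoint dict keyed by LaTeXMode
        if entry and LaTeXMode.textmode in entry:
            pieces.append(entry[LaTeXMode.textmode])
        else:
            pieces.append(ch)  # no known replacement: pass through unchanged
    return ''.join(pieces)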
PypiClean
/lyberry_dmenu-0.0.1-py3-none-any.whl/lyberry_dmenu/lyberrydmenu.py
import lyberry_api

# This will first try to use a dmenu wrapper library, which you can get off of PyPI as simply dmenu.
# If that is not installed it will make a class that emulates part of the library.
try:
    import dmenu
except ImportError:
    import subprocess

    class dmenu:
        def show(self, items: list, lines: int = 0, prompt: str = ""):
            items_str = ""
            for item in items:
                items_str += f"{item}\n"
            return (
                subprocess.check_output(
                    ["dmenu", "-l", str(lines), "-p", prompt],
                    input=items_str.encode(),
                )
                .decode()
                .rstrip()
            )

    dmenu = dmenu()


class LyBerry_Dmenu:
    def __init__(self):
        self.lbry = lyberry_api.LBRY_Api()

    def repl(self):
        commands = ["search", "following"]
        selected = dmenu.show(commands, lines=len(commands), prompt="Command to run:")
        if selected == "search":
            self.search()
        if selected == "following":
            self.following()

    def search(self):
        query = dmenu.show([], prompt="Search for:")
        if query:
            results = self.lbry.lbrynet_search_feed(text=query)
            self.play_from_results(results)

    def following(self):
        results = self.lbry.sub_feed
        self.play_from_results(results)

    def play_from_results(self, results):
        items = []
        for i in range(20):
            try:
                items.append(next(results))
            except StopIteration:
                break
        options = []
        for item in items:
            options.append(
                item.title
                + " - "
                + (item.channel.title if hasattr(item, "channel") else "Anonymous")
            )
        selection = dmenu.show(options, lines=20, prompt="Claim to open:")
        if selection:
            selected_item = items[options.index(selection)]
            self.open_external(selected_item)

    def open_external(self, pub):
        file_type = pub.media_type.split("/")[0]
        if file_type == "video" or file_type == "audio":
            lyberry_api.settings.media_player(pub.streaming_url)
        elif file_type == "text":
            lyberry_api.settings.text_viewer(pub.streaming_url)


def main():
    LyBerry_Dmenu().repl()


if __name__ == "__main__":
    main()
PypiClean
/sage-utils-0.5.6.tar.gz/sage-utils-0.5.6/sage_utils/amqp/clients.py
import asyncio
import json
from copy import deepcopy


class RpcAmqpClient(object):
    CONTENT_TYPE = 'application/json'
    DEFAULT_PROPERTIES = {
        'content_type': CONTENT_TYPE,
        'delivery_mode': 2,
        'correlation_id': 'event-name'
    }

    def __init__(self, app, routing_key, request_exchange='',
                 response_queue=None, response_exchange='', loop=None):
        self.app = app
        self.loop = loop or getattr(self.app, 'loop', None) or asyncio.get_event_loop()
        self.routing_key = routing_key
        self.request_exchange = request_exchange
        self.response_queue = response_queue
        self.response_exchange = response_exchange
        self.transport = None
        self.protocol = None
        self.channel = None
        self.waiter = asyncio.Event()
        self._response_queue_name = None
        self._response = None

    @property
    def response_queue_name(self):
        return self._response_queue_name

    async def connect(self, consume_timeout=None):
        self.transport, self.protocol = await self.app.amqp.connect()
        self.channel = await self.protocol.channel()

        if self.response_queue is not None:
            result = await self.channel.queue_declare(
                queue_name=self.response_queue,
                exclusive=True,
                durable=True,
                passive=False,
                auto_delete=True,
            )
            self._response_queue_name = result['queue']
            await self.channel.queue_bind(
                queue_name=self.response_queue_name,
                exchange_name=self.response_exchange,
                routing_key=self.response_queue_name
            )
            await self.channel.basic_qos(
                prefetch_count=1,
                prefetch_size=0,
                connection_global=False
            )
            await asyncio.wait_for(
                self.channel.basic_consume(
                    self.on_response,
                    queue_name=self._response_queue_name,
                ),
                timeout=consume_timeout,
                loop=self.loop
            )

    async def on_response(self, _channel, body, _envelope, _properties):
        self._response = json.loads(body)
        self.waiter.set()

    async def send(self, payload={}, properties={}, raw_data=False, consume_timeout=None):
        if not self.protocol:
            await self.connect(consume_timeout=consume_timeout)

        request_properties = deepcopy(self.DEFAULT_PROPERTIES)
        request_properties.update({'reply_to': self.response_queue_name})
        request_properties.update(properties)
        await self.channel.publish(
            payload if raw_data else json.dumps(payload),
            exchange_name=self.request_exchange,
            routing_key=self.routing_key,
            properties=request_properties
        )

        response = None
        if self.response_queue_name is not None:
            await self.waiter.wait()
            response = self._response

        await self.protocol.close()
        self.protocol = None
        self.transport = None
        return response
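
# A minimal usage sketch (not part of the original module). It assumes an
# application object exposing the `amqp.connect()` coroutine used in connect()
# above, plus a consumer bound to the illustrative 'users.create' routing key;
# the routing key and exchange names are assumptions, not part of this SDK.
async def example_rpc_call(app):
    client = RpcAmqpClient(
        app,
        routing_key='users.create',
        response_queue='',               # empty name: let the broker generate one
        response_exchange='amq.direct',
    )
    # send() connects lazily, publishes the JSON payload with a `reply_to`
    # property, then waits until on_response() stores the reply.
    return await client.send(payload={'username': 'alice'})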
PypiClean
/gwu_nn-0.2.0.tar.gz/gwu_nn-0.2.0/gwu_nn/layers.py
import numpy as np
from abc import ABC, abstractmethod

from gwu_nn.activation_layers import Sigmoid, RELU, Softmax

activation_functions = {'relu': RELU, 'sigmoid': Sigmoid, 'softmax': Softmax}


def apply_activation_forward(forward_pass):
    """Decorator that ensures that a layer's activation function is applied
    after the layer during forward propagation.
    """
    def wrapper(*args):
        output = forward_pass(args[0], args[1])
        if args[0].activation:
            return args[0].activation.forward_propagation(output)
        else:
            return output
    return wrapper


def apply_activation_backward(backward_pass):
    """Decorator that ensures that a layer's activation function's derivative
    is applied before the layer during backwards propagation.
    """
    def wrapper(*args):
        output_error = args[1]
        learning_rate = args[2]
        if args[0].activation:
            output_error = args[0].activation.backward_propagation(output_error, learning_rate)
        return backward_pass(args[0], output_error, learning_rate)
    return wrapper


class Layer():
    """The Layer layer is an abstract object used to define the template for other layer types to inherit"""

    def __init__(self, activation=None):
        """Because Layer is an abstract object, we don't provide any detailing on the initialization"""
        self.type = "Layer"
        if activation:
            self.activation = activation_functions[activation]()
        else:
            self.activation = None

    @apply_activation_forward
    def forward_propagation(cls, input):
        """:noindex:"""
        pass

    @apply_activation_backward
    def backward_propogation(cls, output_error, learning_rate):
        """:noindex:"""
        pass


class Dense(Layer):
    """The Dense layer class creates a layer that is fully connected with the
    previous layer. This means that the number of weights will be MxN, where M
    is the number of nodes in the previous layer and N is the number of nodes
    in the current layer.
    """

    def __init__(self, output_size, add_bias=False, activation=None, input_size=None):
        super().__init__(activation)
        self.type = None
        self.name = "Dense"
        self.input_size = input_size
        self.output_size = output_size
        self.add_bias = add_bias

    def init_weights(self, input_size):
        """Initialize the weights for the layer based on input and output size

        Args:
            input_size (numpy array): dimensions for the input array
        """
        if self.input_size is None:
            self.input_size = input_size

        self.weights = np.random.randn(input_size, self.output_size) / np.sqrt(input_size + self.output_size)
        # TODO: Batching of inputs has broken how bias works. Need to address in next iteration
        if self.add_bias:
            self.bias = np.random.randn(1, self.output_size) / np.sqrt(input_size + self.output_size)

    @apply_activation_forward
    def forward_propagation(self, input):
        """Applies the forward propagation for a densely connected layer. This
        will compute the dot product between the input value (calculated during
        forward propagation) and the layer's weight tensor.

        Args:
            input (np.array): Input tensor calculated during forward propagation up to this layer.

        Returns:
            np.array(float): The dot product of the input and the layer's weight tensor."""
        self.input = input
        output = np.dot(input, self.weights)
        if self.add_bias:
            return output + self.bias
        else:
            return output

    @apply_activation_backward
    def backward_propagation(self, output_error, learning_rate):
        """Applies the backward propagation for a densely connected layer. This
        will calculate the output error (dot product of the output_error and
        the layer's weights) and will calculate the update gradient for the
        weights (dot product of the layer's input values and the output_error).

        Args:
            output_error (np.array): The gradient of the error up to this point in the network.

        Returns:
            np.array(float): The gradient of the error up to and including this layer."""
        input_error = np.dot(output_error, self.weights.T)
        weights_error = np.dot(self.input.T, output_error)

        self.weights -= learning_rate * weights_error
        if self.add_bias:
            self.bias -= learning_rate * output_error
        return input_error
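
# A minimal usage sketch (not part of the original module): one gradient step
# for a single Dense layer on a toy batch. The shapes, targets and the
# squared-error gradient are illustrative assumptions.
if __name__ == '__main__':
    np.random.seed(0)
    layer = Dense(output_size=1, activation='sigmoid', input_size=3)
    layer.init_weights(3)

    x = np.random.randn(4, 3)            # batch of 4 samples with 3 features
    y = np.zeros((4, 1))                 # dummy targets
    pred = layer.forward_propagation(x)  # decorator applies the sigmoid
    grad = 2 * (pred - y) / len(y)       # d(MSE)/d(pred)
    # The learning rate must be positional: the decorator's wrapper only
    # accepts positional arguments.
    layer.backward_propagation(grad, 0.01)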
PypiClean
/mis_modulos-0.1.tar.gz/mis_modulos-0.1/tensorflow/python/platform/tf_logging.py
"""Logging utilities.""" # pylint: disable=unused-import # pylint: disable=g-bad-import-order # pylint: disable=invalid-name import logging as _logging import os as _os import sys as _sys import _thread import time as _time import traceback as _traceback from logging import DEBUG from logging import ERROR from logging import FATAL from logging import INFO from logging import WARN import threading from tensorflow.python.util.tf_export import tf_export # Don't use this directly. Use get_logger() instead. _logger = None _logger_lock = threading.Lock() def _get_caller(offset=3): """Returns a code and frame object for the lowest non-logging stack frame.""" # Use sys._getframe(). This avoids creating a traceback object. # pylint: disable=protected-access f = _sys._getframe(offset) # pylint: enable=protected-access our_file = f.f_code.co_filename f = f.f_back while f: code = f.f_code if code.co_filename != our_file: return code, f f = f.f_back return None, None # The definition of `findCaller` changed in Python 3.2, # and further changed in Python 3.8 if _sys.version_info.major >= 3 and _sys.version_info.minor >= 8: def _logger_find_caller(stack_info=False, stacklevel=1): # pylint: disable=g-wrong-blank-lines code, frame = _get_caller(4) sinfo = None if stack_info: sinfo = '\n'.join(_traceback.format_stack()) if code: return (code.co_filename, frame.f_lineno, code.co_name, sinfo) else: return '(unknown file)', 0, '(unknown function)', sinfo elif _sys.version_info.major >= 3 and _sys.version_info.minor >= 2: def _logger_find_caller(stack_info=False): # pylint: disable=g-wrong-blank-lines code, frame = _get_caller(4) sinfo = None if stack_info: sinfo = '\n'.join(_traceback.format_stack()) if code: return (code.co_filename, frame.f_lineno, code.co_name, sinfo) else: return '(unknown file)', 0, '(unknown function)', sinfo else: def _logger_find_caller(): # pylint: disable=g-wrong-blank-lines code, frame = _get_caller(4) if code: return (code.co_filename, frame.f_lineno, code.co_name) else: return '(unknown file)', 0, '(unknown function)' @tf_export('get_logger') def get_logger(): """Return TF logger instance.""" global _logger # Use double-checked locking to avoid taking lock unnecessarily. if _logger: return _logger _logger_lock.acquire() try: if _logger: return _logger # Scope the TensorFlow logger to not conflict with users' loggers. logger = _logging.getLogger('tensorflow') # Override findCaller on the logger to skip internal helper functions logger.findCaller = _logger_find_caller # Don't further configure the TensorFlow logger if the root logger is # already configured. This prevents double logging in those cases. if not _logging.getLogger().handlers: # Determine whether we are in an interactive environment _interactive = False try: # This is only defined in interactive shells. if _sys.ps1: _interactive = True except AttributeError: # Even now, we may be in an interactive shell with `python -i`. _interactive = _sys.flags.interactive # If we are in an interactive environment (like Jupyter), set loglevel # to INFO and pipe the output to stdout. if _interactive: logger.setLevel(INFO) _logging_target = _sys.stdout else: _logging_target = _sys.stderr # Add the output handler. 
_handler = _logging.StreamHandler(_logging_target) _handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT, None)) logger.addHandler(_handler) _logger = logger return _logger finally: _logger_lock.release() @tf_export(v1=['logging.log']) def log(level, msg, *args, **kwargs): get_logger().log(level, msg, *args, **kwargs) @tf_export(v1=['logging.debug']) def debug(msg, *args, **kwargs): get_logger().debug(msg, *args, **kwargs) @tf_export(v1=['logging.error']) def error(msg, *args, **kwargs): get_logger().error(msg, *args, **kwargs) @tf_export(v1=['logging.fatal']) def fatal(msg, *args, **kwargs): get_logger().fatal(msg, *args, **kwargs) @tf_export(v1=['logging.info']) def info(msg, *args, **kwargs): get_logger().info(msg, *args, **kwargs) @tf_export(v1=['logging.warn']) def warn(msg, *args, **kwargs): get_logger().warning(msg, *args, **kwargs) @tf_export(v1=['logging.warning']) def warning(msg, *args, **kwargs): get_logger().warning(msg, *args, **kwargs) _level_names = { FATAL: 'FATAL', ERROR: 'ERROR', WARN: 'WARN', INFO: 'INFO', DEBUG: 'DEBUG', } # Mask to convert integer thread ids to unsigned quantities for logging # purposes _THREAD_ID_MASK = 2 * _sys.maxsize + 1 _log_prefix = None # later set to google2_log_prefix # Counter to keep track of number of log entries per token. _log_counter_per_token = {} @tf_export(v1=['logging.TaskLevelStatusMessage']) def TaskLevelStatusMessage(msg): error(msg) @tf_export(v1=['logging.flush']) def flush(): raise NotImplementedError() # Code below is taken from pyglib/logging @tf_export(v1=['logging.vlog']) def vlog(level, msg, *args, **kwargs): get_logger().log(level, msg, *args, **kwargs) def _GetNextLogCountPerToken(token): """Wrapper for _log_counter_per_token. Args: token: The token for which to look up the count. Returns: The number of times this function has been called with *token* as an argument (starting at 0) """ global _log_counter_per_token # pylint: disable=global-variable-not-assigned _log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1) return _log_counter_per_token[token] @tf_export(v1=['logging.log_every_n']) def log_every_n(level, msg, n, *args): """Log 'msg % args' at level 'level' once per 'n' times. Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. *args: The args to be substituted into the msg. """ count = _GetNextLogCountPerToken(_GetFileAndLine()) log_if(level, msg, not (count % n), *args) @tf_export(v1=['logging.log_first_n']) def log_first_n(level, msg, n, *args): # pylint: disable=g-bad-name """Log 'msg % args' at level 'level' only first 'n' times. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. *args: The args to be substituted into the msg. 
""" count = _GetNextLogCountPerToken(_GetFileAndLine()) log_if(level, msg, count < n, *args) @tf_export(v1=['logging.log_if']) def log_if(level, msg, condition, *args): """Log 'msg % args' at level 'level' only if condition is fulfilled.""" if condition: vlog(level, msg, *args) def _GetFileAndLine(): """Returns (filename, linenumber) for the stack frame.""" code, f = _get_caller() if not code: return ('<unknown>', 0) return (code.co_filename, f.f_lineno) def google2_log_prefix(level, timestamp=None, file_and_line=None): """Assemble a logline prefix using the google2 format.""" # pylint: disable=global-variable-not-assigned global _level_names # pylint: enable=global-variable-not-assigned # Record current time now = timestamp or _time.time() now_tuple = _time.localtime(now) now_microsecond = int(1e6 * (now % 1.0)) (filename, line) = file_and_line or _GetFileAndLine() basename = _os.path.basename(filename) # Severity string severity = 'I' if level in _level_names: severity = _level_names[level][0] s = '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] ' % ( severity, now_tuple[1], # month now_tuple[2], # day now_tuple[3], # hour now_tuple[4], # min now_tuple[5], # sec now_microsecond, _get_thread_id(), basename, line) return s @tf_export(v1=['logging.get_verbosity']) def get_verbosity(): """Return how much logging output will be produced.""" return get_logger().getEffectiveLevel() @tf_export(v1=['logging.set_verbosity']) def set_verbosity(v): """Sets the threshold for what messages will be logged.""" get_logger().setLevel(v) def _get_thread_id(): """Get id of current thread, suitable for logging as an unsigned quantity.""" thread_id = _thread.get_ident() return thread_id & _THREAD_ID_MASK _log_prefix = google2_log_prefix tf_export(v1=['logging.DEBUG']).export_constant(__name__, 'DEBUG') tf_export(v1=['logging.ERROR']).export_constant(__name__, 'ERROR') tf_export(v1=['logging.FATAL']).export_constant(__name__, 'FATAL') tf_export(v1=['logging.INFO']).export_constant(__name__, 'INFO') tf_export(v1=['logging.WARN']).export_constant(__name__, 'WARN')
PypiClean
/kivy_garden.ebs.clocks-1.0.1.tar.gz/kivy_garden.ebs.clocks-1.0.1/README.md
Clock Widgets for Kivy
======================

[![Github Build Status](https://github.com/ebs-universe/kivy_garden.ebs.clocks/workflows/Garden%20flower/badge.svg)](https://github.com/ebs-universe/kivy_garden.ebs.clocks/actions)

This package provides relatively simple Clock widgets for Kivy. Presently it
has only a single, trivial clock and is certainly not deserving of a whole
package. In time, this package is intended to hold a curated collection of
clocks in both Python and .kv.

The presently included widgets are easily implemented from scratch in both
Python and Kv, and for most non-EBS applications, you'd probably just want to
roll your own or use some .kv based clock widget available in the wild. The
typical EBS application can get unwieldy pretty quickly though, so this
package affords a separation of concerns which aids in maintainability.

If you are looking for something you can just pip install and which just
works, and don't care that it might be written in Python and might pull in
additional dependencies, then this might be an option.

Included clock widgets :

  - SimpleDigitalClock

No clock which has its own kivy_garden package will be added to this
collection, and the collection itself will grow (very) slowly.

This package is part of the EBS widget collection for Kivy. It is written
mostly in Python and depends on the EBS core widgets and widget infrastructure
package. For more information, see
[kivy_garden.ebs.core](https://github.com/ebs-universe/kivy_garden.ebs.core)

See https://kivy-garden.github.io/ebs.flower/ for the rendered flower docs.

Please see the garden [instructions](https://kivy-garden.github.io) for how to
use kivy garden flowers.

CI
--

Every push or pull request runs the
[GitHub Action](https://github.com/kivy-garden/flower/actions) CI. It tests
the code on various OSes and also generates wheels that can be released on
PyPI upon a tag. Docs are also generated and uploaded to the repo as well as
artifacts of the CI.

TODO
-------

* add your code

Contributing
--------------

Check out our [contribution guide](CONTRIBUTING.md) and feel free to improve
the flower.

License
---------

This software is released under the terms of the MIT License. Please see the
[LICENSE.txt](LICENSE.txt) file.

How to release
===============

See the garden
[instructions](https://kivy-garden.github.io/#makingareleaseforyourflower) for
how to make a new release.
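
A minimal usage sketch (the import path is inferred from the package name and
the no-argument constructor is an assumption, so check the rendered docs):

```python
from kivy.app import App
from kivy_garden.ebs.clocks import SimpleDigitalClock  # assumed import path


class ClockApp(App):
    def build(self):
        # Assumed to work with default arguments; see the flower docs.
        return SimpleDigitalClock()


if __name__ == '__main__':
    ClockApp().run()
```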
PypiClean
/onlinepayments-sdk-python2-4.14.0.zip/onlinepayments-sdk-python2-4.14.0/README.rst
Online Payments Python SDK
==========================

Introduction
------------

The Python SDK helps you to communicate with the payment platform server API.
Its primary features are:

- convenient Python library for the API calls and responses
- marshalls Python request objects to HTTP requests
- unmarshalls HTTP responses to Python response objects or Python exceptions
- handling of all the details concerning authentication
- handling of required meta data

Its use is demonstrated by an example for each possible call. The examples
execute a call using the provided API key.

Structure of this repository
----------------------------

This repository consists of three main components:

#. The source code of the SDK itself: ``/onlinepayments/sdk/``
#. The source code of the SDK unit tests: ``/tests/unit/``
#. The source code of the example integration tests: ``/tests/integration/``

Note that the source code of the unit tests and integration tests can only be
found on GitHub.

Requirements
------------

Python 2.7 is required. In addition, the following package is required:

- `requests <https://requests.readthedocs.io/>`__ 2.20.0 or higher

This package will be installed automatically if the SDK is installed manually
or using pip following the below instructions.

Installation
------------

To install the SDK using pip, execute the following command:

::

   pip install onlinepayments-sdk-python2

Alternatively, you can install the SDK from a source distribution file:

#. Download the latest version of the Python SDK from GitHub. Choose the
   ``onlinepayments-sdk-python2-x.y.z.zip`` file from the
   `releases <https://github.com/wl-online-payments-direct/sdk-python2/releases>`__
   page, where ``x.y.z`` is the version number.

#. Execute the following command in the folder where the SDK was downloaded
   to:

   ::

      pip install onlinepayments-sdk-python2-x.y.z.zip

Uninstalling
------------

After the Python SDK has been installed, it can be uninstalled using the
following command:

::

   pip uninstall onlinepayments-sdk-python2

The required package can be uninstalled in the same way.

Running tests
-------------

| There are two types of tests: unit tests and integration tests. The unit
  tests will work out-of-the-box; for the integration tests some configuration
  is required.
| First, some environment variables need to be set:

- ``onlinePayments.api.apiKeyId`` for the API key id to use.
- ``onlinePayments.api.secretApiKey`` for the secret API key to use.
- ``onlinePayments.api.merchantId`` for your merchant ID.

In order to run the unit and integration tests, the
`mock <https://pypi.python.org/pypi/mock>`__ backport and
`mockito <https://pypi.python.org/pypi/mockito>`__ packages are required.
These can be installed using the following command:

::

   pip install mock mockito

The following commands can then be executed from the ``tests`` directory to
execute the tests:

- Unit tests:

  ::

     python run_unit_tests.py

- Integration tests:

  ::

     python run_integration_tests.py

- Both unit and integration tests:

  ::

     python run_all_tests.py

Note: in the current version of the unit tests, two errors will pop up
([Errno 10053] for Windows and [Errno 32] for Linux), indicating that there
was a client disconnect. These errors occur during cleanup of the tests and do
not hinder the tests in any way, and should therefore be ignored.
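
For reference, the same variables can also be set from Python before invoking
a test runner. This is a sketch: only the variable names above are taken from
this document, and the values are placeholders:

::

   import os
   import subprocess

   # Credentials for the integration tests; replace the placeholder values.
   os.environ['onlinePayments.api.apiKeyId'] = 'your-api-key-id'
   os.environ['onlinePayments.api.secretApiKey'] = 'your-secret-api-key'
   os.environ['onlinePayments.api.merchantId'] = 'your-merchant-id'

   # Assumes the current directory is the repository root; the runner lives in
   # the tests directory as described above.
   subprocess.check_call(['python', 'run_integration_tests.py'], cwd='tests')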
PypiClean
/smartautomatic_server_frontend-20220907.2-py3-none-any.whl/sas_frontend/frontend_latest/d3398bcb.js
"use strict";(self.webpackChunksmartautomatic_server_frontend=self.webpackChunksmartautomatic_server_frontend||[]).push([[28922],{89194:(e,t,i)=>{i(48175),i(65660),i(70019);var r=i(9672),o=i(50856);(0,r.k)({_template:o.d` <style> :host { overflow: hidden; /* needed for text-overflow: ellipsis to work on ff */ @apply --layout-vertical; @apply --layout-center-justified; @apply --layout-flex; } :host([two-line]) { min-height: var(--paper-item-body-two-line-min-height, 72px); } :host([three-line]) { min-height: var(--paper-item-body-three-line-min-height, 88px); } :host > ::slotted(*) { overflow: hidden; text-overflow: ellipsis; white-space: nowrap; } :host > ::slotted([secondary]) { @apply --paper-font-body1; color: var(--paper-item-body-secondary-color, var(--secondary-text-color)); @apply --paper-item-body-secondary; } </style> <slot></slot> `,is:"paper-item-body"})},63864:(e,t,i)=>{i.d(t,{I:()=>r});const r=(e,t,i,r)=>{const[o,n,a]=e.split(".",3);return Number(o)>t||Number(o)===t&&(void 0===r?Number(n)>=i:Number(n)>i)||void 0!==r&&Number(o)===t&&Number(n)===i&&Number(a)>=r}},44583:(e,t,i)=>{i.a(e,(async e=>{i.d(t,{o0:()=>a,yD:()=>l,E8:()=>d});var r=i(14516),o=i(54121),n=i(65810);o.Xp&&await o.Xp;const a=(e,t)=>s(t).format(e),s=(0,r.Z)((e=>new Intl.DateTimeFormat("en"!==e.language||(0,n.y)(e)?e.language:"en-u-hc-h23",{year:"numeric",month:"long",day:"numeric",hour:(0,n.y)(e)?"numeric":"2-digit",minute:"2-digit",hour12:(0,n.y)(e)}))),l=(e,t)=>c(t).format(e),c=(0,r.Z)((e=>new Intl.DateTimeFormat("en"!==e.language||(0,n.y)(e)?e.language:"en-u-hc-h23",{month:"short",day:"numeric",hour:(0,n.y)(e)?"numeric":"2-digit",minute:"2-digit",hour12:(0,n.y)(e)}))),d=(e,t)=>u(t).format(e),u=(0,r.Z)((e=>new Intl.DateTimeFormat("en"!==e.language||(0,n.y)(e)?e.language:"en-u-hc-h23",{year:"numeric",month:"long",day:"numeric",hour:(0,n.y)(e)?"numeric":"2-digit",minute:"2-digit",second:"2-digit",hour12:(0,n.y)(e)})));(0,r.Z)((e=>new Intl.DateTimeFormat("en"!==e.language||(0,n.y)(e)?e.language:"en-u-hc-h23",{year:"numeric",month:"numeric",day:"numeric",hour:"numeric",minute:"2-digit",hour12:(0,n.y)(e)})));e()}),1)},65810:(e,t,i)=>{i.d(t,{y:()=>n});var r=i(14516),o=i(66477);const n=(0,r.Z)((e=>{if(e.time_format===o.zt.language||e.time_format===o.zt.system){const t=e.time_format===o.zt.language?e.language:void 0,i=(new Date).toLocaleString(t);return i.includes("AM")||i.includes("PM")}return e.time_format===o.zt.am_pm}))},50577:(e,t,i)=>{i.d(t,{v:()=>r});const r=async e=>{if(navigator.clipboard)try{return void await navigator.clipboard.writeText(e)}catch{}const t=document.createElement("textarea");t.value=e,document.body.appendChild(t),t.select(),document.execCommand("copy"),document.body.removeChild(t)}},60893:(e,t,i)=>{i.d(t,{T:()=>r});const r=(e,t,i)=>{let r;const o=async()=>{try{await t(e)}finally{r=setTimeout((()=>o()),i)}};return o(),()=>clearTimeout(r)}},34821:(e,t,i)=>{i.d(t,{i:()=>g});var r=i(41085),o=i(91632),n=i(37500),a=i(33310),s=i(74265);i(10983);function l(){l=function(){return e};var e={elementsDefinitionOrder:[["method"],["field"]],initializeInstanceElements:function(e,t){["method","field"].forEach((function(i){t.forEach((function(t){t.kind===i&&"own"===t.placement&&this.defineClassElement(e,t)}),this)}),this)},initializeClassElements:function(e,t){var i=e.prototype;["method","field"].forEach((function(r){t.forEach((function(t){var o=t.placement;if(t.kind===r&&("static"===o||"prototype"===o)){var n="static"===o?e:i;this.defineClassElement(n,t)}}),this)}),this)},defineClassElement:function(e,t){var 
i=t.descriptor;if("field"===t.kind){var r=t.initializer;i={enumerable:i.enumerable,writable:i.writable,configurable:i.configurable,value:void 0===r?void 0:r.call(e)}}Object.defineProperty(e,t.key,i)},decorateClass:function(e,t){var i=[],r=[],o={static:[],prototype:[],own:[]};if(e.forEach((function(e){this.addElementPlacement(e,o)}),this),e.forEach((function(e){if(!u(e))return i.push(e);var t=this.decorateElement(e,o);i.push(t.element),i.push.apply(i,t.extras),r.push.apply(r,t.finishers)}),this),!t)return{elements:i,finishers:r};var n=this.decorateConstructor(i,t);return r.push.apply(r,n.finishers),n.finishers=r,n},addElementPlacement:function(e,t,i){var r=t[e.placement];if(!i&&-1!==r.indexOf(e.key))throw new TypeError("Duplicated element ("+e.key+")");r.push(e.key)},decorateElement:function(e,t){for(var i=[],r=[],o=e.decorators,n=o.length-1;n>=0;n--){var a=t[e.placement];a.splice(a.indexOf(e.key),1);var s=this.fromElementDescriptor(e),l=this.toElementFinisherExtras((0,o[n])(s)||s);e=l.element,this.addElementPlacement(e,t),l.finisher&&r.push(l.finisher);var c=l.extras;if(c){for(var d=0;d<c.length;d++)this.addElementPlacement(c[d],t);i.push.apply(i,c)}}return{element:e,finishers:r,extras:i}},decorateConstructor:function(e,t){for(var i=[],r=t.length-1;r>=0;r--){var o=this.fromClassDescriptor(e),n=this.toClassDescriptor((0,t[r])(o)||o);if(void 0!==n.finisher&&i.push(n.finisher),void 0!==n.elements){e=n.elements;for(var a=0;a<e.length-1;a++)for(var s=a+1;s<e.length;s++)if(e[a].key===e[s].key&&e[a].placement===e[s].placement)throw new TypeError("Duplicated element ("+e[a].key+")")}}return{elements:e,finishers:i}},fromElementDescriptor:function(e){var t={kind:e.kind,key:e.key,placement:e.placement,descriptor:e.descriptor};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),"field"===e.kind&&(t.initializer=e.initializer),t},toElementDescriptors:function(e){var t;if(void 0!==e)return(t=e,function(e){if(Array.isArray(e))return e}(t)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(t)||function(e,t){if(e){if("string"==typeof e)return m(e,t);var i=Object.prototype.toString.call(e).slice(8,-1);return"Object"===i&&e.constructor&&(i=e.constructor.name),"Map"===i||"Set"===i?Array.from(e):"Arguments"===i||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(i)?m(e,t):void 0}}(t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()).map((function(e){var t=this.toElementDescriptor(e);return this.disallowProperty(e,"finisher","An element descriptor"),this.disallowProperty(e,"extras","An element descriptor"),t}),this)},toElementDescriptor:function(e){var t=String(e.kind);if("method"!==t&&"field"!==t)throw new TypeError('An element descriptor\'s .kind property must be either "method" or "field", but a decorator created an element descriptor with .kind "'+t+'"');var i=f(e.key),r=String(e.placement);if("static"!==r&&"prototype"!==r&&"own"!==r)throw new TypeError('An element descriptor\'s .placement property must be one of "static", "prototype" or "own", but a decorator created an element descriptor with .placement "'+r+'"');var o=e.descriptor;this.disallowProperty(e,"elements","An element descriptor");var n={kind:t,key:i,placement:r,descriptor:Object.assign({},o)};return"field"!==t?this.disallowProperty(e,"initializer","A method descriptor"):(this.disallowProperty(o,"get","The property descriptor 
of a field descriptor"),this.disallowProperty(o,"set","The property descriptor of a field descriptor"),this.disallowProperty(o,"value","The property descriptor of a field descriptor"),n.initializer=e.initializer),n},toElementFinisherExtras:function(e){return{element:this.toElementDescriptor(e),finisher:h(e,"finisher"),extras:this.toElementDescriptors(e.extras)}},fromClassDescriptor:function(e){var t={kind:"class",elements:e.map(this.fromElementDescriptor,this)};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),t},toClassDescriptor:function(e){var t=String(e.kind);if("class"!==t)throw new TypeError('A class descriptor\'s .kind property must be "class", but a decorator created a class descriptor with .kind "'+t+'"');this.disallowProperty(e,"key","A class descriptor"),this.disallowProperty(e,"placement","A class descriptor"),this.disallowProperty(e,"descriptor","A class descriptor"),this.disallowProperty(e,"initializer","A class descriptor"),this.disallowProperty(e,"extras","A class descriptor");var i=h(e,"finisher");return{elements:this.toElementDescriptors(e.elements),finisher:i}},runClassFinishers:function(e,t){for(var i=0;i<t.length;i++){var r=(0,t[i])(e);if(void 0!==r){if("function"!=typeof r)throw new TypeError("Finishers must return a constructor.");e=r}}return e},disallowProperty:function(e,t,i){if(void 0!==e[t])throw new TypeError(i+" can't have a ."+t+" property.")}};return e}function c(e){var t,i=f(e.key);"method"===e.kind?t={value:e.value,writable:!0,configurable:!0,enumerable:!1}:"get"===e.kind?t={get:e.value,configurable:!0,enumerable:!1}:"set"===e.kind?t={set:e.value,configurable:!0,enumerable:!1}:"field"===e.kind&&(t={configurable:!0,writable:!0,enumerable:!0});var r={kind:"field"===e.kind?"field":"method",key:i,placement:e.static?"static":"field"===e.kind?"own":"prototype",descriptor:t};return e.decorators&&(r.decorators=e.decorators),"field"===e.kind&&(r.initializer=e.value),r}function d(e,t){void 0!==e.descriptor.get?t.descriptor.get=e.descriptor.get:t.descriptor.set=e.descriptor.set}function u(e){return e.decorators&&e.decorators.length}function p(e){return void 0!==e&&!(void 0===e.value&&void 0===e.writable)}function h(e,t){var i=e[t];if(void 0!==i&&"function"!=typeof i)throw new TypeError("Expected '"+t+"' to be a function");return i}function f(e){var t=function(e,t){if("object"!=typeof e||null===e)return e;var i=e[Symbol.toPrimitive];if(void 0!==i){var r=i.call(e,t||"default");if("object"!=typeof r)return r;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:String(t)}function m(e,t){(null==t||t>e.length)&&(t=e.length);for(var i=0,r=new Array(t);i<t;i++)r[i]=e[i];return r}function y(e,t,i){return y="undefined"!=typeof Reflect&&Reflect.get?Reflect.get:function(e,t,i){var r=function(e,t){for(;!Object.prototype.hasOwnProperty.call(e,t)&&null!==(e=v(e)););return e}(e,t);if(r){var o=Object.getOwnPropertyDescriptor(r,t);return o.get?o.get.call(i):o.value}},y(e,t,i||e)}function v(e){return v=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)},v(e)}const g=(e,t)=>n.dy` <div class="header_title">${t}</div> <ha-icon-button .label=${e.localize("ui.dialogs.generic.close")} .path=${"M19,6.41L17.59,5L12,10.59L6.41,5L5,6.41L10.59,12L5,17.59L6.41,19L12,13.41L17.59,19L19,17.59L13.41,12L19,6.41Z"} dialogAction="close" class="header_button" ></ha-icon-button> `;!function(e,t,i,r){var o=l();if(r)for(var 
n=0;n<r.length;n++)o=r[n](o);var a=t((function(e){o.initializeInstanceElements(e,s.elements)}),i),s=o.decorateClass(function(e){for(var t=[],i=function(e){return"method"===e.kind&&e.key===n.key&&e.placement===n.placement},r=0;r<e.length;r++){var o,n=e[r];if("method"===n.kind&&(o=t.find(i)))if(p(n.descriptor)||p(o.descriptor)){if(u(n)||u(o))throw new ReferenceError("Duplicated methods ("+n.key+") can't be decorated.");o.descriptor=n.descriptor}else{if(u(n)){if(u(o))throw new ReferenceError("Decorators can't be placed on different accessors with for the same property ("+n.key+").");o.decorators=n.decorators}d(n,o)}else t.push(n)}return t}(a.d.map(c)),e);o.initializeClassElements(a.F,s.elements),o.runClassFinishers(a.F,s.finishers)}([(0,a.Mo)("ha-dialog")],(function(e,t){class i extends t{constructor(...t){super(...t),e(this)}}return{F:i,d:[{kind:"field",key:s.gA,value:void 0},{kind:"method",key:"scrollToPos",value:function(e,t){var i;null===(i=this.contentElement)||void 0===i||i.scrollTo(e,t)}},{kind:"method",key:"renderHeading",value:function(){return n.dy`<slot name="heading"> ${y(v(i.prototype),"renderHeading",this).call(this)} </slot>`}},{kind:"field",static:!0,key:"styles",value:()=>[o.W,n.iv` .mdc-dialog { --mdc-dialog-scroll-divider-color: var(--divider-color); z-index: var(--dialog-z-index, 7); -webkit-backdrop-filter: var(--dialog-backdrop-filter, none); backdrop-filter: var(--dialog-backdrop-filter, none); --mdc-dialog-box-shadow: var(--dialog-box-shadow, none); --mdc-typography-headline6-font-weight: 400; --mdc-typography-headline6-font-size: 1.574rem; } .mdc-dialog__actions { justify-content: var(--justify-action-buttons, flex-end); padding-bottom: max(env(safe-area-inset-bottom), 24px); } .mdc-dialog__actions span:nth-child(1) { flex: var(--secondary-action-button-flex, unset); } .mdc-dialog__actions span:nth-child(2) { flex: var(--primary-action-button-flex, unset); } .mdc-dialog__container { align-items: var(--vertial-align-dialog, center); } .mdc-dialog__title { padding: 24px 24px 0 24px; } .mdc-dialog__actions { padding: 0 24px 24px 24px; } .mdc-dialog__title::before { display: block; height: 0px; } .mdc-dialog .mdc-dialog__content { position: var(--dialog-content-position, relative); padding: var(--dialog-content-padding, 24px); } :host([hideactions]) .mdc-dialog .mdc-dialog__content { padding-bottom: max( var(--dialog-content-padding, 24px), env(safe-area-inset-bottom) ); } .mdc-dialog .mdc-dialog__surface { position: var(--dialog-surface-position, relative); top: var(--dialog-surface-top); margin-top: var(--dialog-surface-margin-top); min-height: var(--mdc-dialog-min-height, auto); border-radius: var(--ha-dialog-border-radius, 28px); } :host([flexContent]) .mdc-dialog .mdc-dialog__content { display: flex; flex-direction: column; } .header_button { position: absolute; right: 16px; top: 10px; text-decoration: none; color: inherit; } .header_title { margin-right: 32px; margin-inline-end: 32px; margin-inline-start: initial; direction: var(--direction); } .header_button { inset-inline-start: initial; inset-inline-end: 16px; direction: var(--direction); } .dialog-actions { inset-inline-start: initial !important; inset-inline-end: 0px !important; direction: var(--direction); } `]}]}}),r.M)},21897:(e,t,i)=>{i.d(t,{_F:()=>o,js:()=>n,yz:()=>s,yO:()=>l});var r=i(63864);const o=e=>e.data,n=e=>"object"==typeof e?"object"==typeof e.body?e.body.message||"Unknown error, see supervisor logs":e.body||e.message||"Unknown error, see supervisor logs":e,a=new 
Set([502,503,504]),s=e=>!!(e&&e.status_code&&a.has(e.status_code))||!(!e||!e.message||!e.message.includes("ERR_CONNECTION_CLOSED")&&!e.message.includes("ERR_CONNECTION_RESET")),l=async(e,t)=>(0,r.I)(e.config.version,2021,2,4)?e.callWS({type:"supervisor/api",endpoint:`/${t}/stats`,method:"get"}):o(await e.callApi("GET",`saserver/${t}/stats`))},46884:(e,t,i)=>{i.d(t,{I:()=>n});var r=i(63864),o=i(21897);const n=async e=>(0,r.I)(e.config.version,2021,2,4)?e.callWS({type:"supervisor/api",endpoint:"/resolution/info",method:"get"}):(0,o._F)(await e.callApi("GET","saserver/resolution/info"))},56799:(e,t,i)=>{i.d(t,{V:()=>r});const r=(e,t)=>{let i={};const r=e.connection.subscribeMessage((e=>{if("initial"===e.type)return i=e.data,void t(i);"finish"!==e.type?(i={...i,[e.domain]:{...i[e.domain],info:{...i[e.domain].info,[e.key]:e.success?e.data:{error:!0,value:e.error.msg}}}},t(i)):r.then((e=>e()))}),{type:"system_health/info"});return r}},26765:(e,t,i)=>{i.d(t,{Ys:()=>a,g7:()=>s,D9:()=>l});var r=i(47181);const o=()=>Promise.all([i.e(85084),i.e(1281)]).then(i.bind(i,1281)),n=(e,t,i)=>new Promise((n=>{const a=t.cancel,s=t.confirm;(0,r.B)(e,"show-dialog",{dialogTag:"dialog-box",dialogImport:o,dialogParams:{...t,...i,cancel:()=>{n(!(null==i||!i.prompt)&&null),a&&a()},confirm:e=>{n(null==i||!i.prompt||e),s&&s(e)}}})})),a=(e,t)=>n(e,t),s=(e,t)=>n(e,t,{confirmation:!0}),l=(e,t)=>n(e,t,{prompt:!0})},49426:(e,t,i)=>{i.a(e,(async e=>{i.r(t),i.d(t,{UNSUPPORTED_REASON_URL:()=>P,UNHEALTHY_REASON_URL:()=>T});i(51187);var r=i(37500),o=i(33310),n=i(7323),a=i(44583),s=i(47181),l=i(50577),c=i(60893),d=(i(9381),i(22098),i(34821)),u=(i(2130),i(21897)),p=i(46884),h=i(5986),f=i(56799),m=i(26765),y=i(11654),v=i(27322),g=i(81796),b=(i(31206),e([a]));function w(){w=function(){return e};var e={elementsDefinitionOrder:[["method"],["field"]],initializeInstanceElements:function(e,t){["method","field"].forEach((function(i){t.forEach((function(t){t.kind===i&&"own"===t.placement&&this.defineClassElement(e,t)}),this)}),this)},initializeClassElements:function(e,t){var i=e.prototype;["method","field"].forEach((function(r){t.forEach((function(t){var o=t.placement;if(t.kind===r&&("static"===o||"prototype"===o)){var n="static"===o?e:i;this.defineClassElement(n,t)}}),this)}),this)},defineClassElement:function(e,t){var i=t.descriptor;if("field"===t.kind){var r=t.initializer;i={enumerable:i.enumerable,writable:i.writable,configurable:i.configurable,value:void 0===r?void 0:r.call(e)}}Object.defineProperty(e,t.key,i)},decorateClass:function(e,t){var i=[],r=[],o={static:[],prototype:[],own:[]};if(e.forEach((function(e){this.addElementPlacement(e,o)}),this),e.forEach((function(e){if(!E(e))return i.push(e);var t=this.decorateElement(e,o);i.push(t.element),i.push.apply(i,t.extras),r.push.apply(r,t.finishers)}),this),!t)return{elements:i,finishers:r};var n=this.decorateConstructor(i,t);return r.push.apply(r,n.finishers),n.finishers=r,n},addElementPlacement:function(e,t,i){var r=t[e.placement];if(!i&&-1!==r.indexOf(e.key))throw new TypeError("Duplicated element ("+e.key+")");r.push(e.key)},decorateElement:function(e,t){for(var i=[],r=[],o=e.decorators,n=o.length-1;n>=0;n--){var a=t[e.placement];a.splice(a.indexOf(e.key),1);var s=this.fromElementDescriptor(e),l=this.toElementFinisherExtras((0,o[n])(s)||s);e=l.element,this.addElementPlacement(e,t),l.finisher&&r.push(l.finisher);var c=l.extras;if(c){for(var 
d=0;d<c.length;d++)this.addElementPlacement(c[d],t);i.push.apply(i,c)}}return{element:e,finishers:r,extras:i}},decorateConstructor:function(e,t){for(var i=[],r=t.length-1;r>=0;r--){var o=this.fromClassDescriptor(e),n=this.toClassDescriptor((0,t[r])(o)||o);if(void 0!==n.finisher&&i.push(n.finisher),void 0!==n.elements){e=n.elements;for(var a=0;a<e.length-1;a++)for(var s=a+1;s<e.length;s++)if(e[a].key===e[s].key&&e[a].placement===e[s].placement)throw new TypeError("Duplicated element ("+e[a].key+")")}}return{elements:e,finishers:i}},fromElementDescriptor:function(e){var t={kind:e.kind,key:e.key,placement:e.placement,descriptor:e.descriptor};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),"field"===e.kind&&(t.initializer=e.initializer),t},toElementDescriptors:function(e){var t;if(void 0!==e)return(t=e,function(e){if(Array.isArray(e))return e}(t)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(t)||function(e,t){if(e){if("string"==typeof e)return D(e,t);var i=Object.prototype.toString.call(e).slice(8,-1);return"Object"===i&&e.constructor&&(i=e.constructor.name),"Map"===i||"Set"===i?Array.from(e):"Arguments"===i||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(i)?D(e,t):void 0}}(t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()).map((function(e){var t=this.toElementDescriptor(e);return this.disallowProperty(e,"finisher","An element descriptor"),this.disallowProperty(e,"extras","An element descriptor"),t}),this)},toElementDescriptor:function(e){var t=String(e.kind);if("method"!==t&&"field"!==t)throw new TypeError('An element descriptor\'s .kind property must be either "method" or "field", but a decorator created an element descriptor with .kind "'+t+'"');var i=S(e.key),r=String(e.placement);if("static"!==r&&"prototype"!==r&&"own"!==r)throw new TypeError('An element descriptor\'s .placement property must be one of "static", "prototype" or "own", but a decorator created an element descriptor with .placement "'+r+'"');var o=e.descriptor;this.disallowProperty(e,"elements","An element descriptor");var n={kind:t,key:i,placement:r,descriptor:Object.assign({},o)};return"field"!==t?this.disallowProperty(e,"initializer","A method descriptor"):(this.disallowProperty(o,"get","The property descriptor of a field descriptor"),this.disallowProperty(o,"set","The property descriptor of a field descriptor"),this.disallowProperty(o,"value","The property descriptor of a field descriptor"),n.initializer=e.initializer),n},toElementFinisherExtras:function(e){return{element:this.toElementDescriptor(e),finisher:$(e,"finisher"),extras:this.toElementDescriptors(e.extras)}},fromClassDescriptor:function(e){var t={kind:"class",elements:e.map(this.fromElementDescriptor,this)};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),t},toClassDescriptor:function(e){var t=String(e.kind);if("class"!==t)throw new TypeError('A class descriptor\'s .kind property must be "class", but a decorator created a class descriptor with .kind "'+t+'"');this.disallowProperty(e,"key","A class descriptor"),this.disallowProperty(e,"placement","A class descriptor"),this.disallowProperty(e,"descriptor","A class descriptor"),this.disallowProperty(e,"initializer","A class descriptor"),this.disallowProperty(e,"extras","A class descriptor");var 
i=$(e,"finisher");return{elements:this.toElementDescriptors(e.elements),finisher:i}},runClassFinishers:function(e,t){for(var i=0;i<t.length;i++){var r=(0,t[i])(e);if(void 0!==r){if("function"!=typeof r)throw new TypeError("Finishers must return a constructor.");e=r}}return e},disallowProperty:function(e,t,i){if(void 0!==e[t])throw new TypeError(i+" can't have a ."+t+" property.")}};return e}function k(e){var t,i=S(e.key);"method"===e.kind?t={value:e.value,writable:!0,configurable:!0,enumerable:!1}:"get"===e.kind?t={get:e.value,configurable:!0,enumerable:!1}:"set"===e.kind?t={set:e.value,configurable:!0,enumerable:!1}:"field"===e.kind&&(t={configurable:!0,writable:!0,enumerable:!0});var r={kind:"field"===e.kind?"field":"method",key:i,placement:e.static?"static":"field"===e.kind?"own":"prototype",descriptor:t};return e.decorators&&(r.decorators=e.decorators),"field"===e.kind&&(r.initializer=e.value),r}function _(e,t){void 0!==e.descriptor.get?t.descriptor.get=e.descriptor.get:t.descriptor.set=e.descriptor.set}function E(e){return e.decorators&&e.decorators.length}function x(e){return void 0!==e&&!(void 0===e.value&&void 0===e.writable)}function $(e,t){var i=e[t];if(void 0!==i&&"function"!=typeof i)throw new TypeError("Expected '"+t+"' to be a function");return i}function S(e){var t=function(e,t){if("object"!=typeof e||null===e)return e;var i=e[Symbol.toPrimitive];if(void 0!==i){var r=i.call(e,t||"default");if("object"!=typeof r)return r;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"==typeof t?t:String(t)}function D(e,t){(null==t||t>e.length)&&(t=e.length);for(var i=0,r=new Array(t);i<t;i++)r[i]=e[i];return r}a=(b.then?await b:b)[0];const z=(e,t)=>"smartautomatic"===e?-1:"smartautomatic"===t?1:e<t?-1:t<e?1:0,P={},T={privileged:"/more-info/unsupported/privileged"};!function(e,t,i,r){var o=w();if(r)for(var n=0;n<r.length;n++)o=r[n](o);var a=t((function(e){o.initializeInstanceElements(e,s.elements)}),i),s=o.decorateClass(function(e){for(var t=[],i=function(e){return"method"===e.kind&&e.key===n.key&&e.placement===n.placement},r=0;r<e.length;r++){var o,n=e[r];if("method"===n.kind&&(o=t.find(i)))if(x(n.descriptor)||x(o.descriptor)){if(E(n)||E(o))throw new ReferenceError("Duplicated methods ("+n.key+") can't be decorated.");o.descriptor=n.descriptor}else{if(E(n)){if(E(o))throw new ReferenceError("Decorators can't be placed on different accessors with for the same property ("+n.key+").");o.decorators=n.decorators}_(n,o)}else t.push(n)}return t}(a.d.map(k)),e);o.initializeClassElements(a.F,s.elements),o.runClassFinishers(a.F,s.finishers)}([(0,o.Mo)("dialog-system-information")],(function(e,t){return{F:class extends t{constructor(...t){super(...t),e(this)}},d:[{kind:"field",decorators:[(0,o.Cb)({attribute:!1})],key:"hass",value:void 0},{kind:"field",decorators:[(0,o.SB)()],key:"_systemInfo",value:void 0},{kind:"field",decorators:[(0,o.SB)()],key:"_resolutionInfo",value:void 0},{kind:"field",decorators:[(0,o.SB)()],key:"_supervisorStats",value:void 0},{kind:"field",decorators:[(0,o.SB)()],key:"_coreStats",value:void 0},{kind:"field",decorators:[(0,o.SB)()],key:"_opened",value:()=>!1},{kind:"field",key:"_subscriptions",value:void 
0},{kind:"method",key:"showDialog",value:function(){this._opened=!0,this.hass.loadBackendTranslation("system_health"),this._subscribe()}},{kind:"method",key:"closeDialog",value:function(){this._opened=!1,this._unsubscribe(),(0,s.B)(this,"dialog-closed",{dialog:this.localName})}},{kind:"method",key:"_subscribe",value:function(){const e=[];(0,n.p)(this.hass,"system_health")&&e.push((0,f.V)(this.hass,(e=>{this._systemInfo=e}))),(0,n.p)(this.hass,"saserver")&&(e.push((0,c.T)(this.hass,(async()=>{this._supervisorStats=await(0,u.yO)(this.hass,"supervisor"),this._coreStats=await(0,u.yO)(this.hass,"core")}),1e4)),(0,p.I)(this.hass).then((e=>{this._resolutionInfo=e}))),this._subscriptions=e}},{kind:"method",key:"_unsubscribe",value:function(){for(;null!==(e=this._subscriptions)&&void 0!==e&&e.length;){var e;const t=this._subscriptions.pop();t instanceof Promise?t.then((e=>e())):t()}this._subscriptions=void 0,this._systemInfo=void 0,this._resolutionInfo=void 0,this._coreStats=void 0,this._supervisorStats=void 0}},{kind:"method",key:"render",value:function(){if(!this._opened)return r.dy``;const e=this._getSections();return r.dy` <ha-dialog open @closed=${this.closeDialog} scrimClickAction escapeKeyAction .heading=${(0,d.i)(this.hass,this.hass.localize("ui.panel.config.repairs.system_information"))} > <div> ${this._resolutionInfo?r.dy`${this._resolutionInfo.unhealthy.length?r.dy`<ha-alert alert-type="error"> ${this.hass.localize("ui.dialogs.unhealthy.title")} <mwc-button slot="action" .label=${this.hass.localize("ui.panel.config.common.learn_more")} @click=${this._unhealthyDialog} > </mwc-button ></ha-alert>`:""} ${this._resolutionInfo.unsupported.length?r.dy`<ha-alert alert-type="warning"> ${this.hass.localize("ui.dialogs.unsupported.title")} <mwc-button slot="action" .label=${this.hass.localize("ui.panel.config.common.learn_more")} @click=${this._unsupportedDialog} > </mwc-button> </ha-alert>`:""} `:""} <div>${e}</div> ${this._coreStats||this._supervisorStats?r.dy` <div> ${this._coreStats?r.dy` <h3> ${this.hass.localize("ui.panel.config.system_health.core_stats")} </h3> <ha-metric .heading=${this.hass.localize("ui.panel.config.system_health.cpu_usage")} .value=${this._coreStats.cpu_percent} ></ha-metric> <ha-metric .heading=${this.hass.localize("ui.panel.config.system_health.ram_usage")} .value=${this._coreStats.memory_percent} ></ha-metric> `:""} ${this._supervisorStats?r.dy` <h3> ${this.hass.localize("ui.panel.config.system_health.supervisor_stats")} </h3> <ha-metric .heading=${this.hass.localize("ui.panel.config.system_health.cpu_usage")} .value=${this._supervisorStats.cpu_percent} ></ha-metric> <ha-metric .heading=${this.hass.localize("ui.panel.config.system_health.ram_usage")} .value=${this._supervisorStats.memory_percent} ></ha-metric> `:""} </div> `:""} </div> <mwc-button slot="primaryAction" .label=${this.hass.localize("ui.panel.config.repairs.copy")} @click=${this._copyInfo} ></mwc-button> </ha-dialog> `}},{kind:"method",key:"_unsupportedDialog",value:async function(){await(0,m.Ys)(this,{title:this.hass.localize("ui.dialogs.unsupported.title"),text:r.dy`${this.hass.localize("ui.dialogs.unsupported.description")} <br /><br /> <ul> ${this._resolutionInfo.unsupported.map((e=>r.dy` <li> <a href=${(0,v.R)(this.hass,P[e]||`/more-info/unsupported/${e}`)} target="_blank" rel="noreferrer" > ${this.hass.localize(`ui.dialogs.unsupported.reason.${e}`)||e} </a> </li> `))} </ul>`})}},{kind:"method",key:"_unhealthyDialog",value:async 
function(){await(0,m.Ys)(this,{title:this.hass.localize("ui.dialogs.unhealthy.title"),text:r.dy`${this.hass.localize("ui.dialogs.unhealthy.description")} <br /><br /> <ul> ${this._resolutionInfo.unhealthy.map((e=>r.dy` <li> <a href=${(0,v.R)(this.hass,T[e]||`/more-info/unhealthy/${e}`)} target="_blank" rel="noreferrer" > ${this.hass.localize(`ui.dialogs.unhealthy.reason.${e}`)||e} </a> </li> `))} </ul>`})}},{kind:"method",key:"_getSections",value:function(){const e=[];if(this._systemInfo){const t=Object.keys(this._systemInfo).sort(z);for(const i of t){const t=this._systemInfo[i],o=[];for(const e of Object.keys(t.info)){let n;if(t.info[e]&&"object"==typeof t.info[e]){const i=t.info[e];"pending"===i.type?n=r.dy` <ha-circular-progress active size="tiny"></ha-circular-progress> `:"failed"===i.type?n=r.dy` <span class="error">${i.error}</span>${i.more_info?r.dy` – <a href=${i.more_info} target="_blank" rel="noreferrer noopener" > ${this.hass.localize("ui.panel.config.info.system_health.more_info")} </a> `:""} `:"date"===i.type&&(n=(0,a.o0)(new Date(i.value),this.hass.locale))}else n=t.info[e];o.push(r.dy` <tr> <td> ${this.hass.localize(`component.${i}.system_health.info.${e}`)||e} </td> <td>${n}</td> </tr> `)}"smartautomatic"!==i&&e.push(r.dy` <div class="card-header"> <h3>${(0,h.Lh)(this.hass.localize,i)}</h3> ${t.manage_url?r.dy` <a class="manage" href=${t.manage_url}> <mwc-button> ${this.hass.localize("ui.panel.config.info.system_health.manage")} </mwc-button> </a> `:""} </div> `),e.push(r.dy` <table> ${o} </table> `)}}else e.push(r.dy` <div class="loading-container"> <ha-circular-progress active></ha-circular-progress> </div> `);return e}},{kind:"method",key:"_copyInfo",value:async function(){let e;const t=[];for(const i of Object.keys(this._systemInfo).sort(z)){const r=this._systemInfo[i];let o=!0;const n=[""+("smartautomatic"!==i?`<details><summary>${(0,h.Lh)(this.hass.localize,i)}</summary>\n`:"")];for(const e of Object.keys(r.info)){let t;if(r.info[e]&&"object"==typeof r.info[e]){const i=r.info[e];"pending"===i.type?t="pending":"failed"===i.type?t=`failed to load: ${i.error}`:"date"===i.type&&(t=(0,a.o0)(new Date(i.value),this.hass.locale))}else t=r.info[e];o?(n.push(`${e} | ${t}\n-- | --`),o=!1):n.push(`${e} | ${t}`)}"smartautomatic"===i?e=n.join("\n"):(t.push(n.join("\n")),"smartautomatic"!==i&&t.push("</details>"))}await(0,l.v)(`## System Information\n${e}\n\n${t.join("\n\n")}`),(0,g.C)(this,{message:this.hass.localize("ui.common.copied_clipboard")})}},{kind:"field",static:!0,key:"styles",value:()=>[y.yu,r.iv` ha-alert { margin-bottom: 16px; display: block; } table { width: 100%; } td:first-child { width: 45%; } td:last-child { direction: ltr; } .loading-container { display: flex; align-items: center; justify-content: center; } .card-header { justify-content: space-between; display: flex; align-items: center; } .error { color: var(--error-color); } a.manage { text-decoration: none; } `]}]}}),r.oi)}))},27322:(e,t,i)=>{i.d(t,{R:()=>r});const r=(e,t)=>`https://${e.config.version.includes("b")?"rc":e.config.version.includes("dev")?"next":"www"}.smartautomatic.duckdns.org:8091${t}`}}]); //# sourceMappingURL=d3398bcb.js.map
PypiClean
/monk_keras_cuda100_test-0.0.1-py3-none-any.whl/monk/gluon/finetune/level_8_layers_main.py
import math from monk.gluon.finetune.imports import * from monk.system.imports import * from monk.gluon.finetune.level_7_aux_main import prototype_aux class prototype_layers(prototype_aux): ''' Main class for all layers and activations - Layers and activations while appending to base network - Layers and activations while creating custom network Args: verbose (int): Set verbosity levels 0 - Print Nothing 1 - Print desired details ''' @accepts("self", verbose=int, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def __init__(self, verbose=1): super().__init__(verbose=verbose); ##################################################################################################################################### @warning_checks(None, num_neurons=["lt", 10000], final_layer=None, post_trace=False) @error_checks(None, num_neurons=["gt", 0], final_layer=None, post_trace=False) @accepts("self", num_neurons=[int, bool], final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_linear(self, num_neurons=False, final_layer=False): ''' Append dense (fully connected) layer to base network in transfer learning Args: num_neurons (int): Number of neurons in the dense layer final_layer (bool): If True, then the number of neurons is directly set to the number of classes in the dataset for single label type classification Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: if(not num_neurons): num_neurons = self.system_dict["dataset"]["params"]["num_classes"]; self.system_dict = layer_linear(self.system_dict, num_neurons=num_neurons, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, probability=["lt", 0.7], final_layer=None, post_trace=False) @error_checks(None, probability=["gt", 0, "lt", 1], final_layer=None, post_trace=False) @accepts("self", probability=float, final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_dropout(self, probability=0.5, final_layer=False): ''' Append dropout layer to base network in transfer learning Args: probability (float): Dropping probability of neurons in the next layer final_layer (bool): Indicator that this layer marks the end of the network. Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = layer_dropout(self.system_dict, probability=probability, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @accepts("self", final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_relu(self, final_layer=False): ''' Append rectified linear unit activation to base network in transfer learning Args: final_layer (bool): Indicator that this layer marks the end of the network.
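# --- Editor's usage sketch (hedged): how append_linear/append_dropout above are
# typically called. This assumes Monk's usual Gluon workflow, in which a prototype
# object is first bound to a project, dataset, and base model; the project name,
# dataset path, and model name below are placeholders, and the exact surrounding
# calls (e.g. Default and its keyword arguments) may differ between Monk versions.
from monk.gluon_prototype import prototype

gtf = prototype(verbose=1)
gtf.Prototype("sample-project", "append-layers-demo")
gtf.Default(dataset_path="./dataset", model_name="resnet18_v1", num_epochs=5)
gtf.append_dropout(probability=0.5)   # regularize the appended head
gtf.append_linear(final_layer=True)   # num_neurons falls back to num_classes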
Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = activation_relu(self.system_dict, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @accepts("self", final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_sigmoid(self, final_layer=False): ''' Append sigmoid activation to base network in transfer learning Args: final_layer (bool): Indicator that this layer marks the end of the network. Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = activation_sigmoid(self.system_dict, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @accepts("self", final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_tanh(self, final_layer=False): ''' Append tanh activation to base network in transfer learning Args: final_layer (bool): Indicator that this layer marks the end of the network. Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = activation_tanh(self.system_dict, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @accepts("self", beta=[float, int], threshold=[float, int], final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_softplus(self, beta=1, threshold=20, final_layer=False): ''' Append softplus activation to base network in transfer learning Args: beta (int): Multiplicative factor threshold (int): softplus (thresholded relu) limit final_layer (bool): Indicator that this layer marks the end of the network. Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = activation_softplus(self.system_dict, beta=beta, threshold=threshold, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @accepts("self", final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_softsign(self, final_layer=False): ''' Append softsign activation to base network in transfer learning Args: final_layer (bool): Indicator that this layer marks the end of the network.
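# --- Editor's note (hedged sketch, continuing the example above): with the
# corrected argument forwarding in append_softplus, user-supplied beta/threshold
# values now actually reach activation_softplus. The final_layer guard repeated
# in every method means any append after a final layer raises ConstraintError.
gtf.append_softplus(beta=1.5, threshold=10)
gtf.append_linear(final_layer=True)
try:
    gtf.append_relu()                 # rejected: the network was closed above
except Exception as err:              # ConstraintError in this codebase
    print(err)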
Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = activation_softsign(self.system_dict, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, negative_slope=["lt", 0.2], final_layer=None, post_trace=False) @error_checks(None, negative_slope=["gt", 0], final_layer=None, post_trace=False) @accepts("self", negative_slope=[float, int], final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_leakyrelu(self, negative_slope=0.01, final_layer=False): ''' Append Leaky ReLU activation to base network in transfer learning Args: negative_slope (float): Multiplicative factor towards the negative spectrum of real numbers. final_layer (bool): Indicator that this layer marks the end of the network. Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = activation_leakyrelu(self.system_dict, negative_slope=negative_slope, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, num_parameters=["lt", 1], final_layer=None, post_trace=False) @error_checks(None, num_parameters=["gt", 0], init=["gt", 0], final_layer=None, post_trace=False) @accepts("self", num_parameters=int, init=[int, float], final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_prelu(self, num_parameters=1, init=0.25, final_layer=False): ''' Append learnable parameterized rectified linear unit (PReLU) activation to base network in transfer learning Args: num_parameters (int): Number of learnable negative-slope parameters init (float): Initialization value for the multiplicative factor towards the negative spectrum of real numbers. final_layer (bool): Indicator that this layer marks the end of the network. Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = activation_prelu(self.system_dict, num_parameters=num_parameters, init=init, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @accepts("self", alpha=[float, int], final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_elu(self, alpha=1.0, final_layer=False): ''' Append exponential linear unit activation to base network in transfer learning Args: alpha (float): Multiplicative factor. final_layer (bool): Indicator that this layer marks the end of the network.
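# --- Editor's sketch (hedged, assumes a prototype set up as in the first sketch,
# before any final layer is appended): the difference between the two
# negative-slope activations above. append_leakyrelu fixes the slope at
# construction time, while append_prelu registers num_parameters learnable
# slopes initialized to init.
gtf.append_leakyrelu(negative_slope=0.01)        # constant slope for x < 0
gtf.append_prelu(num_parameters=1, init=0.25)    # one learnable, shared slope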
Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = activation_elu(self.system_dict, alpha=alpha, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @accepts("self", final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_selu(self, final_layer=False): ''' Append scaled exponential linear unit activation to base network in transfer learning Args: final_layer (bool): Indicator that this layer marks the end of the network. Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = activation_selu(self.system_dict, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, beta=["lt", 2], final_layer=None, post_trace=False) @error_checks(None, beta=["gt", 0], final_layer=None, post_trace=False) @accepts("self", beta=[float, int], final_layer=bool, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def append_swish(self, beta=1.0, final_layer=False): ''' Append swish activation to base network in transfer learning Args: beta (float): Multiplicative factor final_layer (bool): Indicator that this layer marks the end of the network.
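# --- Editor's reference (standalone NumPy sketch): closed forms of two of the
# activations documented above, useful for sanity-checking layer outputs.
# swish(x) = x * sigmoid(beta * x); elu(x) = x for x > 0, alpha*(exp(x)-1) otherwise.
import numpy as np

def swish(x, beta=1.0):
    return x / (1.0 + np.exp(-beta * x))   # equals x * sigmoid(beta * x)

def elu(x, alpha=1.0):
    return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

print(swish(np.array([-2.0, 0.0, 2.0])))   # approx [-0.238  0.     1.762]
print(elu(np.array([-2.0, 0.0, 2.0])))     # approx [-0.865  0.     2.   ]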
Returns: None ''' if(self.system_dict["model"]["final_layer"]): msg = "Cannot append more layers.\n"; msg += "Tip: The previously appended layer was marked as the final layer"; raise ConstraintError(msg); else: self.system_dict = activation_swish(self.system_dict, beta=beta, final_layer=final_layer); ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, output_channels=["lt", 2048], kernel_size=["lt", 16], stride=["lt", 16], padding=None, groups=None, dilation=None, use_bias=None, layout=None, uid=None, post_trace=False) @error_checks(None, output_channels=["gt", 0], kernel_size=["gt", 0], stride=["gte", 1], padding=None, groups=["gte", 1], dilation=["gte", 1], use_bias=None, layout=["eq", "NCW"], uid=None, post_trace=False) @accepts("self", output_channels=int, kernel_size=int, stride=int, padding=[str, int], groups=int, dilation=int, use_bias=bool, layout=str, uid=[type(None), str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def convolution1d(self, output_channels=3, kernel_size=3, stride=1, padding="in_eq_out", groups=1, dilation=1, use_bias=True, layout='NCW', uid=None): ''' Append 1d-convolution to custom network Args: output_channels (int): Number of output features for this layer kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding groups (int): Number of groups for grouped convolution dilation (int): Factor for dilated convolution use_bias (bool): If True, learnable bias is added layout (str): Either of these values (order) 1) 'NCW' - order 2) 'NWC' - order - N: Number of elements in batches - C: Number of channels - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp={}; tmp["uid"] = uid; tmp["name"] = "convolution1d"; tmp["params"] = {}; tmp["params"]["output_channels"] = output_channels; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["groups"] = groups; tmp["params"]["dilation"] = dilation; tmp["params"]["use_bias"] = use_bias; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, output_channels=["lt", 2048], kernel_size=["lt", 16], stride=["lt", 16], padding=None, groups=None, dilation=None, use_bias=None, layout=None, uid=None, post_trace=False) @error_checks(None, output_channels=["gt", 0], kernel_size=["gt", 0], stride=["gte", 1], padding=None, groups=["gte", 1], dilation=["gte", 1], use_bias=None, layout=["eq", "NCHW"], uid=None, post_trace=False) @accepts("self", output_channels=int, kernel_size=[int, tuple], stride=[int, tuple], padding=[str, int, tuple], groups=int, dilation=int, use_bias=bool, layout=str, uid=[type(None), str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def convolution2d(self,
output_channels=3, kernel_size=3, stride=1, padding="in_eq_out", groups=1, dilation=1, use_bias=True, layout='NCHW', uid=None): ''' Append 2d-convolution to custom network Args: output_channels (int): Number of output features for this layer kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding groups (int): Number of groups for grouped convolution dilation (int): Factor for dilated convolution use_bias (bool): If True, learnable bias is added layout (str): Either of these values (order) 1) 'NCHW' - Order 2) 'NHWC' - Order - N: Number of elements in batches - C: Number of channels - H: Height of features in layers - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp={}; tmp["uid"] = uid; tmp["name"] = "convolution2d"; tmp["params"] = {}; tmp["params"]["output_channels"] = output_channels; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["groups"] = groups; tmp["params"]["dilation"] = dilation; tmp["params"]["use_bias"] = use_bias; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, output_channels=["lt", 2048], kernel_size=["lt", 16], stride=["lt", 16], padding=None, groups=None, dilation=None, use_bias=None, layout=None, uid=None, post_trace=False) @error_checks(None, output_channels=["gt", 0], kernel_size=["gt", 0], stride=["gte", 1], padding=None, groups=["gte", 1], dilation=["gte", 1], use_bias=None, layout=["eq", "NCHW"], uid=None, post_trace=False) @accepts("self", output_channels=int, kernel_size=[int, tuple], stride=[int, tuple], padding=[str, int, tuple], groups=int, dilation=int, use_bias=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def convolution(self, output_channels=3, kernel_size=3, stride=1, padding="in_eq_out", groups=1, dilation=1, use_bias=True, layout='NCHW', uid=None): ''' Append 2d-convolution to custom network Args: output_channels (int): Number of output features for this layer kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding groups (int): Number of groups for grouped convolution dilation (int): Factor for dilated convolution use_bias (bool): If True, learnable bias is added layout (str): Either of these values (order) 1) 'NCHW' - Order 2) 'NHWC' - Order - N: Number of elements in batches - C: Number of channels - H: Height of features in layers - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp={}; tmp["uid"] = uid; tmp["name"] = "convolution2d"; tmp["params"] = {}; tmp["params"]["output_channels"] = output_channels; tmp["params"]["kernel_size"] 
= kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["groups"] = groups; tmp["params"]["dilation"] = dilation; tmp["params"]["use_bias"] = use_bias; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, output_channels=["lt", 2048], kernel_size=["lt", 16], stride=["lt", 16], padding=None, groups=None, dilation=None, use_bias=None, layout=None, uid=None, post_trace=False) @error_checks(None, output_channels=["gt", 0], kernel_size=["gt", 0], stride=["gte", 1], padding=None, groups=["gte", 1], dilation=["gte", 1], use_bias=None, layout=["eq", "NCDHW"], uid=None, post_trace=False) @accepts("self", output_channels=int, kernel_size=int, stride=int, padding=[str, int], groups=int, dilation=int, use_bias=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def convolution3d(self, output_channels=3, kernel_size=3, stride=1, padding="in_eq_out", groups=1, dilation=1, use_bias=True, layout='NCDHW', uid=None): ''' Append 3d-convolution to custom network Args: output_channels (int): Number of output features for this layer kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding groups (int): Number of groups for grouped convolution dilation (int): Factor for dilated convolution use_bias (bool): If True, learnable bias is added layout (str): Either of these values (order) 1) 'NCDHW' - Order 2) 'NDHWC' - Order - N: Number of elements in batches - C: Number of channels - D: Depth of features in layers - H: Height of features in layers - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp={}; tmp["uid"] = uid; tmp["name"] = "convolution3d"; tmp["params"] = {}; tmp["params"]["output_channels"] = output_channels; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["groups"] = groups; tmp["params"]["dilation"] = dilation; tmp["params"]["use_bias"] = use_bias; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, output_channels=["lt", 2048], kernel_size=["lt", 16], stride=["lt", 16], padding=None, output_padding=None, groups=None, dilation=None, use_bias=None, layout=None, uid=None, post_trace=False) @error_checks(None, output_channels=["gt", 0], kernel_size=["gt", 0], stride=["gte", 1], padding=None, output_padding=["gte", 0], groups=["gte", 1], dilation=["gte", 1], use_bias=None, layout=["eq", "NCW"], uid=None, post_trace=False) @accepts("self", output_channels=int, kernel_size=int, stride=int, padding=[str, int], output_padding=int, groups=int, dilation=int, use_bias=bool, layout=str, uid=[None, str], post_trace=False)
#@TraceFunction(trace_args=True, trace_rv=True) def transposed_convolution1d(self, output_channels=3, kernel_size=3, stride=1, padding="in_eq_out", output_padding=0, groups=1, dilation=1, use_bias=True, layout='NCW', uid=None): ''' Append 1d-transposed-convolution to custom network Args: output_channels (int): Number of output features for this layer kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding output_padding (int): Additional padding applied to output groups (int): Number of groups for grouped convolution dilation (int): Factor for dilated convolution use_bias (bool): If True, learnable bias is added layout (str): Either of these values (order) 1) 'NCW' - order 2) 'NWC' - order - N: Number of elements in batches - C: Number of channels - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp={}; tmp["uid"] = uid; tmp["name"] = "transposed_convolution1d"; tmp["params"] = {}; tmp["params"]["output_channels"] = output_channels; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["output_padding"] = output_padding; tmp["params"]["groups"] = groups; tmp["params"]["dilation"] = dilation; tmp["params"]["use_bias"] = use_bias; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, output_channels=["lt", 2048], kernel_size=["lt", 16], stride=["lt", 16], padding=None, output_padding=None, groups=None, dilation=None, use_bias=None, layout=None, uid=None, post_trace=False) @error_checks(None, output_channels=["gt", 0], kernel_size=["gt", 0], stride=["gte", 1], padding=None, output_padding=["gte", 0], groups=["gte", 1], dilation=["gte", 1], use_bias=None, layout=["eq", "NCHW"], uid=None, post_trace=False) @accepts("self", output_channels=int, kernel_size=int, stride=int, padding=[str, int], output_padding=int, groups=int, dilation=int, use_bias=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def transposed_convolution(self, output_channels=3, kernel_size=3, stride=1, padding="in_eq_out", output_padding=0, groups=1, dilation=1, use_bias=True, layout='NCHW', uid=None): ''' Append 2d-transposed-convolution to custom network Args: output_channels (int): Number of output features for this layer kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding output_padding (int): Additional padding applied to output groups (int): Number of groups for grouped convolution dilation (int): Factor for dilated convolution use_bias (bool): If True, learnable bias is added layout (str): Either of these values (order) 1) 'NCHW' - Order 2) 'NHWC' - Order - N: Number of elements in batches - C: Number of channels - H: Height of features in layers - W:
Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp={}; tmp["uid"] = uid; tmp["name"] = "transposed_convolution2d"; tmp["params"] = {}; tmp["params"]["output_channels"] = output_channels; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["output_padding"] = output_padding; tmp["params"]["groups"] = groups; tmp["params"]["dilation"] = dilation; tmp["params"]["use_bias"] = use_bias; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, output_channels=["lt", 2048], kernel_size=["lt", 16], stride=["lt", 16], padding=None, output_padding=None, groups=None, dilation=None, use_bias=None, layout=None, uid=None, post_trace=False) @error_checks(None, output_channels=["gt", 0], kernel_size=["gt", 0], stride=["gte", 1], padding=None, output_padding=["gte", 0], groups=["gte", 1], dilation=["gte", 1], use_bias=None, layout=["eq", "NCHW"], uid=None, post_trace=False) @accepts("self", output_channels=int, kernel_size=int, stride=int, padding=[str, int], output_padding=int, groups=int, dilation=int, use_bias=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def transposed_convolution2d(self, output_channels=3, kernel_size=3, stride=1, padding="in_eq_out", output_padding=0, groups=1, dilation=1, use_bias=True, layout='NCHW', uid=None): ''' Append 2d-transposed-convolution to custom network Args: output_channels (int): Number of output features for this layer kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding output_padding (int): Additional padding applied to output groups (int): Number of groups for grouped convolution dilation (int): Factor for dilated convolution use_bias (bool): If True, learnable bias is added layout (str): Either of these values (order) 1) 'NCHW' - Order 2) 'NHWC' - Order - N: Number of elements in batches - C: Number of channels - H: Height of features in layers - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp={}; tmp["uid"] = uid; tmp["name"] = "transposed_convolution2d"; tmp["params"] = {}; tmp["params"]["output_channels"] = output_channels; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["output_padding"] = output_padding; tmp["params"]["groups"] = groups; tmp["params"]["dilation"] = dilation; tmp["params"]["use_bias"] = use_bias; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, output_channels=["lt", 2048],
kernel_size=["lt", 16], stride=["lt", 16], padding=None, output_padding=None, groups=None, dilation=None, use_bias=None, layout=None, uid=None, post_trace=False) @error_checks(None, output_channels=["gt", 0], kernel_size=["gt", 0], stride=["gte", 1], padding=None, output_padding=["gte, 0"], groups=["gte", 1], dilation=["gte", 1], use_bias=None, layout=["eq", "NCDHW"], uid=None, post_trace=False) @accepts("self", output_channels=int, kernel_size=int, stride=int, padding=[str, int], output_padding=int, groups=int, dilation=int, use_bias=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def transposed_convolution3d(self, output_channels=3, kernel_size=3, stride=1, padding="in_eq_out", output_padding=0, groups=1, dilation=1, use_bias=True, layout='NCDHW', uid=None): ''' Append 3d-transposed-convolution to custom network Args: output_channels (int): Number of output features for this layer kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding output_padding (int): Additional padding applied to output groups (int): Number of groups for grouped convolution dilation (int): Factor for dilated convolution use_bias (bool): If True, learnable bias is added layout (str): Either of these values (order) 1) 'NCDHW' - Order 2) 'NDHWC' - Order - N: Number of elements in batches - C: Number of channels - D: Depth of features in layers - H: Height of features in layers - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp={}; tmp["uid"] = uid; tmp["name"] = "transposed_convolution3d"; tmp["params"] = {}; tmp["params"]["output_channels"] = output_channels; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["output_padding"] = output_padding; tmp["params"]["groups"] = groups; tmp["params"]["dilation"] = dilation; tmp["params"]["use_bias"] = use_bias; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, kernel_size=["lt", 16], stride=["lt", 16], padding=None, dilation=None, return_indices=None, ceil_mode=None, layout=None, uid=None, post_trace=False) @error_checks(None, kernel_size=["gt", 0], stride=["gte", 1], padding=None, dilation=["gte", 1], return_indices=None, ceil_mode=None, layout=["eq", "NCW"], uid=None, post_trace=False) @accepts("self", kernel_size=int, stride=[int, None], padding=[str, int], dilation=int, return_indices=bool, ceil_mode=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def max_pooling1d(self, kernel_size=2, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False, layout='NCW', uid=None): ''' Append 1d-max-pooling to custom network Args: kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) 
integer or tuple value: Manually add padding dilation (int): Factor for dilated pooling return_indices (bool): Fixed value set as False ceil_mode (bool): If True, apply ceil math operation post pooling layout (str): Either of these values (order) 1) 'NCW' - order 2) 'NWC' - order - N: Number of elements in batches - C: Number of channels - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp = {}; tmp["uid"] = uid; tmp["name"] = "max_pooling1d"; tmp["params"] = {}; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["dilation"] = dilation; tmp["params"]["return_indices"] = return_indices; tmp["params"]["ceil_mode"] = ceil_mode; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, kernel_size=["lt", 16], stride=["lt", 16], padding=None, dilation=None, return_indices=None, ceil_mode=None, layout=None, uid=None, post_trace=False) @error_checks(None, kernel_size=["gt", 0], stride=["gte", 1], padding=None, dilation=["gte", 1], return_indices=None, ceil_mode=None, layout=["eq", "NCHW"], uid=None, post_trace=False) @accepts("self", kernel_size=int, stride=[int, None], padding=[str, int], dilation=int, return_indices=bool, ceil_mode=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def max_pooling2d(self, kernel_size=2, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False, layout='NCHW', uid=None): ''' Append 2d-max-pooling to custom network Args: kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding dilation (int): Factor for dilated pooling return_indices (bool): Fixed value set as False ceil_mode (bool): If True, apply ceil math operation post pooling layout (str): Either of these values (order) 1) 'NCHW' - Order 2) 'NHWC' - Order - N: Number of elements in batches - C: Number of channels - H: Height of features in layers - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp = {}; tmp["uid"] = uid; tmp["name"] = "max_pooling2d"; tmp["params"] = {}; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["dilation"] = dilation; tmp["params"]["return_indices"] = return_indices; tmp["params"]["ceil_mode"] = ceil_mode; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, kernel_size=["lt", 16], stride=["lt", 16], padding=None, dilation=None, return_indices=None, ceil_mode=None, layout=None, uid=None, post_trace=False) @error_checks(None, 
kernel_size=["gt", 0], stride=["gte", 1], padding=None, dilation=["gte", 1], return_indices=None, ceil_mode=None, layout=["eq", "NCHW"], uid=None, post_trace=False) @accepts("self", kernel_size=int, stride=[int, None], padding=[str, int], dilation=int, return_indices=bool, ceil_mode=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def max_pooling(self, kernel_size=2, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False, layout='NCHW', uid=None): ''' Append 2d-max-pooling to custom network Args: kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding dilation (int): Factor for dilated pooling return_indices (bool): Fixed value set as False ceil_mode (bool): If True, apply ceil math operation post pooling layout (str): Either of these values (order) 1) 'NCHW' - Order 2) 'NHWC' - Order - N: Number of elements in batches - C: Number of channels - H: Height of features in layers - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp = {}; tmp["uid"] = uid; tmp["name"] = "max_pooling2d"; tmp["params"] = {}; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["dilation"] = dilation; tmp["params"]["return_indices"] = return_indices; tmp["params"]["ceil_mode"] = ceil_mode; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, kernel_size=["lt", 16], stride=["lt", 16], padding=None, dilation=None, return_indices=None, ceil_mode=None, layout=None, uid=None, post_trace=False) @error_checks(None, kernel_size=["gt", 0], stride=["gte", 1], padding=None, dilation=["gte", 1], return_indices=None, ceil_mode=None, layout=["eq", "NCDHW"], uid=None, post_trace=False) @accepts("self", kernel_size=int, stride=[int, None], padding=[str, int], dilation=int, return_indices=bool, ceil_mode=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def max_pooling3d(self, kernel_size=2, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False, layout='NCDHW', uid=None): ''' Append 3d-max-pooling to custom network Args: kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding dilation (int): Factor for dilated pooling return_indices (bool): Fixed value set as False ceil_mode (bool): If True, apply ceil math operation post pooling layout (str): Either of these values (order) 1) 'NCDHW' - Order 2) 'NDHWC' - Order - N: Number of elements in batches - C: Number of channels - D: Depth of features in layers - H: Height of features in layers - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the 
parameters set as per function arguments ''' tmp = {}; tmp["uid"] = uid; tmp["name"] = "max_pooling3d"; tmp["params"] = {}; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["dilation"] = dilation; tmp["params"]["return_indices"] = return_indices; tmp["params"]["ceil_mode"] = ceil_mode; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, kernel_size=["lt", 16], stride=["lt", 16], padding=None, dilation=None, return_indices=None, ceil_mode=None, include_padding_in_calculation=None, layout=None, uid=None, post_trace=False) @error_checks(None, kernel_size=["gt", 0], stride=["gte", 1], padding=None, dilation=["gte", 1], return_indices=None, ceil_mode=None, include_padding_in_calculation=None, layout=["eq", "NCW"], uid=None, post_trace=False) @accepts("self", kernel_size=int, stride=[int, None], padding=[str, int], dilation=int, return_indices=bool, ceil_mode=bool, include_padding_in_calculation=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def average_pooling1d(self, kernel_size=2, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False, include_padding_in_calculation=True, layout='NCW', uid=None): ''' Append 1d-average-pooling to custom network Args: kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding dilation (int): Factor for dilated pooling return_indices (bool): Fixed value set as False include_padding_in_calculation (bool): If True, padding will be considered. 
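# --- Editor's usage sketch (hedged, reusing the gtf prototype from the earlier
# sketch): in custom-network mode the builders above (convolution*, max_pooling*,
# average_pooling*) do not mutate state; each returns a parameter dict that is
# collected into a network-description list. How that list is later compiled is
# outside this file and assumed from Monk's custom-network API.
network = []
network.append(gtf.convolution2d(output_channels=16, kernel_size=3, stride=1))
network.append(gtf.max_pooling2d(kernel_size=2))
network.append(gtf.convolution2d(output_channels=32, kernel_size=3))
network.append(gtf.average_pooling2d(kernel_size=2))
print(network[1]["name"], network[1]["params"]["kernel_size"])   # max_pooling2d 2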
ceil_mode (bool): If True, apply ceil math operation post pooling layout (str): Either of these values (order) 1) 'NCW' - order 2) 'NWC' - order - N: Number of elements in batches - C: Number of channels - W: Number of features in layers uid (str): Unique name for layer, if not mentioned then dynamically assigned Returns: dict: Containing all the parameters set as per function arguments ''' tmp = {}; tmp["uid"] = uid; tmp["name"] = "average_pooling1d"; tmp["params"] = {}; tmp["params"]["kernel_size"] = kernel_size; tmp["params"]["stride"] = stride; tmp["params"]["padding"] = padding; tmp["params"]["dilation"] = dilation; tmp["params"]["return_indices"] = return_indices; tmp["params"]["ceil_mode"] = ceil_mode; tmp["params"]["include_padding_in_calculation"] = include_padding_in_calculation; tmp["params"]["layout"] = layout; return tmp; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, kernel_size=["lt", 16], stride=["lt", 16], padding=None, dilation=None, return_indices=None, ceil_mode=None, include_padding_in_calculation=None, layout=None, uid=None, post_trace=False) @error_checks(None, kernel_size=["gt", 0], stride=["gte", 1], padding=None, dilation=["gte", 1], return_indices=None, ceil_mode=None, include_padding_in_calculation=None, layout=["eq", "NCHW"], uid=None, post_trace=False) @accepts("self", kernel_size=int, stride=[int, None], padding=[str, int], dilation=int, return_indices=bool, ceil_mode=bool, include_padding_in_calculation=bool, layout=str, uid=[None, str], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def average_pooling2d(self, kernel_size=2, stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False, include_padding_in_calculation=True, layout='NCHW', uid=None): ''' Append 2d-average-pooling to custom network Args: kernel_size (int, tuple): kernel matrix size stride (int, tuple): kernel movement stride padding (int, tuple, str): Zero padding applied to input 1) "in_eq_out": Automated padding applied to keep output shape same as input 2) integer or tuple value: Manually add padding dilation (int): Factor for dilated pooling return_indices (bool): Fixed value set as False ceil_mode (bool): If True, apply ceil math operation post pooling include_padding_in_calculation (bool): If True, padding will be considered. 
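# --- Editor's note (hedged): the recurring "in_eq_out" padding option keeps the
# spatial size unchanged. Under standard convolution/pooling arithmetic,
# out = floor((in + 2*p - d*(k - 1) - 1)/s) + 1, so at stride 1 the implied
# padding is d*(k - 1)//2; for the transposed convolutions earlier in this file,
# out = (in - 1)*s - 2*p + d*(k - 1) + output_padding + 1.
def same_padding(kernel_size, dilation=1):
    # padding preserving length at stride 1 (odd effective kernel sizes)
    return dilation * (kernel_size - 1) // 2

assert same_padding(3) == 1
assert same_padding(5, dilation=2) == 4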
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, kernel_size=["lt", 16], stride=["lt", 16], padding=None, dilation=None,
                    return_indices=None, ceil_mode=None, include_padding_in_calculation=None,
                    layout=None, uid=None, post_trace=False)
    @error_checks(None, kernel_size=["gt", 0], stride=["gte", 1], padding=None, dilation=["gte", 1],
                  return_indices=None, ceil_mode=None, include_padding_in_calculation=None,
                  layout=["eq", "NCHW"], uid=None, post_trace=False)
    @accepts("self", kernel_size=int, stride=[int, None], padding=[str, int], dilation=int,
             return_indices=bool, ceil_mode=bool, include_padding_in_calculation=bool,
             layout=str, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def average_pooling(self, kernel_size=2, stride=None, padding=0, dilation=1,
                        return_indices=False, ceil_mode=False,
                        include_padding_in_calculation=True, layout='NCHW', uid=None):
        '''
        Append 2d-average-pooling to custom network (alias for average_pooling2d)

        Args:
            kernel_size (int, tuple): kernel matrix size
            stride (int, tuple): kernel movement stride
            padding (int, tuple, str): Zero padding applied to the input;
                "in_eq_out" pads automatically so the output shape matches the input,
                while an integer or tuple adds manual padding
            dilation (int): Factor for dilated pooling
            return_indices (bool): Fixed value set as False
            ceil_mode (bool): If True, apply ceil math operation post pooling
            include_padding_in_calculation (bool): If True, padded zeros are included
                in the averaging calculation
            layout (str): Either 'NCHW' or 'NHWC'
                (N: number of elements in batches, C: number of channels,
                 H: height of feature maps, W: width of feature maps)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "average_pooling2d",
               "params": {"kernel_size": kernel_size, "stride": stride,
                          "padding": padding, "dilation": dilation,
                          "return_indices": return_indices, "ceil_mode": ceil_mode,
                          "include_padding_in_calculation": include_padding_in_calculation,
                          "layout": layout}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, kernel_size=["lt", 16], stride=["lt", 16], padding=None, dilation=None,
                    return_indices=None, ceil_mode=None, include_padding_in_calculation=None,
                    layout=None, uid=None, post_trace=False)
    @error_checks(None, kernel_size=["gt", 0], stride=["gte", 1], padding=None, dilation=["gte", 1],
                  return_indices=None, ceil_mode=None, include_padding_in_calculation=None,
                  layout=["eq", "NCDHW"], uid=None, post_trace=False)
    @accepts("self", kernel_size=int, stride=[int, None], padding=[str, int], dilation=int,
             return_indices=bool, ceil_mode=bool, include_padding_in_calculation=bool,
             layout=str, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def average_pooling3d(self, kernel_size=2, stride=None, padding=0, dilation=1,
                          return_indices=False, ceil_mode=False,
                          include_padding_in_calculation=True, layout='NCDHW', uid=None):
        '''
        Append 3d-average-pooling to custom network

        Args:
            kernel_size (int, tuple): kernel matrix size
            stride (int, tuple): kernel movement stride
            padding (int, tuple, str): Zero padding applied to the input;
                "in_eq_out" pads automatically so the output shape matches the input,
                while an integer or tuple adds manual padding
            dilation (int): Factor for dilated pooling
            return_indices (bool): Fixed value set as False
            ceil_mode (bool): If True, apply ceil math operation post pooling
            include_padding_in_calculation (bool): If True, padded zeros are included
                in the averaging calculation
            layout (str): Either 'NCDHW' or 'NDHWC'
                (N: number of elements in batches, C: number of channels,
                 D: depth, H: height, W: width of feature maps)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "average_pooling3d",
               "params": {"kernel_size": kernel_size, "stride": stride,
                          "padding": padding, "dilation": dilation,
                          "return_indices": return_indices, "ceil_mode": ceil_mode,
                          "include_padding_in_calculation": include_padding_in_calculation,
                          "layout": layout}}
        return tmp
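    # ------------------------------------------------------------------
    # Illustrative sketch of the "in_eq_out" padding mode (assumption:
    # the downstream shape calculator interprets this string and derives
    # the padding so the pooled output keeps the input spatial shape):
    #
    #     layer = builder.average_pooling2d(kernel_size=3, stride=1,
    #                                       padding="in_eq_out")
    #     # layer["params"]["padding"] == "in_eq_out"
    #
    # `builder` is a hypothetical instance name used only for illustration.
    # ------------------------------------------------------------------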
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, layout=None, uid=None, post_trace=False)
    @error_checks(None, layout=["eq", "NCW"], uid=None, post_trace=False)
    @accepts("self", layout=str, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def global_max_pooling1d(self, layout='NCW', uid=None):
        '''
        Append 1d-global-max-pooling to custom network

        Args:
            layout (str): Either 'NCW' or 'NWC'
                (N: number of elements in batches, C: number of channels,
                 W: number of features in layers)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "global_max_pooling1d", "params": {"layout": layout}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, layout=None, uid=None, post_trace=False)
    @error_checks(None, layout=["eq", "NCHW"], uid=None, post_trace=False)
    @accepts("self", layout=str, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def global_max_pooling2d(self, layout='NCHW', uid=None):
        '''
        Append 2d-global-max-pooling to custom network

        Args:
            layout (str): Either 'NCHW' or 'NHWC'
                (N: number of elements in batches, C: number of channels,
                 H: height of feature maps, W: width of feature maps)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "global_max_pooling2d", "params": {"layout": layout}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, layout=None, uid=None, post_trace=False)
    @error_checks(None, layout=["eq", "NCHW"], uid=None, post_trace=False)
    @accepts("self", layout=str, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def global_max_pooling(self, layout='NCHW', uid=None):
        '''
        Append 2d-global-max-pooling to custom network (alias for global_max_pooling2d)

        Args:
            layout (str): Either 'NCHW' or 'NHWC'
                (N: number of elements in batches, C: number of channels,
                 H: height of feature maps, W: width of feature maps)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "global_max_pooling2d", "params": {"layout": layout}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, layout=None, uid=None, post_trace=False)
    @error_checks(None, layout=["eq", "NCDHW"], uid=None, post_trace=False)
    @accepts("self", layout=str, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def global_max_pooling3d(self, layout='NCDHW', uid=None):
        '''
        Append 3d-global-max-pooling to custom network

        Args:
            layout (str): Either 'NCDHW' or 'NDHWC'
                (N: number of elements in batches, C: number of channels,
                 D: depth, H: height, W: width of feature maps)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "global_max_pooling3d", "params": {"layout": layout}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, layout=None, uid=None, post_trace=False)
    @error_checks(None, layout=["eq", "NCW"], uid=None, post_trace=False)
    @accepts("self", layout=str, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def global_average_pooling1d(self, layout='NCW', uid=None):
        '''
        Append 1d-global-average-pooling to custom network

        Args:
            layout (str): Either 'NCW' or 'NWC'
                (N: number of elements in batches, C: number of channels,
                 W: number of features in layers)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "global_average_pooling1d", "params": {"layout": layout}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, layout=None, uid=None, post_trace=False)
    @error_checks(None, layout=["eq", "NCHW"], uid=None, post_trace=False)
    @accepts("self", layout=str, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def global_average_pooling2d(self, layout='NCHW', uid=None):
        '''
        Append 2d-global-average-pooling to custom network

        Args:
            layout (str): Either 'NCHW' or 'NHWC'
                (N: number of elements in batches, C: number of channels,
                 H: height of feature maps, W: width of feature maps)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "global_average_pooling2d", "params": {"layout": layout}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, layout=None, uid=None, post_trace=False)
    @error_checks(None, layout=["eq", "NCHW"], uid=None, post_trace=False)
    @accepts("self", layout=str, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def global_average_pooling(self, layout='NCHW', uid=None):
        '''
        Append 2d-global-average-pooling to custom network (alias for global_average_pooling2d)

        Args:
            layout (str): Either 'NCHW' or 'NHWC'
                (N: number of elements in batches, C: number of channels,
                 H: height of feature maps, W: width of feature maps)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "global_average_pooling2d", "params": {"layout": layout}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, layout=None, uid=None, post_trace=False)
    @error_checks(None, layout=["eq", "NCDHW"], uid=None, post_trace=False)
    @accepts("self", layout=str, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def global_average_pooling3d(self, layout='NCDHW', uid=None):
        '''
        Append 3d-global-average-pooling to custom network

        Args:
            layout (str): Either 'NCDHW' or 'NDHWC'
                (N: number of elements in batches, C: number of channels,
                 D: depth, H: height, W: width of feature maps)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "global_average_pooling3d", "params": {"layout": layout}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, units=None, use_bias=None, flatten=None, uid=None, post_trace=False)
    @error_checks(None, units=["gt", 0], use_bias=None, flatten=None, uid=None, post_trace=False)
    @accepts("self", units=int, use_bias=bool, flatten=bool, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def fully_connected(self, units=512, use_bias=True, flatten=True, uid=None):
        '''
        Append fully-connected (dense) layer to custom network

        Args:
            units (int): Number of neurons in the layer
            use_bias (bool): If True, a learnable bias is added
            flatten (bool): Fixed value set as True; the input is flattened before the dense layer
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "fully_connected",
               "params": {"units": units, "use_bias": use_bias, "flatten": flatten}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, drop_probability=["lt", 0.5], axes=None, uid=None, post_trace=False)
    @error_checks(None, drop_probability=["gte", 0.0, "lt", 1.0], axes=None, uid=None, post_trace=False)
    @accepts("self", drop_probability=[int, float], axes=tuple, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def dropout(self, drop_probability=0.2, axes=(), uid=None):
        '''
        Append dropout layer to custom network

        Args:
            drop_probability (float): Probability of dropping a neuron from the output
            axes (tuple): Channel axes to apply dropout over
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "dropout",
               "params": {"drop_probability": drop_probability, "axes": axes}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def flatten(self, uid=None):
        '''
        Append flatten layer to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "flatten", "params": {}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def identity(self, uid=None):
        '''
        Append identity layer to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "identity", "params": {}}
        return tmp
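    # ------------------------------------------------------------------
    # Illustrative sketch of a typical classifier head built from the
    # methods above (hypothetical `builder`/`network` names; relu() is
    # defined further below in this class):
    #
    #     network = []
    #     network.append(builder.flatten())
    #     network.append(builder.fully_connected(units=512))
    #     network.append(builder.relu())
    #     network.append(builder.dropout(drop_probability=0.2))
    #     network.append(builder.fully_connected(units=10))
    #
    # Each entry is just a parameter dict; no tensors are created here.
    # ------------------------------------------------------------------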
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def add(self, uid=None):
        '''
        Append elementwise addition layer to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "add"}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def concatenate(self, uid=None):
        '''
        Append concatenation layer to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "concatenate"}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, moving_average_momentum=["lt", 1.0], epsilon=["lt", 0.0001],
                    use_trainable_parameters=None, activate_scale_shift_operation=None,
                    uid=None, post_trace=False)
    @error_checks(None, moving_average_momentum=["gte", 0], epsilon=["gte", 0],
                  use_trainable_parameters=None, activate_scale_shift_operation=None,
                  uid=None, post_trace=False)
    @accepts("self", moving_average_momentum=[float, int], epsilon=float,
             use_trainable_parameters=bool, activate_scale_shift_operation=bool,
             uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def batch_normalization(self, moving_average_momentum=0.9, epsilon=0.00001,
                            use_trainable_parameters=True,
                            activate_scale_shift_operation=False, uid=None):
        '''
        Append batch normalization layer to custom network

        Args:
            moving_average_momentum (float): Momentum for the running-statistics averages
            epsilon (float): Small value added to the variance to avoid division by zero
            use_trainable_parameters (bool): If True, the layer has learnable affine parameters
            activate_scale_shift_operation (bool): Fixed value set as False
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "batch_normalization",
               "params": {"moving_average_momentum": moving_average_momentum,
                          "epsilon": epsilon,
                          "use_trainable_parameters": use_trainable_parameters,
                          "activate_scale_shift_operation": activate_scale_shift_operation}}
        return tmp
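    # ------------------------------------------------------------------
    # Illustrative sketch of the branching convention used by the block
    # builders further below: parallel branches are expressed as a list
    # of branch lists, terminated by an add() or concatenate() dict that
    # tells the network compiler how to merge them (hypothetical
    # `builder`/`network` names):
    #
    #     branch_1 = [builder.convolution(output_channels=16, kernel_size=3),
    #                 builder.batch_normalization()]
    #     branch_2 = [builder.identity()]
    #     subnetwork = [branch_1, branch_2, builder.add()]
    #     network.append(subnetwork)
    # ------------------------------------------------------------------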
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, moving_average_momentum=["lt", 1.0], epsilon=["lt", 0.0001],
                    use_trainable_parameters=None, uid=None, post_trace=False)
    @error_checks(None, moving_average_momentum=["gte", 0], epsilon=["gte", 0],
                  use_trainable_parameters=None, uid=None, post_trace=False)
    @accepts("self", moving_average_momentum=[float, int], epsilon=float,
             use_trainable_parameters=bool, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def instance_normalization(self, moving_average_momentum=0.9, epsilon=0.00001,
                               use_trainable_parameters=True, uid=None):
        '''
        Append instance normalization layer to custom network

        Args:
            moving_average_momentum (float): Normalization momentum value
            epsilon (float): Small value added to the variance to avoid division by zero
            use_trainable_parameters (bool): If True, the layer has learnable affine parameters
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "instance_normalization",
               "params": {"epsilon": epsilon,
                          "use_trainable_parameters": use_trainable_parameters}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, moving_average_momentum=["lt", 1.0], epsilon=["lt", 0.0001],
                    use_trainable_parameters=None, uid=None, post_trace=False)
    @error_checks(None, moving_average_momentum=["gte", 0], epsilon=["gte", 0],
                  use_trainable_parameters=None, uid=None, post_trace=False)
    @accepts("self", moving_average_momentum=[float, int], epsilon=float,
             use_trainable_parameters=bool, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def layer_normalization(self, moving_average_momentum=0.9, epsilon=0.00001,
                            use_trainable_parameters=True, uid=None):
        '''
        Append layer normalization layer to custom network

        Args:
            moving_average_momentum (float): Normalization momentum value
            epsilon (float): Small value added to the variance to avoid division by zero
            use_trainable_parameters (bool): If True, the layer has learnable affine parameters
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "layer_normalization",
               "params": {"epsilon": epsilon,
                          "use_trainable_parameters": use_trainable_parameters}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def relu(self, uid=None):
        '''
        Append rectified linear unit activation to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "relu", "params": {}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def sigmoid(self, uid=None):
        '''
        Append sigmoid activation to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "sigmoid", "params": {}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def tanh(self, uid=None):
        '''
        Append tanh activation to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "tanh", "params": {}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, beta=None, threshold=None, uid=None, post_trace=False)
    @error_checks(None, beta=["gt", 0], threshold=None, uid=None, post_trace=False)
    @accepts("self", beta=[int, float], threshold=[int, float], uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def softplus(self, beta=1, threshold=20, uid=None):
        '''
        Append softplus activation to custom network

        Args:
            beta (int): Multiplicative factor inside the softplus
            threshold (int): Input limit above which softplus is treated as linear
                (thresholded-relu behaviour)
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "softplus",
               "params": {"beta": beta, "threshold": threshold}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def softsign(self, uid=None):
        '''
        Append softsign activation to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "softsign", "params": {}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, alpha=None, uid=None, post_trace=False)
    @error_checks(None, alpha=["gt", 0], uid=None, post_trace=False)
    @accepts("self", alpha=float, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def elu(self, alpha=1.0, uid=None):
        '''
        Append exponential linear unit activation to custom network

        Args:
            alpha (float): Multiplicative factor
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "elu", "params": {"alpha": alpha}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def gelu(self, uid=None):
        '''
        Append Gaussian error linear unit (GELU) activation to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "gelu", "params": {}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, alpha=None, uid=None, post_trace=False)
    @error_checks(None, alpha=["gt", 0], uid=None, post_trace=False)
    @accepts("self", alpha=float, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def leaky_relu(self, alpha=0.01, uid=None):
        '''
        Append leaky relu activation to custom network

        Args:
            alpha (float): Multiplicative factor applied to the negative part of the input
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "leaky_relu", "params": {"alpha": alpha}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def prelu(self, uid=None):
        '''
        Append parameterized rectified linear unit activation to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "prelu", "params": {}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, uid=None, post_trace=False)
    @error_checks(None, uid=None, post_trace=False)
    @accepts("self", uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def selu(self, uid=None):
        '''
        Append scaled exponential linear unit activation to custom network

        Args:
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "selu", "params": {}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, beta=None, uid=None, post_trace=False)
    @error_checks(None, beta=["gt", 0], uid=None, post_trace=False)
    @accepts("self", beta=float, uid=[None, str], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def swish(self, beta=1.0, uid=None):
        '''
        Append swish activation to custom network

        Args:
            beta (float): Multiplicative factor
            uid (str): Unique name for layer; if not mentioned, dynamically assigned

        Returns:
            dict: Containing all the parameters set as per function arguments
        '''
        tmp = {"uid": uid, "name": "swish", "params": {"beta": beta}}
        return tmp
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, output_channels=None, stride=None, downsample=None, post_trace=False)
    @error_checks(None, output_channels=["gt", 0], stride=None, downsample=None, post_trace=False)
    @accepts("self", output_channels=int, stride=[None, int, tuple], downsample=bool, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def resnet_v1_block(self, output_channels=16, stride=1, downsample=True):
        '''
        Append Resnet V1 Block to custom network

        Args:
            output_channels (int): Number of output features for this block
            stride (int, tuple): kernel movement stride
            downsample (bool): If True, the residual branch downsamples through a strided
                1x1 convolution; if False, it is an identity shortcut

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = [self.convolution(output_channels=output_channels, kernel_size=3, stride=stride),
                    self.batch_normalization(),
                    self.relu(),
                    self.convolution(output_channels=output_channels, kernel_size=3, stride=1),
                    self.batch_normalization()]
        if downsample:
            branch_2 = [self.convolution(output_channels=output_channels, kernel_size=1, stride=stride),
                        self.batch_normalization()]
        else:
            branch_2 = [self.identity()]
        network = [[branch_1, branch_2, self.add()],
                   self.relu()]
        return network
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, output_channels=None, stride=None, downsample=None, post_trace=False)
    @error_checks(None, output_channels=["gt", 0], stride=None, downsample=None, post_trace=False)
    @accepts("self", output_channels=int, stride=[None, int, tuple], downsample=bool, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def resnet_v2_block(self, output_channels=16, stride=1, downsample=True):
        '''
        Append Resnet V2 Block to custom network

        Args:
            output_channels (int): Number of output features for this block
            stride (int, tuple): kernel movement stride
            downsample (bool): If True, the residual branch downsamples through a strided
                1x1 convolution; if False, it is an identity shortcut

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = [self.convolution(output_channels=output_channels, kernel_size=3, stride=stride),
                    self.batch_normalization(),
                    self.relu(),
                    self.convolution(output_channels=output_channels, kernel_size=3, stride=1)]
        if downsample:
            branch_2 = [self.convolution(output_channels=output_channels, kernel_size=1, stride=stride)]
        else:
            branch_2 = [self.identity()]
        network = [self.batch_normalization(),
                   self.relu(),
                   [branch_1, branch_2, self.add()]]
        return network
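    # ------------------------------------------------------------------
    # Illustrative sketch: block builders return a *list* of layer dicts
    # (with nested lists for parallel branches), so a flat network
    # description typically splices them in with extend() rather than
    # append() (an assumption about the consumer; names hypothetical):
    #
    #     network.extend(builder.resnet_v1_block(output_channels=16,
    #                                            stride=1, downsample=False))
    #     network.extend(builder.resnet_v1_block(output_channels=32,
    #                                            stride=2, downsample=True))
    #
    # downsample=True inserts the strided 1x1 convolution on the residual
    # branch so its shape matches the main branch before add().
    # ------------------------------------------------------------------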
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, output_channels=None, stride=None, downsample=None, post_trace=False)
    @error_checks(None, output_channels=["gt", 0], stride=None, downsample=None, post_trace=False)
    @accepts("self", output_channels=int, stride=[None, int, tuple], downsample=bool, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def resnet_v1_bottleneck_block(self, output_channels=16, stride=1, downsample=True):
        '''
        Append Resnet V1 Bottleneck Block to custom network

        Args:
            output_channels (int): Number of output features for this block
            stride (int, tuple): kernel movement stride
            downsample (bool): If True, the residual branch downsamples through a strided
                1x1 convolution; if False, it is an identity shortcut

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = [self.convolution(output_channels=output_channels//4, kernel_size=1, stride=stride),
                    self.batch_normalization(),
                    self.relu(),
                    self.convolution(output_channels=output_channels//4, kernel_size=3, stride=1),
                    self.batch_normalization(),
                    self.relu(),
                    self.convolution(output_channels=output_channels, kernel_size=1, stride=1),
                    self.batch_normalization()]
        if downsample:
            branch_2 = [self.convolution(output_channels=output_channels, kernel_size=1, stride=stride),
                        self.batch_normalization()]
        else:
            branch_2 = [self.identity()]
        network = [[branch_1, branch_2, self.add()],
                   self.relu()]
        return network
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, output_channels=None, stride=None, downsample=None, post_trace=False)
    @error_checks(None, output_channels=["gt", 0], stride=None, downsample=None, post_trace=False)
    @accepts("self", output_channels=int, stride=[None, int, tuple], downsample=bool, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def resnet_v2_bottleneck_block(self, output_channels=16, stride=1, downsample=True):
        '''
        Append Resnet V2 Bottleneck Block to custom network

        Args:
            output_channels (int): Number of output features for this block
            stride (int): kernel movement stride
            downsample (bool): If True, the residual branch downsamples through a strided
                1x1 convolution; if False, it is an identity shortcut

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = [self.convolution(output_channels=output_channels//4, kernel_size=1, stride=1),
                    self.batch_normalization(),
                    self.relu(),
                    self.convolution(output_channels=output_channels//4, kernel_size=3, stride=stride),
                    self.batch_normalization(),
                    self.relu(),
                    self.convolution(output_channels=output_channels, kernel_size=1, stride=1)]
        if downsample:
            branch_2 = [self.convolution(output_channels=output_channels, kernel_size=1, stride=stride)]
        else:
            branch_2 = [self.identity()]
        network = [self.batch_normalization(),
                   self.relu(),
                   [branch_1, branch_2, self.add()]]
        return network
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, output_channels=None, cardinality=None, bottleneck_width=None,
                    stride=None, downsample=None, post_trace=False)
    @error_checks(None, output_channels=["gt", 0], cardinality=None, bottleneck_width=None,
                  stride=None, downsample=None, post_trace=False)
    @accepts("self", output_channels=int, cardinality=int, bottleneck_width=int,
             stride=[int, tuple], downsample=bool, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def resnext_block(self, output_channels=256, cardinality=8, bottleneck_width=4,
                      stride=1, downsample=True):
        '''
        Append Resnext Block to custom network

        Args:
            output_channels (int): Number of output features for this block
            cardinality (int): Number of grouped transformations
            bottleneck_width (int): Bottleneck dimensions for reducing number of features
            stride (int): kernel movement stride
            downsample (bool): If True, the residual branch downsamples through a strided
                1x1 convolution; if False, it is an identity shortcut

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        channels = output_channels // 4
        D = int(math.floor(channels * (bottleneck_width / 64)))
        group_width = cardinality * D
        branch_1 = [self.convolution(output_channels=group_width, kernel_size=1, stride=1),
                    self.batch_normalization(),
                    self.relu(),
                    self.convolution(output_channels=group_width, kernel_size=3, stride=stride),
                    self.batch_normalization(),
                    self.relu(),
                    self.convolution(output_channels=output_channels, kernel_size=1, stride=1),
                    self.batch_normalization()]
        if downsample:
            branch_2 = [self.convolution(output_channels=output_channels, kernel_size=1, stride=stride),
                        self.batch_normalization()]
        else:
            branch_2 = [self.identity()]
        network = [[branch_1, branch_2, self.add()],
                   self.relu()]
        return network
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, output_channels=None, bottleneck_width=None, stride=None, post_trace=False)
    @error_checks(None, output_channels=["gt", 0], bottleneck_width=None, stride=None, post_trace=False)
    @accepts("self", output_channels=int, bottleneck_width=int, stride=[int, tuple], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def mobilenet_v2_linear_bottleneck_block(self, output_channels=32, bottleneck_width=4, stride=1):
        '''
        Append Mobilenet V2 Linear Bottleneck Block to custom network

        Args:
            output_channels (int): Number of output features for this block
            bottleneck_width (int): Expansion factor applied to the intermediate features
            stride (int): kernel movement stride

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = []
        if bottleneck_width != 1:
            branch_1 += [self.convolution(output_channels=output_channels*bottleneck_width,
                                          kernel_size=1, stride=1),
                         self.batch_normalization(),
                         self.relu()]
        branch_1 += [self.convolution(output_channels=output_channels*bottleneck_width,
                                      kernel_size=3, stride=stride),
                     self.batch_normalization(),
                     self.relu(),
                     self.convolution(output_channels=output_channels, kernel_size=1, stride=1),
                     self.batch_normalization()]
        branch_2 = [self.identity()]
        network = [[branch_1, branch_2, self.add()]]
        return network
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, input_channels=None, output_channels=None, kernel_size=None,
                    stride=None, padding=None, post_trace=False)
    @error_checks(None, input_channels=["gt", 0], output_channels=["gt", 0], kernel_size=None,
                  stride=None, padding=None, post_trace=False)
    @accepts("self", input_channels=int, output_channels=int, kernel_size=int,
             stride=[None, int, tuple], padding=[None, int, tuple], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def separable_convolution_block(self, input_channels=16, output_channels=32, kernel_size=3,
                                    stride=1, padding=1):
        '''
        Append Separable convolution Block to custom network

        Args:
            input_channels (int): Number of input features for this block
            output_channels (int): Number of output features for this block
            kernel_size (int): Kernel matrix shape for the depthwise layer
            stride (int): kernel movement stride
            padding (int, tuple): External zero padding on the input

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        network = [self.convolution(output_channels=input_channels, kernel_size=kernel_size,
                                    stride=stride, padding=padding, groups=input_channels),
                   self.convolution(output_channels=output_channels, kernel_size=1, stride=1)]
        return network
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, output_channels=None, bottleneck_width=None, stride=None, post_trace=False)
    @error_checks(None, output_channels=["gt", 0], bottleneck_width=None, stride=None, post_trace=False)
    @accepts("self", output_channels=int, bottleneck_width=int, stride=[int, tuple], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def mobilenet_v2_inverted_linear_bottleneck_block(self, output_channels=32, bottleneck_width=4,
                                                      stride=1):
        '''
        Append Mobilenet V2 Inverted Linear Bottleneck Block to custom network

        Args:
            output_channels (int): Number of output features for this block
            bottleneck_width (int): Bottleneck divisor for reducing the intermediate features
            stride (int): kernel movement stride

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = []
        if bottleneck_width != 1:
            branch_1 += [self.convolution(output_channels=output_channels//bottleneck_width,
                                          kernel_size=1, stride=1),
                         self.batch_normalization(),
                         self.relu()]
        sep_conv = self.separable_convolution_block(input_channels=output_channels//bottleneck_width,
                                                    output_channels=output_channels//bottleneck_width,
                                                    kernel_size=3, stride=stride)
        branch_1 += [sep_conv,
                     self.batch_normalization(),
                     self.relu(),
                     self.convolution(output_channels=output_channels, kernel_size=1, stride=1),
                     self.batch_normalization()]
        branch_2 = [self.identity()]
        network = [[branch_1, branch_2, self.add()]]
        return network
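    # ------------------------------------------------------------------
    # Illustrative note on separable_convolution_block above: the first
    # convolution uses groups=input_channels, i.e. one filter group per
    # input channel (a depthwise convolution), and the following 1x1
    # convolution mixes channels (pointwise). Assuming convolution()
    # records its keyword arguments in "params" like the other builders
    # (hypothetical `builder` name):
    #
    #     layers = builder.separable_convolution_block(input_channels=32,
    #                                                  output_channels=64,
    #                                                  kernel_size=3)
    #     # layers[0]["params"]["groups"] == 32       (depthwise 3x3)
    #     # layers[1]["params"]["kernel_size"] == 1   (pointwise 1x1)
    # ------------------------------------------------------------------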
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, squeeze_channels=None, expand_channels_1x1=None, expand_channels_3x3=None,
                    post_trace=False)
    @error_checks(None, squeeze_channels=["gt", 0], expand_channels_1x1=["gt", 0],
                  expand_channels_3x3=["gt", 0], post_trace=False)
    @accepts("self", squeeze_channels=int, expand_channels_1x1=int, expand_channels_3x3=int,
             post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def squeezenet_fire_block(self, squeeze_channels=16, expand_channels_1x1=32,
                              expand_channels_3x3=64):
        '''
        Append Squeezenet Fire Block to custom network

        Args:
            squeeze_channels (int): Number of features in the squeeze (1x1) convolution
            expand_channels_1x1 (int): Number of features in the 1x1 expand convolution
            expand_channels_3x3 (int): Number of features in the 3x3 expand convolution

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = [self.convolution(output_channels=expand_channels_1x1, kernel_size=1, stride=1),
                    self.relu()]
        branch_2 = [self.convolution(output_channels=expand_channels_3x3, kernel_size=3, stride=1),
                    self.relu()]
        network = [self.convolution(output_channels=squeeze_channels, kernel_size=1, stride=1),
                   self.relu(),
                   [branch_1, branch_2, self.concatenate()]]
        return network
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, bottleneck_size=None, growth_rate=None, dropout=None, post_trace=False)
    @error_checks(None, bottleneck_size=["gt", 0], growth_rate=None, dropout=["gte", 0, "lte", 1],
                  post_trace=False)
    @accepts("self", bottleneck_size=int, growth_rate=int, dropout=[int, float], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def densenet_block(self, bottleneck_size=4, growth_rate=16, dropout=0.2):
        '''
        Append Densenet Block to custom network

        Args:
            bottleneck_size (int): Bottleneck multiplier for the intermediate 1x1 convolution
            growth_rate (int): Number of new feature maps produced by this block
            dropout (float): Probability for the dropout layer applied after the convolutions

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = [self.batch_normalization(),
                    self.relu(),
                    self.convolution(output_channels=bottleneck_size*growth_rate, kernel_size=1, stride=1),
                    self.batch_normalization(),
                    self.relu(),
                    self.convolution(output_channels=growth_rate, kernel_size=3, stride=1),
                    self.dropout(drop_probability=dropout)]
        branch_2 = [self.identity()]
        network = [[branch_1, branch_2, self.concatenate()]]
        return network
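    # ------------------------------------------------------------------
    # Illustrative sketch: in a DenseNet stage each block concatenates
    # its growth_rate new feature maps onto the identity branch, so the
    # channel count grows by growth_rate per block (hypothetical
    # `builder`/`network` names):
    #
    #     for _ in range(6):    # one dense stage of six blocks
    #         network.extend(builder.densenet_block(bottleneck_size=4,
    #                                               growth_rate=16,
    #                                               dropout=0.2))
    # ------------------------------------------------------------------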
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, output_channels=None, kernel_size=None, stride=None, padding=None,
                    post_trace=False)
    @error_checks(None, output_channels=["gt", 0], kernel_size=None, stride=None, padding=None,
                  post_trace=False)
    @accepts("self", output_channels=int, kernel_size=[int, tuple], stride=[None, int, tuple],
             padding=[None, int, tuple], post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def conv_bn_relu_block(self, output_channels=64, kernel_size=1, stride=1, padding=None):
        '''
        Append Conv->batch_norm->relu Block to custom network

        Args:
            output_channels (int): Number of output features for this block
            kernel_size (int): Kernel matrix shape for all layers in this block
            stride (int): kernel movement stride
            padding (int, tuple): External zero padding on the input; if None, the
                convolution's default padding is used

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        if padding:
            conv = self.convolution(output_channels=output_channels, kernel_size=kernel_size,
                                    stride=stride, padding=padding)
        else:
            conv = self.convolution(output_channels=output_channels, kernel_size=kernel_size,
                                    stride=stride)
        network = [conv,
                   self.batch_normalization(),
                   self.relu()]
        return network
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, pooling_branch_channels=None, pool_type=None, post_trace=False)
    @error_checks(None, pooling_branch_channels=["gt", 0], pool_type=None, post_trace=False)
    @accepts("self", pooling_branch_channels=int, pool_type=str, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def inception_a_block(self, pooling_branch_channels=32, pool_type="avg"):
        '''
        Append Inception-A Block to custom network

        Args:
            pooling_branch_channels (int): Number of features for conv layers in pooling branch
            pool_type (str): Either of these types
                             - "avg": Average pooling
                             - "max": Max pooling

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = [self.conv_bn_relu_block(output_channels=64, kernel_size=1)]
        branch_2 = [self.conv_bn_relu_block(output_channels=48, kernel_size=1),
                    self.conv_bn_relu_block(output_channels=64, kernel_size=5)]
        branch_3 = [self.conv_bn_relu_block(output_channels=64, kernel_size=1),
                    self.conv_bn_relu_block(output_channels=96, kernel_size=3),
                    self.conv_bn_relu_block(output_channels=96, kernel_size=3)]
        if pool_type == "avg":
            branch_4 = [self.average_pooling(kernel_size=3, stride=1, padding=1)]
        else:
            branch_4 = [self.max_pooling(kernel_size=3, stride=1, padding=1)]
        branch_4.append(self.conv_bn_relu_block(output_channels=pooling_branch_channels, kernel_size=1))
        network = [[branch_1, branch_2, branch_3, branch_4, self.concatenate()]]
        return network
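    # ------------------------------------------------------------------
    # Illustrative sketch: conv_bn_relu_block above is the building unit
    # for every inception branch; an Inception-A stage is simply a stem
    # of such blocks followed by the four-branch block (hypothetical
    # `builder`/`network` names):
    #
    #     network.extend(builder.conv_bn_relu_block(output_channels=32,
    #                                               kernel_size=3, stride=2))
    #     network.extend(builder.inception_a_block(pooling_branch_channels=32,
    #                                              pool_type="avg"))
    # ------------------------------------------------------------------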
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, pool_type=None, post_trace=False)
    @error_checks(None, pool_type=None, post_trace=False)
    @accepts("self", pool_type=str, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def inception_b_block(self, pool_type="avg"):
        '''
        Append Inception-B Block to custom network

        Args:
            pool_type (str): Either of these types
                             - "avg": Average pooling
                             - "max": Max pooling

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = [self.conv_bn_relu_block(output_channels=384, kernel_size=3)]
        branch_2 = [self.conv_bn_relu_block(output_channels=64, kernel_size=1),
                    self.conv_bn_relu_block(output_channels=96, kernel_size=3),
                    self.conv_bn_relu_block(output_channels=96, kernel_size=3)]
        if pool_type == "avg":
            branch_3 = [self.average_pooling(kernel_size=3, stride=1, padding=1)]
        else:
            branch_3 = [self.max_pooling(kernel_size=3, stride=1, padding=1)]
        network = [[branch_1, branch_2, branch_3, self.concatenate()]]
        return network
    #####################################################################################################################################
    #####################################################################################################################################
    @warning_checks(None, channels_7x7=None, pool_type=None, post_trace=False)
    @error_checks(None, channels_7x7=["gt", 0], pool_type=None, post_trace=False)
    @accepts("self", channels_7x7=int, pool_type=str, post_trace=False)
    #@TraceFunction(trace_args=True, trace_rv=True)
    def inception_c_block(self, channels_7x7=3, pool_type="avg"):
        '''
        Append Inception-C Block to custom network

        Args:
            channels_7x7 (int): Number of features for conv layers in the 7x7 branch
            pool_type (str): Either of these types
                             - "avg": Average pooling
                             - "max": Max pooling

        Returns:
            list: Containing all the layer dictionaries arranged as per function arguments
        '''
        branch_1 = [self.conv_bn_relu_block(output_channels=192, kernel_size=1)]
        branch_2 = [self.conv_bn_relu_block(output_channels=channels_7x7, kernel_size=1),
                    self.conv_bn_relu_block(output_channels=channels_7x7, kernel_size=(1, 7), padding=(0, 3)),
                    self.conv_bn_relu_block(output_channels=192, kernel_size=(7, 1), padding=(3, 0))]
        branch_3 = [self.conv_bn_relu_block(output_channels=channels_7x7, kernel_size=1),
                    self.conv_bn_relu_block(output_channels=channels_7x7, kernel_size=(1, 7), padding=(0, 3)),
                    self.conv_bn_relu_block(output_channels=channels_7x7, kernel_size=(7, 1), padding=(3, 0)),
                    self.conv_bn_relu_block(output_channels=channels_7x7, kernel_size=(1, 7), padding=(0, 3)),
                    self.conv_bn_relu_block(output_channels=192, kernel_size=(7, 1), padding=(3, 0))]
        if pool_type == "avg":
            branch_4 = [self.average_pooling(kernel_size=3, stride=1, padding=1)]
        else:
            branch_4 = [self.max_pooling(kernel_size=3, stride=1, padding=1)]
branch_4.append(self.conv_bn_relu_block(output_channels=192, kernel_size=1)); subnetwork.append(branch_1); subnetwork.append(branch_2); subnetwork.append(branch_3); subnetwork.append(branch_4); subnetwork.append(self.concatenate()); network.append(subnetwork); return network; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, pool_type=None, post_trace=False) @error_checks(None, pool_type=None, post_trace=False) @accepts("self", pool_type=str, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def inception_d_block(self, pool_type="avg"): ''' Append Inception-D Block to custom network Args: pool_type (str): Either of these types - "avg" - Average pooling - "max" - Max pooling Returns: list: Containing all the layer dictionaries arranged as per function arguments ''' network = []; subnetwork = []; branch_1 = []; branch_2 = []; branch_3 = []; branch_4 = []; branch_1.append(self.conv_bn_relu_block(output_channels=192, kernel_size=1)) branch_1.append(self.conv_bn_relu_block(output_channels=320, kernel_size=3, stride=2)) branch_2.append(self.conv_bn_relu_block(output_channels=192, kernel_size=1)); branch_2.append(self.conv_bn_relu_block(output_channels=192, kernel_size=(1, 7), padding=(0, 3))); branch_2.append(self.conv_bn_relu_block(output_channels=192, kernel_size=(7, 1), padding=(3, 0))); branch_2.append(self.conv_bn_relu_block(output_channels=192, kernel_size=3, stride=2)); if(pool_type=="avg"): branch_3.append(self.average_pooling(kernel_size=3, stride=2)); else: branch_3.append(self.max_pooling(kernel_size=3, stride=2)); subnetwork.append(branch_1); subnetwork.append(branch_2); subnetwork.append(branch_3); subnetwork.append(self.concatenate()); network.append(subnetwork); return network; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, post_trace=False) @error_checks(None, post_trace=False) @accepts("self", post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def subbranch_block(self): ''' Append sub-branch Block to custom network Args: None Returns: list: Containing all the layer dictionaries arranged as per function arguments ''' subnetwork = []; branch_1 = []; branch_2 = []; branch_1.append(self.conv_bn_relu_block(output_channels=384, kernel_size=(1, 3), padding=(0, 1))); branch_2.append(self.conv_bn_relu_block(output_channels=384, kernel_size=(3, 1), padding=(1, 0))); subnetwork.append(branch_1); subnetwork.append(branch_2); subnetwork.append(self.concatenate()); return subnetwork; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, pool_type=None, post_trace=False) @error_checks(None, pool_type=None, post_trace=False) @accepts("self", pool_type=str, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def inception_e_block(self, pool_type="avg"): ''' Append Inception-E Block to custom network Args: pool_type (str): Either of these types -
"avg" - Average pooling - "max" - Max pooling Returns: list: Containing all the layer dictionaries arranged as per function arguments ''' network = []; subnetwork = []; branch_1 = []; branch_2 = []; branch_3 = []; branch_4 = []; branch_1.append(self.conv_bn_relu_block(output_channels=320, kernel_size=1)) branch_2.append(self.conv_bn_relu_block(output_channels=384, kernel_size=1)); branch_2.append(self.subbranch_block()); branch_3.append(self.conv_bn_relu_block(output_channels=448, kernel_size=1)); branch_3.append(self.conv_bn_relu_block(output_channels=384, kernel_size=3, padding=1)); branch_3.append(self.subbranch_block()); if(pool_type=="avg"): branch_4.append(self.average_pooling(kernel_size=3, stride=1, padding=1)); else: branch_4.append(self.max_pooling(kernel_size=3, stride=1, padding=1)); branch_4.append(self.conv_bn_relu_block(output_channels=192, kernel_size=1)); subnetwork.append(branch_1); subnetwork.append(branch_2); subnetwork.append(branch_3); subnetwork.append(branch_4); subnetwork.append(self.concatenate()); network.append(subnetwork); return network; ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, None, num_filters=None, post_trace=False) @error_checks(None, None, num_filters=None, post_trace=False) @accepts("self", str, num_filters=int, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def study_convolution_filters(self, img_name, num_filters=4): """ Visualize the effect of a parameter in convolution - number of filters Args: img_name (str): Path to test images num_filters(int): Number of filters to use Returns: None """ network = []; network.append(self.convolution(output_channels=num_filters)); self.Compile_Network(network, data_shape=(3, 224, 224), use_gpu=False, network_initializer="xavier_normal", debug=False); self.system_dict["local"]["transforms_test"] = []; self.system_dict["local"]["transforms_test"].append(transforms.Resize(size=(224, 224))); self.system_dict["local"]["transforms_test"].append(transforms.ToTensor()); self.system_dict["local"]["data_transforms"]["test"] = transforms.Compose(self.system_dict["local"]["transforms_test"]); img = image.imread(img_name) img = self.system_dict["local"]["data_transforms"]["test"](img).expand_dims(axis=0); img = img.copyto(self.system_dict["local"]["ctx"][0]); print("Input shape - {}".format(img.shape)) outputs = self.system_dict["local"]["model"].forward(img); print("Output shape - {}".format(outputs.shape)) if(os.path.isdir("tmp")): os.system("rm -r tmp"); os.system("mkdir tmp"); else: os.system("mkdir tmp"); out = outputs[0]; for i in range(out.shape[0]): tmp = out[i].asnumpy() cv2.imwrite("tmp/test" + str(i) + ".jpg", tmp*255) weights = self.system_dict["local"]["model"][0].weight.data().asnumpy() for i in range(weights.shape[0]): tmp = weights[i] cv2.imwrite("tmp/test_filter" + str(i) + ".jpg", tmp*255) ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, None, kernel_size=None, post_trace=False) @error_checks(None, None, kernel_size=["gt", 0], post_trace=False) @accepts("self", str, kernel_size=[int, tuple], post_trace=False) 
#@TraceFunction(trace_args=True, trace_rv=True) def study_convolution_kernels(self, img_name, kernel_size=4): """ Visualize the effect of a parameter in convolution - kernel size Args: img_name (str): Path to test images kernel_size(int): Convolution kernel shape Returns: None """ network = []; network.append(self.convolution(output_channels=4, kernel_size=kernel_size)); self.Compile_Network(network, data_shape=(3, 224, 224), use_gpu=False, network_initializer="xavier_normal", debug=False); self.system_dict["local"]["transforms_test"] = []; self.system_dict["local"]["transforms_test"].append(transforms.Resize(size=(224, 224))); self.system_dict["local"]["transforms_test"].append(transforms.ToTensor()); self.system_dict["local"]["data_transforms"]["test"] = transforms.Compose(self.system_dict["local"]["transforms_test"]); img = image.imread(img_name) img = self.system_dict["local"]["data_transforms"]["test"](img).expand_dims(axis=0); img = img.copyto(self.system_dict["local"]["ctx"][0]); print("Input shape - {}".format(img.shape)) outputs = self.system_dict["local"]["model"].forward(img); print("Output shape - {}".format(outputs.shape)) if(os.path.isdir("tmp")): os.system("rm -r tmp"); os.system("mkdir tmp"); else: os.system("mkdir tmp"); out = outputs[0]; for i in range(out.shape[0]): tmp = out[i].asnumpy() cv2.imwrite("tmp/test" + str(i) + ".jpg", tmp*255) weights = self.system_dict["local"]["model"][0].weight.data().asnumpy() for i in range(weights.shape[0]): tmp = weights[i] tmp = np.swapaxes(tmp, 0, 1) tmp = np.swapaxes(tmp, 1, 2) cv2.imwrite("tmp/test_filter" + str(i) + ".jpg", tmp*255) ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, None, stride=None, post_trace=False) @error_checks(None, None, stride=["gt", 0], post_trace=False) @accepts("self", str, stride=[int, tuple], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def study_convolution_stride(self, img_name, stride=4): """ Visualize the effect of a parameter in convolution - stride Args: img_name (str): Path to test images stride(int): Kernel movement stride Returns: None """ network = []; network.append(self.convolution(output_channels=4, kernel_size=5, stride=stride)); self.Compile_Network(network, data_shape=(3, 224, 224), use_gpu=False, network_initializer="xavier_normal", debug=False); self.system_dict["local"]["transforms_test"] = []; self.system_dict["local"]["transforms_test"].append(transforms.Resize(size=(224, 224))); self.system_dict["local"]["transforms_test"].append(transforms.ToTensor()); self.system_dict["local"]["data_transforms"]["test"] = transforms.Compose(self.system_dict["local"]["transforms_test"]); img = image.imread(img_name) img = self.system_dict["local"]["data_transforms"]["test"](img).expand_dims(axis=0); img = img.copyto(self.system_dict["local"]["ctx"][0]); print("Input shape - {}".format(img.shape)) outputs = self.system_dict["local"]["model"].forward(img); print("Output shape - {}".format(outputs.shape)) if(os.path.isdir("tmp")): os.system("rm -r tmp"); os.system("mkdir tmp"); else: os.system("mkdir tmp"); out = outputs[0]; for i in range(out.shape[0]): tmp = out[i].asnumpy() cv2.imwrite("tmp/test" + str(i) + ".jpg", tmp*255) weights = self.system_dict["local"]["model"][0].weight.data().asnumpy() for i in range(weights.shape[0]):
tmp = weights[i] tmp = np.swapaxes(tmp, 0, 1) tmp = np.swapaxes(tmp, 1, 2) cv2.imwrite("tmp/test_filter" + str(i) + ".jpg", tmp*255) ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, None, padding=None, post_trace=False) @error_checks(None, None, padding=["gte", 0], post_trace=False) @accepts("self", str, padding=[int, tuple], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def study_convolution_padding(self, img_name, padding=0): """ Visualize the effect of a parameter in convolution - padding Args: img_name (str): Path to test images padding(int, tuple): External zero padding on input Returns: None """ network = []; network.append(self.convolution(output_channels=4, kernel_size=5, stride=1, padding=padding)); self.Compile_Network(network, data_shape=(3, 224, 224), use_gpu=False, network_initializer="xavier_normal", debug=False); self.system_dict["local"]["transforms_test"] = []; self.system_dict["local"]["transforms_test"].append(transforms.Resize(size=(224, 224))); self.system_dict["local"]["transforms_test"].append(transforms.ToTensor()); self.system_dict["local"]["data_transforms"]["test"] = transforms.Compose(self.system_dict["local"]["transforms_test"]); img = image.imread(img_name) img = self.system_dict["local"]["data_transforms"]["test"](img).expand_dims(axis=0); img = img.copyto(self.system_dict["local"]["ctx"][0]); print("Input shape - {}".format(img.shape)) outputs = self.system_dict["local"]["model"].forward(img); print("Output shape - {}".format(outputs.shape)) if(os.path.isdir("tmp")): os.system("rm -r tmp"); os.system("mkdir tmp"); else: os.system("mkdir tmp"); out = outputs[0]; for i in range(out.shape[0]): tmp = out[i].asnumpy() cv2.imwrite("tmp/test" + str(i) + ".jpg", tmp*255) weights = self.system_dict["local"]["model"][0].weight.data().asnumpy() for i in range(weights.shape[0]): tmp = weights[i] tmp = np.swapaxes(tmp, 0, 1) tmp = np.swapaxes(tmp, 1, 2) cv2.imwrite("tmp/test_filter" + str(i) + ".jpg", tmp*255) ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, None, groups=None, post_trace=False) @error_checks(None, None, groups=["gt", 0], post_trace=False) @accepts("self", str, groups=int, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def study_convolution_grouping(self, img_name, groups=1): """ Visualize the effect of a parameter in convolution - grouping Args: img_name (str): Path to test images groups(int): Number of groups the convolution channels are split into Returns: None """ network = []; network.append(self.convolution(output_channels=4*groups, kernel_size=5, stride=1, groups=1)); network.append(self.convolution(output_channels=groups*4, kernel_size=5, stride=1, groups=groups)); self.Compile_Network(network, data_shape=(3, 224, 224), use_gpu=False, network_initializer="xavier_normal", debug=False); self.system_dict["local"]["transforms_test"] = []; self.system_dict["local"]["transforms_test"].append(transforms.Resize(size=(224, 224))); self.system_dict["local"]["transforms_test"].append(transforms.ToTensor()); self.system_dict["local"]["data_transforms"]["test"] =
transforms.Compose(self.system_dict["local"]["transforms_test"]); img = image.imread(img_name) img = self.system_dict["local"]["data_transforms"]["test"](img).expand_dims(axis=0); img = img.copyto(self.system_dict["local"]["ctx"][0]); print("Input shape - {}".format(img.shape)) outputs = self.system_dict["local"]["model"].forward(img); print("Output shape - {}".format(outputs.shape)) if(os.path.isdir("tmp")): os.system("rm -r tmp"); os.system("mkdir tmp"); else: os.system("mkdir tmp"); out = outputs[0]; for i in range(out.shape[0]): tmp = out[i].asnumpy() cv2.imwrite("tmp/test" + str(i) + ".jpg", tmp*255) weights = self.system_dict["local"]["model"][0].weight.data().asnumpy() for i in range(weights.shape[0]): tmp = weights[i] tmp = np.swapaxes(tmp, 0, 1) tmp = np.swapaxes(tmp, 1, 2) cv2.imwrite("tmp/test_filter" + str(i) + ".jpg", tmp*255) ##################################################################################################################################### ##################################################################################################################################### @warning_checks(None, None, factor=None, post_trace=False) @error_checks(None, None, factor=["gt", 0], post_trace=False) @accepts("self", str, factor=int, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def study_convolution_dilation(self, img_name, factor=1): """ Visualize the effect of a parameter in convolution - dilation Args: img_name (str): Path to test images factor(int): Dilation factor applied to the convolution kernel (must be > 0) Returns: None """ network = []; network.append(self.convolution(output_channels=4, kernel_size=5, stride=1, dilation=factor)); self.Compile_Network(network, data_shape=(3, 224, 224), use_gpu=False, network_initializer="xavier_normal", debug=False); self.system_dict["local"]["transforms_test"] = []; self.system_dict["local"]["transforms_test"].append(transforms.Resize(size=(224, 224))); self.system_dict["local"]["transforms_test"].append(transforms.ToTensor()); self.system_dict["local"]["data_transforms"]["test"] = transforms.Compose(self.system_dict["local"]["transforms_test"]); img = image.imread(img_name) img = self.system_dict["local"]["data_transforms"]["test"](img).expand_dims(axis=0); img = img.copyto(self.system_dict["local"]["ctx"][0]); print("Input shape - {}".format(img.shape)) outputs = self.system_dict["local"]["model"].forward(img); print("Output shape - {}".format(outputs.shape)) if(os.path.isdir("tmp")): os.system("rm -r tmp"); os.system("mkdir tmp"); else: os.system("mkdir tmp"); out = outputs[0]; for i in range(out.shape[0]): tmp = out[i].asnumpy() cv2.imwrite("tmp/test" + str(i) + ".jpg", tmp*255) weights = self.system_dict["local"]["model"][0].weight.data().asnumpy() for i in range(weights.shape[0]): tmp = weights[i] tmp = np.swapaxes(tmp, 0, 1) tmp = np.swapaxes(tmp, 1, 2) cv2.imwrite("tmp/test_filter" + str(i) + ".jpg", tmp*255) #####################################################################################################################################
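# --- Editor's usage sketch (not part of the original module) ---
# Each block helper above returns a list of layer dictionaries, so an
# Inception-style network can be assembled by plain list concatenation and
# then compiled, just as the study_convolution_* helpers do internally.
# "prototype" stands in for an instance of the class that owns these
# methods; its actual name is an assumption here.
#
#   gtf = prototype()  # hypothetical owner instance
#   network = []
#   network += gtf.conv_bn_relu_block(output_channels=32, kernel_size=3, stride=2)
#   network += gtf.conv_bn_relu_block(output_channels=64, kernel_size=3)
#   network += gtf.inception_a_block(pooling_branch_channels=32, pool_type="avg")
#   network += gtf.inception_b_block(pool_type="max")
#   network += gtf.inception_c_block(channels_7x7=128)
#   network += gtf.inception_d_block()
#   network += gtf.inception_e_block()
#   gtf.Compile_Network(network, data_shape=(3, 224, 224), use_gpu=False)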
PypiClean
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/users/item/calendar/events/item/extensions/count/count_request_builder.py
from __future__ import annotations from dataclasses import dataclass from kiota_abstractions.get_path_parameters import get_path_parameters from kiota_abstractions.method import Method from kiota_abstractions.request_adapter import RequestAdapter from kiota_abstractions.request_information import RequestInformation from kiota_abstractions.request_option import RequestOption from kiota_abstractions.response_handler import ResponseHandler from kiota_abstractions.serialization import Parsable, ParsableFactory from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union if TYPE_CHECKING: from ........models.o_data_errors import o_data_error class CountRequestBuilder(): """ Provides operations to count the resources in the collection. """ def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None: """ Instantiates a new CountRequestBuilder and sets the default values. Args: pathParameters: The raw url or the Url template parameters for the request. requestAdapter: The request adapter to use to execute the requests. """ if path_parameters is None: raise Exception("path_parameters cannot be undefined") if request_adapter is None: raise Exception("request_adapter cannot be undefined") # Url template to use to build the URL for the current request builder self.url_template: str = "{+baseurl}/users/{user%2Did}/calendar/events/{event%2Did}/extensions/$count{?%24filter}" url_tpl_params = get_path_parameters(path_parameters) self.path_parameters = url_tpl_params self.request_adapter = request_adapter async def get(self,request_configuration: Optional[CountRequestBuilderGetRequestConfiguration] = None) -> Optional[int]: """ Get the number of the resource Args: requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options. Returns: Optional[int] """ request_info = self.to_get_request_information( request_configuration ) from ........models.o_data_errors import o_data_error error_mapping: Dict[str, ParsableFactory] = { "4XX": o_data_error.ODataError, "5XX": o_data_error.ODataError, } if not self.request_adapter: raise Exception("Http core is null") return await self.request_adapter.send_primitive_async(request_info, "int", error_mapping) def to_get_request_information(self,request_configuration: Optional[CountRequestBuilderGetRequestConfiguration] = None) -> RequestInformation: """ Get the number of the resource Args: requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options. Returns: RequestInformation """ request_info = RequestInformation() request_info.url_template = self.url_template request_info.path_parameters = self.path_parameters request_info.http_method = Method.GET request_info.headers["Accept"] = ["text/plain"] if request_configuration: request_info.add_request_headers(request_configuration.headers) request_info.set_query_string_parameters_from_raw_object(request_configuration.query_parameters) request_info.add_request_options(request_configuration.options) return request_info @dataclass class CountRequestBuilderGetQueryParameters(): """ Get the number of the resource """ def get_query_parameter(self,original_name: Optional[str] = None) -> str: """ Maps the query parameters names to their encoded names for the URI template parsing. Args: originalName: The original query parameter name in the class. 
Returns: str """ if original_name is None: raise Exception("original_name cannot be undefined") if original_name == "filter": return "%24filter" return original_name # Filter items by property values filter: Optional[str] = None @dataclass class CountRequestBuilderGetRequestConfiguration(): """ Configuration for the request such as headers, query parameters, and middleware options. """ # Request headers headers: Optional[Dict[str, Union[str, List[str]]]] = None # Request options options: Optional[List[RequestOption]] = None # Request query parameters query_parameters: Optional[CountRequestBuilder.CountRequestBuilderGetQueryParameters] = None
PypiClean
/BioTEMPy-2.1.0a0-py3-none-any.whl/TEMPy/cli/arg_parser.py
import argparse import os import urllib import tempfile from TEMPy.protein.structure_parser import PDBParser, mmCIFParser from TEMPy.maps.map_parser import MapParser class Model: NUM_MODELS = 0 DEFAULT_COLOURS = ["#3498db", "#28b463", "#f1c40f"] def __init__(self, data): self._data = data self._name = None self._color = None self._id = Model.NUM_MODELS Model.NUM_MODELS += 1 def get_data(self): return self._data def get_name(self): if self._name is not None: return self._name return os.path.basename(self._data.filename).split(".")[0] def get_color(self): if self._color is not None: return self._color return Model.DEFAULT_COLOURS[self._id] def set_name(self, name): self._name = name def set_color(self, color): self._color = color def parse_color(color_str): import string if len(color_str) > 6: if color_str[0] != "#": raise argparse.ArgumentTypeError( "Expected color to be in hexadecimal format. E.g. #12ab34 or 12ab34" ) color_str = color_str[1:] if len(color_str) != 6: raise argparse.ArgumentTypeError( "Expected color to have 6 hexadecimal digits. E.g. #12ab34 or 12ab34" ) if not all(c in string.hexdigits for c in color_str): raise argparse.ArgumentTypeError( "Expected color to have 6 hexadecimal digits. E.g. #12ab34 or 12ab34" ) return color_str def parse_plot_unmodelled(action): action = action.lower() if action in ("band", "dash", "blank"): return action else: raise argparse.ArgumentTypeError("Expected one of: 'band', 'dash', 'blank'") def parse_format(format): format = format.lower() if format in ("tsv", "csv", "json", "pdf", "png"): return format raise argparse.ArgumentTypeError( "Expected format to be one or more of: tsv, csv, json, pdf or png" ) def parse_plot_normalize(format): format = format.lower() if format in ("zscore",): return format raise argparse.ArgumentTypeError("Expected one of 'zscore'") def parse_plot_type(format): format = format.lower() if format in ("residue", "violin"): return format raise argparse.ArgumentTypeError( "Expected format to be one or more of: residue, violin" ) def parse_plot_residue_range(residue_range): try: chain, residue_range = residue_range.split(":") except Exception: chain = None try: start, end = residue_range.split("-") start_int = int(start) end_int = int(end) return [chain, start_int, end_int] except Exception: raise argparse.ArgumentTypeError( "Expected residue range to be in the format START-END. E.g. 42-69" ) def parse_plot_span(span): try: chain, residue_range, color = span.split(":") except Exception: chain = None try: start, end = residue_range.split("-") start_int = int(start) end_int = int(end) color = parse_color(color) return [chain, start_int, end_int, color] except argparse.ArgumentTypeError as e: raise e except Exception as e: print(e) raise argparse.ArgumentTypeError( "Expected residue range to be in the format START-END:COLOR. E.g. 
42-69:ff00ff" ) def _is_rcsb_link(path): return path.startswith("rcsb:") def _is_emdb_link(path): return path.startswith("emdb:") def _get_tempfile(filename): temp_dir = tempfile.gettempdir() return f"/{temp_dir}/{filename}" def _read_from_rcsb(path): parts = path.split(":") if len(parts) == 2: _, pdb_id = parts local_file = _get_tempfile(f"rcsb_{pdb_id}.cif") if not os.path.isfile(local_file): url = f"http://www.rcsb.org/pdb/files/{pdb_id}.cif" print(f"Downloading {pdb_id} from RCSB") urllib.request.urlretrieve(url, filename=local_file) elif len(parts) == 3: _, pdb_id, version = parts local_file = _get_tempfile(f"rcsb_{pdb_id}_v{version}.cif.gz") if not os.path.isfile(local_file): print(f"Downloading {pdb_id} v{version} from RCSB") # RCSB publishes latest minor revision only. We just increment # until we find the file. We could be clever and FTP query later. max_minor = 10 found_minor = True for minor in range(max_minor): url = f"https://ftp-versioned.rcsb.org/pdb_versioned/data/entries/{pdb_id[1:3]}/pdb_0000{pdb_id}/pdb_0000{pdb_id}_xyz_v{version}-{minor}.cif.gz" try: urllib.request.urlretrieve(url, filename=local_file) found_minor = True break except Exception: continue if not found_minor: raise Exception( f"Couldn't find major version {version} of {pdb_id} on RCSB server" ) return mmCIFParser.read_mmCIF_file(local_file) def _read_from_emdb(path): emdb_id = path[5:] local_file = _get_tempfile(f"emdb_{emdb_id}.mrc.gz") if not os.path.isfile(local_file): url = f"https://ftp.ebi.ac.uk/pub/databases/emdb/structures/EMD-{emdb_id}/map/emd_{emdb_id}.map.gz" print(f"Downloading {emdb_id} from EMDB") urllib.request.urlretrieve(url, filename=local_file) return MapParser.readMRC(local_file) def _read_half_map_from_emdb(path, half): emdb_id = path[5:] local_file = _get_tempfile(f"emdb_{emdb_id}_{half}.mrc.gz") if not os.path.isfile(local_file): url = f"https://ftp.ebi.ac.uk/pub/databases/emdb/structures/EMD-{emdb_id}/other/emd_{emdb_id}_half_map_{half}.map.gz" print(f"Downloading half_map {emdb_id}_{half} from EMDB") urllib.request.urlretrieve(url, filename=local_file) return MapParser.readMRC(local_file) def parse_model(path): """Given a path or PDB accession tries to return a map. Args: path (str): A filename or PDB accession in the form of rcsb:1ake Returns: Map: Raise: argparse.ArgumentTypeError """ if _is_rcsb_link(path): try: return _read_from_rcsb(path) except Exception as e: raise argparse.ArgumentTypeError(e) try: return PDBParser().read_PDB_file("test", path) except Exception: pass try: return mmCIFParser(path).read_mmCIF_file(path) except Exception: pass raise argparse.ArgumentTypeError(f"Failed to read model file: {path}") def parse_map(path): """Given a path or EMDB accession tries to return a map. 
Args: path (str): A filename or EMDB accession in the form of emdb:1234 Returns: Map: Raise: argparse.ArgumentTypeError """ if _is_emdb_link(path): try: return _read_from_emdb(path) except Exception: raise argparse.ArgumentTypeError(f"Failed to get map with id: {path[5:]}") try: return MapParser.readMRC(path) except Exception: raise argparse.ArgumentTypeError(f"Failed to read map file: {path}") def _parse_half_map(path, half): if _is_emdb_link(path): try: return _read_half_map_from_emdb(path, half) except Exception: raise argparse.ArgumentTypeError(f"Failed to get map with id: {path[5:]}") try: return MapParser.readMRC(path) except Exception: raise argparse.ArgumentTypeError(f"Failed to read map file: {path}") def parse_half_map_1(path): return _parse_half_map(path, 1) def parse_half_map_2(path): return _parse_half_map(path, 2) class TEMPyArgParser: def __init__(self, script_name): self.parser = argparse.ArgumentParser(script_name) self.default_group = self.parser.add_argument_group("standard TEMPy arguments") self.args = None def parse_args(self): self.args = self.parser.parse_args() self.check_one_range_per_chain() return self.args def check_one_range_per_chain(self): seen = set() try: if self.args.plot_residue_range: for chain, start, end in self.args.plot_residue_range: if chain in seen: self.parser.error( f"Only one range per chain can be specified. Chain {chain} has more than one range specified." ) seen.add(chain) except: pass def get_models(self): if len(self.args.model_colors) > 0: if len(self.args.model_colors) != len(self.args.model): self.parser.error( f"The number of models {len(self.args.model)} and colors {len(self.args.model_colors)} do not match" ) if len(self.args.model_names) > 0: if len(self.args.model_names) != len(self.args.model): self.parser.error( f"The number of models {len(self.args.model)} and names {len(self.args.model_names)} do not match" ) models = [Model(m) for m in self.args.model] for model, color in zip(models, self.args.model_colors): model.set_color(color) for model, name in zip(models, self.args.model_names): model.set_name(name) return models def add_model_arg(self, multiple=False): self.model_group = self.parser.add_argument_group( "model", "Set additional model information" ) if multiple: self.model_group.add_argument( "-p", "--models", help="A model file in PDB, CIF or mmCIF formats. Alternatively, the accession number eg. rcsb:1234", dest="model", required=True, nargs="+", type=parse_model, ) else: self.model_group.add_argument( "-p", "--models", help="A model file in PDB, CIF or mmCIF formats. Alternatively, the accession number eg. rcsb:1234", dest="model", required=True, type=parse_model, ) self.model_group.add_argument( "--model-colors", help="Color of the models for plotting", dest="model_colors", default=[], nargs="+", type=parse_color, ) self.model_group.add_argument( "--model-names", help="Names of the models for plotting", dest="model_names", default=[], nargs="+", ) def add_map_arg(self): self.default_group.add_argument( "-m", "--map", help="A EM file in MRC format. Alternatively, the accession number eg. emdb:1234", dest="map", required=True, type=parse_map, ) def add_half_map_args(self, required=True): self.default_group.add_argument( "-hm1", "--half-map-1", help="A EM half map file in MRC format. Alternatively, the accession number eg. emdb:1234", dest="hmap1", required=required, type=parse_half_map_1, ) self.default_group.add_argument( "-hm2", "--half-map-2", help="A EM half map file in MRC format. 
Alternatively, the accession number eg. emdb:1234", dest="hmap2", required=required, type=parse_half_map_2, ) def add_resolution_arg(self): self.default_group.add_argument( "-r", "--resolution", dest="resolution", help="Estimated resolution of EM map", required=True, type=float, ) def add_residue_output_writer(self): output_group = self.parser.add_argument_group("output") output_group.add_argument( "--output-format", dest="output_formats", help="Output format: CSV, TSV, JSON, PDF, PNG", nargs="+", required=True, default=["tsv"], type=parse_format, ) output_group.add_argument( "--output-prefix", dest="output_prefix", help="Use a custom prefix for output files", required=False, default=None, type=str, ) plot_group = self.parser.add_argument_group("plot") plot_group.add_argument( "--plot-type", dest="plot_types", help="Plot type: residue, violin", nargs="+", default=["residue"], type=parse_plot_type, ) plot_group.add_argument( "--plot-residue-range", dest="plot_residue_range", help="Plot only the specified range", default=None, nargs="*", type=parse_plot_residue_range, ) plot_group.add_argument( "--plot-normalize", dest="plot_normalize", help="Normalize the plot", default=None, type=parse_plot_normalize, ) plot_group.add_argument( "--plot-bands", dest="plot_bands", help="Draw colored bands around ranges of residues. E.g. A:10-20:00ff00 A:70-110:ff0000", default=None, nargs="*", type=parse_plot_span, ) plot_group.add_argument( "--plot-unmodelled", dest="plot_unmodelled", help="Draw colored bands around ranges of residues. E.g. A:10-20:00ff00 A:70-110:ff0000", default="blank", type=parse_plot_unmodelled, )
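# --- Editor's usage sketch (not part of the original module) ---
# Minimal demonstration of wiring TEMPyArgParser for a scoring script,
# using only the helpers defined above; the command line in the comment
# is a placeholder.
if __name__ == "__main__":
    # e.g. python arg_parser.py -p model.pdb -m map.mrc -r 3.2 --output-format tsv
    demo_parser = TEMPyArgParser("tempy_demo")
    demo_parser.add_model_arg(multiple=True)
    demo_parser.add_map_arg()
    demo_parser.add_resolution_arg()
    demo_parser.add_residue_output_writer()
    demo_args = demo_parser.parse_args()
    for demo_model in demo_parser.get_models():
        print(demo_model.get_name(), demo_model.get_color())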
PypiClean
/mpp-solar-0.7.76.tar.gz/mpp-solar-0.7.76/mppsolar/protocols/jk02.py
import logging from .jkabstractprotocol import jkAbstractProtocol log = logging.getLogger("jk02") NEW_COMMANDS = { "getCellData": { "name": "getCellData", "command_code": "96", "record_type": "2", "description": "BLE Cell Data inquiry", "help": " -- queries the ble device for the cell data", "type": "QUERY", "response_type": "POSITIONAL", "response": [ ["Hex2Str", 4, "Header", ""], ["Hex2Str", 1, "Record_Type", ""], ["Hex2Int", 1, "Record_Counter", ""], ["LittleHex2Short:r/1000", 2, "Voltage_Cell01", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell02", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell03", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell04", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell05", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell06", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell07", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell08", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell09", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell10", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell11", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell12", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell13", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell14", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell15", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell16", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell17", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell18", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell19", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell20", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell21", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell22", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell23", "V"], ["LittleHex2Short:r/1000", 2, "Voltage_Cell24", "V"], ["discard", 4, "discard1", ""], ["LittleHex2Short:r/1000", 2, "Average_Cell_Voltage", "V"], ["LittleHex2Short:r/1000", 2, "Delta_Cell_Voltage", "V"], ["LittleHex2Short:r/1000", 2, "Current_Balancer", ""], ["LittleHex2Short:r/1000", 2, "Resistance_Cell01", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell02", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell03", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell04", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell05", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell06", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell07", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell08", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell09", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell10", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell11", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell12", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell13", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell14", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell15", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell16", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell17", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell18", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell19", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell20", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell21", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell22", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell23", "Ohm"], ["LittleHex2Short:r/1000", 2, "Resistance_Cell24", "Ohm"], ["discard", 6, "discard2", ""], ["LittleHex2UInt:r/1000", 4, "Battery_Voltage", "V"], ["LittleHex2Float", 4, "Battery_Power", "W"], ["LittleHex2Float", 4, "Charge_Current", "A"], 
# signed int32 # ["discard", 8, "discard3", ""], ["LittleHex2Short:r/10", 2, "Battery_T1", "°C"], ["LittleHex2Short:r/10", 2, "Battery_T2", "°C"], ["LittleHex2Short:r/10", 2, "MOS_Temp", "°C"], ["discard", 4, "discard4", ""], # discard4 ["discard", 1, "discard4_1", ""], # added ["Hex2Int", 1, "Percent_Remain", ""], ["LittleHex2UInt:r/1000", 4, "Capacity_Remain", ""], # Unknown6+7 ["LittleHex2UInt:r/1000", 4, "Nominal_Capacity", ""], # Unknown8+9 ["Hex2Str", 4, "Cycle_Count", ""], # ["discard", 2, "Unknown10", ""], # ["discard", 2, "Unknown11", ""], ["LittleHex2UInt:r/1000", 4, "Capacity_Cycle", ""], # Unknown10+11 ["discard", 2, "Unknown12", ""], ["discard", 2, "Unknown13", ""], ["uptime", 3, "Time", ""], ["discard", 2, "Unknown15", ""], ["discard", 2, "Unknown16", ""], ["discard", 2, "Unknown17", ""], ["discard", 12, "discard6", ""], ["discard", 2, "Unknown18", ""], ["discard", 2, "Unknown19", ""], ["discard", 2, "Unknown20", ""], ["LittleHex2Short:r/1000", 2, "Current_Charge", ""], # Unknown21 ["LittleHex2Short:r/1000", 2, "Current_Discharge", ""], # Unknown22 ["discard", 2, "Unknown23", ""], ["discard", 2, "Unknown24", ""], ["discard", 2, "Unknown25", ""], ["discard", 2, "Unknown26", ""], ["discard", 2, "Unknown27", ""], ["discard", 2, "Unknown28", ""], ["discard", 2, "Unknown29", ""], ["discard", 93, "Unknown30", ""], ], "test_responses": [ bytes.fromhex( "55aaeb9002b52e0d280dfa0c2e0d2f0d220d220d130d190d1d0d1d0d170d1f0d160dfb0c1f0d00000000000000000000000000000000ffff00001c0d350004029b00c600a000b300bc00cc00be00b100b4002d013d01b000a100ab00b200ad0000000000000000000000000000000000000000000000bcd1000000000000000000001e0116013c010000000000636b0c0300400d030000000000dc4d010064000000781e16000101480a000000000000000000000000070101000000980400000000260141400000000037feffff00000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080" ), bytes.fromhex( "55aaeb9002bb2e0d280dfa0c2e0d2f0d220d220d130d190d1d0d1d0d170d1f0d160dfb0c1f0d00000000000000000000000000000000ffff00001b0d350004029b00c600a000b300bc00cc00be00b100b4002d013d01b000a100ab00b200ad0000000000000000000000000000000000000000000000b8d1000000000000000000001e0114013c010000000000636b0c0300400d030000000000dc4d0100640000007a1e16000101480a000000000000000000000000070101000000980400000000260141400000000037feffff00000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000081" ), bytes.fromhex( "55 AA EB 90 02 10 AD 0E 52 0E 53 0E 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 07 00 00 00 70 0E 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 51 2B 00 00 00 00 00 00 00 00 00 00 30 F8 30 F8 53 01 00 00 0C 01 00 00 C2 14 00 00 70 17 00 00 00 00 00 00 8F 01 00 00 00 00 51 07 AF 69 00 00 00 00 CB 06 00 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 00 11 04 00 00 00 00 92 4A 3B 40 00 00 00 00 AD 08 00 00 00 00 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 0C" ), 
b"U\xaa\xeb\x90\x02\xa7\xd8\x0e\xd8\x0e\xd7\x0e\xd8\x0e\xd8\x0e\xda\x0e\xd7\x0e\xda\x0e\xd7\x0e\xd8\x0e\xd8\x0e\xd8\x0e\xc3\x0e\xda\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff?\x00\x00\xd7\x0e\x19\x00\x0c\x02m\x00n\x00o\x00n\x00o\x00m\x00p\x00l\x00l\x00l\x00l\x00l\x00o\x00l\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc1\xcf\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf5\x00\xda\x00\x05\x01\x00\x00\x02\xf9\x02EN!\x00\x00\xc7\x00\x00\x03\x00\x00\x00\x87\x91\x00\x00W\x00)\x03\x16\x11\x04\x00\x01\x01\xa8\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x01\x00\x00\x00\x11\x04\x00\x00\x00\x00\xee(@@\x7f\x00\x00\x00i\xfd\xff\xff\x00\x00\x00\x01\x00\x01\x00\x00t\xa3(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00F", ], }, } class jk02(jkAbstractProtocol): """ JK02 - Handler for JKBMS 2 byte data communication - e.g. ASAS = ??V """ def __init__(self, *args, **kwargs) -> None: super().__init__() self._protocol_id = b"JK02" self.COMMANDS.update(NEW_COMMANDS) self.STATUS_COMMANDS = [ "getCellData", ] self.SETTINGS_COMMANDS = [ "getInfo", ] self.DEFAULT_COMMAND = "getCellData"
PypiClean
/remita_rits-1.0.6-py3-none-any.whl/GetAccountEnquiry/AccountEnquiry.py
from datetime import datetime import requests from requests import ConnectTimeout, ReadTimeout from RemitaInterbankService.BaseResponse import BaseResponse from RemitaInterbankService.EncryptionUtil import EncryptionConfig from RemitaInterbankService.EnvironmentConfig import EnvironmentConfig from RemitaInterbankService.SdkResponseCode import SdkResponseCode from RemitaInterbankService.Timestamp import Timestamp class AccountEnquiry(object): def account_enquiry(self, account_enquiry_payload, credentials): try: get_response = EnvironmentConfig() if not get_response.credential_available(credentials): return get_response.throw_exception(status=get_response.empty_credential_code, data=get_response.empty_credential_msg) else: rpg_environment = EnvironmentConfig.set_rpg_environment(credentials) headers = self.set_header(account_enquiry_payload, credentials) url = rpg_environment['ACCOUNT_ENQUIRY_URL'] if not credentials.connection_timeout: credentials.connection_timeout = 30000 config = EncryptionConfig() payload = { 'accountNo': config.AES128(credentials.enc_key, account_enquiry_payload.account_number,credentials.enc_vector), 'bankCode': config.AES128(credentials.enc_key, account_enquiry_payload.bank_code, credentials.enc_vector) } try: response = requests.post(url, headers=headers, json=payload, timeout=credentials.connection_timeout) account_enquiry_response = BaseResponse(response.content) except ConnectTimeout: return get_response.throw_exception(status=SdkResponseCode.CONNECTION_TIMEOUT_CODE, data=SdkResponseCode.CONNECTION_TIMEOUT) except ValueError: return get_response.throw_exception(status=SdkResponseCode.ERROR_IN_VALUE_CODE, data=SdkResponseCode.ERROR_IN_VALUE) except ReadTimeout: return get_response.throw_exception(status=SdkResponseCode.CONNECTION_TIMEOUT_CODE, data=SdkResponseCode.CONNECTION_TIMEOUT) except ConnectionError as e: # This is the correct syntax return get_response.throw_exception(status=SdkResponseCode.ERROR_WHILE_CONNECTING_CODE, data=SdkResponseCode.ERROR_WHILE_CONNECTING) return account_enquiry_response except Exception: return get_response.throw_exception(status=SdkResponseCode.ERROR_PROCESSING_REQUEST_CODE, data=SdkResponseCode.ERROR_PROCESSING_REQUEST) def set_header(self, account_enquiry_payload, credentials): hash_string = credentials.api_key + account_enquiry_payload.request_id + credentials.api_token txn_hash = EncryptionConfig.sha512(hash_string) time_stamp = Timestamp() headers = {'Content-Type': 'application/json', 'MERCHANT_ID':credentials.merchant_id, 'API_KEY':credentials.api_key, 'REQUEST_ID':account_enquiry_payload.request_id, 'REQUEST_TS':time_stamp.dateTimeObj(dateTimeObj=datetime.now()), 'API_DETAILS_HASH': txn_hash} return headers
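# --- Editor's usage sketch (not part of the original module) ---
# A hedged example of calling account_enquiry(). The payload and credential
# objects are plain holders from the surrounding SDK; only the attribute
# names actually read above (request_id, account_number, bank_code,
# merchant_id, api_key, api_token, enc_key, enc_vector, connection_timeout)
# are assumed to exist on them.
#
#   enquiry = AccountEnquiry()
#   response = enquiry.account_enquiry(payload, credentials)
#   # `response` is a BaseResponse wrapping the gateway reply, or an error
#   # object built from SdkResponseCode on failure.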
PypiClean
/mailman_pgp-0.3.0.tar.gz/mailman_pgp-0.3.0/src/mailman_pgp/mta/deliver.py
"""""" import logging import time from mailman.config import config from mailman.interfaces.mailinglist import Personalization from mailman.interfaces.mta import SomeRecipientsFailed from mailman.mta.bulk import BulkDelivery from mailman.mta.deliver import Deliver from mailman.utilities.string import expand from public import public from mailman_pgp.model.list import PGPMailingList from mailman_pgp.mta.bulk import PGPBulkDelivery from mailman_pgp.mta.personalized import PGPPersonalizedDelivery COMMA = ',' log = logging.getLogger('mailman.smtp') @public def deliver(mlist, msg, msgdata): """Deliver a message to the outgoing mail server.""" # If there are no recipients, there's nothing to do. recipients = msgdata.get('recipients') if not recipients: # Could be None, could be an empty sequence. return # Which delivery agent should we use? Several situations can cause us to # use individual delivery. If not specified, use bulk delivery. See the # to-outgoing handler for when the 'verp' key is set in the metadata. personalized_agent = Deliver bulk_agent = BulkDelivery pgp_list = PGPMailingList.for_list(mlist) if pgp_list and msgdata.get('pgp_is_posting', False): personalized_agent = PGPPersonalizedDelivery bulk_agent = PGPBulkDelivery if msgdata.get('verp', False): agent = personalized_agent() elif mlist.personalize != Personalization.none: agent = personalized_agent() else: agent = bulk_agent(int(config.mta.max_recipients)) log.debug('Using agent: %s', agent) # Keep track of the original recipients and the original sender for # logging purposes. original_recipients = msgdata['recipients'] original_sender = msgdata.get('original-sender', msg.sender) # Let the agent attempt to deliver to the recipients. Record all failures # for re-delivery later. t0 = time.time() refused = agent.deliver(mlist, msg, msgdata) t1 = time.time() # Log this posting. size = getattr(msg, 'original_size', msgdata.get('original_size')) if size is None: size = len(msg.as_string()) substitutions = dict( msgid=msg.get('message-id', 'n/a'), # noqa: E221, E251 listname=mlist.fqdn_listname, # noqa: E221, E251 sender=original_sender, # noqa: E221, E251 recip=len(original_recipients), # noqa: E221, E251 size=size, # noqa: E221, E251 time=t1 - t0, # noqa: E221, E251 refused=len(refused), # noqa: E221, E251 smtpcode='n/a', # noqa: E221, E251 smtpmsg='n/a', # noqa: E221, E251 ) template = config.logging.smtp.every if template.lower() != 'no': log.info('%s', expand(template, mlist, substitutions)) if refused: template = config.logging.smtp.refused if template.lower() != 'no': log.info('%s', expand(template, mlist, substitutions)) else: # Log the successful post, but if it was not destined to the mailing # list (e.g. to the owner or admin), print the actual recipients # instead of just the number. if not msgdata.get('tolist', False): recips = msg.get_all('to', []) recips.extend(msg.get_all('cc', [])) substitutions['recips'] = COMMA.join(recips) template = config.logging.smtp.success if template.lower() != 'no': log.info('%s', expand(template, mlist, substitutions)) # Process any failed deliveries. temporary_failures = [] permanent_failures = [] for recipient, (code, smtp_message) in refused.items(): # RFC 5321, $4.5.3.1.10 says: # # RFC 821 [1] incorrectly listed the error where an SMTP server # exhausts its implementation limit on the number of RCPT commands # ("too many recipients") as having reply code 552. The correct # reply code for this condition is 452. 
Clients SHOULD treat a 552 # code in this case as a temporary, rather than permanent, failure # so the logic below works. # if code >= 500 and code != 552: # A permanent failure permanent_failures.append(recipient) else: # Deal with persistent transient failures by queuing them up for # future delivery. TBD: this could generate lots of log entries! temporary_failures.append(recipient) template = config.logging.smtp.failure if template.lower() != 'no': substitutions.update( recip=recipient, # noqa: E221, E251 smtpcode=code, # noqa: E221, E251 smtpmsg=smtp_message # noqa: E221, E251 ) log.info('%s', expand(template, mlist, substitutions)) # Return the results if temporary_failures or permanent_failures: raise SomeRecipientsFailed(temporary_failures, permanent_failures)
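# --- Editor's note (not part of the original module) ---
# A standalone sketch of the failure-classification rule implemented above:
# 5xx SMTP codes are permanent failures, except 552, which RFC 5321
# says clients should treat as temporary.
if __name__ == '__main__':
    def classify(code):
        # Mirrors the branch in deliver(): permanent iff >= 500 and not 552.
        return 'permanent' if code >= 500 and code != 552 else 'temporary'
    assert classify(550) == 'permanent'
    assert classify(552) == 'temporary'
    assert classify(452) == 'temporary'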
PypiClean
/gbptesthorizon-2014.2.tar.gz/gbptesthorizon-2014.2.dev67.g70d5b01/tools/install_venv_common.py
from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. """ if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. 
Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv()
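# --- Editor's usage sketch (not part of the original module) ---
# How install_venv.py scripts typically drive this helper; the paths and
# project name below are placeholders, and running the sequence executes
# real virtualenv/pip commands.
#
#   root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#   install = InstallVenv(root, os.path.join(root, '.venv'),
#                         'requirements.txt', 'test-requirements.txt',
#                         'python2.7', 'horizon')
#   options = install.parse_args(sys.argv)
#   install.check_python_version()
#   install.check_dependencies()
#   install.create_virtualenv(no_site_packages=options.no_site_packages)
#   install.install_dependencies()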
PypiClean
/netoprmgr-1.3.5.tar.gz/netoprmgr-1.3.5/netmiko/eltex/eltex_esr_ssh.py
import time from netmiko.cisco_base_connection import CiscoSSHConnection class EltexEsrSSH(CiscoSSHConnection): """Netmiko support for routers Eltex ESR.""" def session_preparation(self): """Prepare the session after the connection has been established.""" self.ansi_escape_codes = True self._test_channel_read() self.set_base_prompt() self.disable_paging(command="terminal datadump") # Clear the read buffer time.sleep(0.3 * self.global_delay_factor) self.clear_buffer() def config_mode(self, config_command="configure", pattern=r")#"): """Enter configuration mode.""" return super().config_mode(config_command=config_command, pattern=pattern) def check_config_mode(self, check_string="(config", pattern=""): """Checks whether in configuration mode. Returns a boolean.""" return super().check_config_mode(check_string=check_string, pattern=pattern) def save_config(self, *args, **kwargs): """Not Implemented (use commit() method)""" raise NotImplementedError def commit(self, delay_factor=1): """ Commit the candidate configuration. Commit the entered configuration. Raise an error and return the failure if the commit fails. default: command_string = commit """ delay_factor = self.select_delay_factor(delay_factor) error_marker = "Can't commit configuration" command_string = "commit" if self.check_config_mode(): self.exit_config_mode() output = self.send_command( command_string=command_string, delay_factor=delay_factor ) if error_marker in output: raise ValueError( "Commit failed with following errors:\n\n{}".format(output) ) return output def _confirm(self, delay_factor=1): """ Confirm the candidate configuration. Raise an error and return the failure if the confirm fails. """ delay_factor = self.select_delay_factor(delay_factor) error_marker = "Nothing to confirm in configuration" command_string = "confirm" if self.check_config_mode(): self.exit_config_mode() output = self.send_command( command_string=command_string, delay_factor=delay_factor ) if error_marker in output: raise ValueError( "Confirm failed with following errors:\n\n{}".format(output) ) return output def _restore(self, delay_factor=1): """ Restore the candidate configuration. Raise an error and return the failure if the restore fails. """ delay_factor = self.select_delay_factor(delay_factor) error_marker = "Can't find backup of previous configuration!" command_string = "restore" if self.check_config_mode(): self.exit_config_mode() output = self.send_command( command_string=command_string, delay_factor=delay_factor ) if error_marker in output: raise ValueError( "Restore failed with following errors:\n\n{}".format(output) ) return output
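# --- Editor's usage sketch (not part of the original module) ---
# Driving this class through Netmiko's ConnectHandler. The "eltex_esr"
# device_type string and the credentials are assumptions; substitute the
# platform key under which this class is registered in your Netmiko version.
#
#   from netmiko import ConnectHandler
#   conn = ConnectHandler(device_type="eltex_esr", host="192.0.2.1",
#                         username="admin", password="secret")
#   conn.send_config_set(["hostname esr-lab1"])
#   conn.commit()      # apply the candidate configuration
#   conn._confirm()    # confirm it so it survives the confirmation timer
#   conn.disconnect()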
PypiClean
/py-opengauss-1.3.10.tar.gz/py-opengauss-1.3.10/py_opengauss/documentation/sphinx/copyman.rst
.. _pg_copyman: *************** Copy Management *************** The `postgresql.copyman` module provides a way to quickly move COPY data coming from one connection to many connections. Alternatively, it can be sourced by arbitrary iterators and target arbitrary callables. Statement execution methods offer a way for running COPY operations with iterators, but the cost of allocating objects for each row is too significant for transferring gigabytes of COPY data from one connection to another. The interfaces available on statement objects are primarily intended to be used when transferring COPY data to and from arbitrary Python objects. Direct connection-to-connection COPY operations can be performed using the high-level `postgresql.copyman.transfer` function:: >>> from postgresql import copyman >>> send_stmt = source.prepare("COPY (SELECT i FROM generate_series(1, 1000000) AS g(i)) TO STDOUT") >>> destination.execute("CREATE TEMP TABLE loading_table (i int8)") >>> receive_stmt = destination.prepare("COPY loading_table FROM STDIN") >>> total_rows, total_bytes = copyman.transfer(send_stmt, receive_stmt) However, if more control is needed, the `postgresql.copyman.CopyManager` class should be used directly. Copy Managers ============= The `postgresql.copyman.CopyManager` class manages the Producer and the Receivers involved in a COPY operation. Normally, `postgresql.copyman.StatementProducer` and `postgresql.copyman.StatementReceiver` instances. Naturally, a Producer is the object that produces the COPY data to be given to the Manager's Receivers. Using a Manager directly means that there is a need for more control over the operation. The Manager is both a context manager and an iterator. The context manager interfaces handle initialization and finalization of the COPY state, and the iterator provides an event loop emitting information about the amount of COPY data transferred this cycle. Normal usage takes the form:: >>> from postgresql import copyman >>> send_stmt = source.prepare("COPY (SELECT i FROM generate_series(1, 1000000) AS g(i)) TO STDOUT") >>> destination.execute("CREATE TEMP TABLE loading_table (i int8)") >>> receive_stmt = destination.prepare("COPY loading_table FROM STDIN") >>> producer = copyman.StatementProducer(send_stmt) >>> receiver = copyman.StatementReceiver(receive_stmt) >>> >>> with source.xact(), destination.xact(): ... with copyman.CopyManager(producer, receiver) as copy: ... for num_messages, num_bytes in copy: ... update_rate(num_bytes) As an alternative to a for-loop inside a with-statement block, the `run` method can be called to perform the operation:: >>> with source.xact(), destination.xact(): ... copyman.CopyManager(producer, receiver).run() However, there is little benefit beyond using the high-level `postgresql.copyman.transfer` function. Manager Interface Points ------------------------ Primarily, the `postgresql.copyman.CopyManager` provides a context manager and an iterator for controlling the COPY operation. ``CopyManager.run()`` Perform the entire COPY operation. ``CopyManager.__enter__()`` Start the COPY operation. Connections taking part in the COPY should **not** be used until ``__exit__`` is ran. ``CopyManager.__exit__(typ, val, tb)`` Finish the COPY operation. Fails in the case of an incomplete COPY, or an untrapped exception. Either returns `None` or raises the generalized exception, `postgresql.copyman.CopyFail`. ``CopyManager.__iter__()`` Returns the CopyManager instance. 
``CopyManager.__next__()`` Transfer the next chunk of COPY data to the receivers. Returns a tuple consisting of the number of messages and bytes transferred, ``(num_messages, num_bytes)``. Raises `StopIteration` when complete. Raises `postgresql.copyman.ReceiverFault` when a Receiver raises an exception. Raises `postgresql.copyman.ProducerFault` when the Producer raises an exception. The original exception is available via the exception's ``__context__`` attribute. ``CopyManager.reconcile(faulted_receiver)`` Reconcile a faulted receiver. When a receiver faults, it will no longer be in the set of Receivers. This method is used to signal to the manager that the problem has been corrected, and the receiver is again ready to receive. ``CopyManager.receivers`` The `builtins.set` of Receivers involved in the COPY operation. ``CopyManager.producer`` The Producer emitting the data to be given to the Receivers. Faults ====== The CopyManager generalizes any exceptions that occur during transfer. While inside the context manager, `postgresql.copyman.Fault` may be raised if a Receiver or a Producer raises an exception. A `postgresql.copyman.ProducerFault` in the case of the Producer, and `postgresql.copyman.ReceiverFault` in the case of the Receivers. .. note:: Faults are only raised by `postgresql.copyman.CopyManager.__next__`. The ``run()`` method will only raise `postgresql.copyman.CopyFail`. Receiver Faults --------------- The Manager assumes the Fault is fatal to a Receiver, and immediately removes it from the set of target receivers. Additionally, if the Fault exception goes untrapped, the copy will ultimately fail. The Fault exception references the Manager that raised it, and maps each faulted Receiver to the exception that Receiver raised. In order to identify the exception that caused a Fault, the ``faults`` attribute on the `postgresql.copyman.ReceiverFault` must be referenced:: >>> from postgresql import copyman >>> send_stmt = source.prepare("COPY (SELECT i FROM generate_series(1, 1000000) AS g(i)) TO STDOUT") >>> destination.execute("CREATE TEMP TABLE loading_table (i int8)") >>> receive_stmt = destination.prepare("COPY loading_table FROM STDIN") >>> producer = copyman.StatementProducer(send_stmt) >>> receiver = copyman.StatementReceiver(receive_stmt) >>> >>> with source.xact(), destination.xact(): ... with copyman.CopyManager(producer, receiver) as copy: ... while copy.receivers: ... try: ... for num_messages, num_bytes in copy: ... update_rate(num_bytes) ... break ... except copyman.ReceiverFault as cf: ... # Access the original exception using the receiver as the key. ... original_exception = cf.faults[receiver] ... if unknown_failure(original_exception): ... ... ... raise ReceiverFault Properties ~~~~~~~~~~~~~~~~~~~~~~~~ The following attributes exist on `postgresql.copyman.ReceiverFault` instances: ``ReceiverFault.manager`` The subject `postgresql.copyman.CopyManager` instance. ``ReceiverFault.faults`` A dictionary mapping the Receiver to the exception raised by that Receiver. Reconciliation ~~~~~~~~~~~~~~ When a `postgresql.copyman.ReceiverFault` is raised, the Manager immediately removes the Receiver so that the COPY operation can continue. Continuation of the COPY can occur by trapping the exception and continuing the iteration of the Manager. However, if the fault is recoverable, the `postgresql.copyman.CopyManager.reconcile` method must be used to reintroduce the Receiver into the Manager's set.
Faults must be trapped from within the Manager's context:: >>> import socket >>> from postgresql import copyman >>> send_stmt = source.prepare("COPY (SELECT i FROM generate_series(1, 1000000) AS g(i)) TO STDOUT") >>> destination.execute("CREATE TEMP TABLE loading_table (i int8)") >>> receive_stmt = destination.prepare("COPY loading_table FROM STDIN") >>> producer = copyman.StatementProducer(send_stmt) >>> receiver = copyman.StatementReceiver(receive_stmt) >>> >>> with source.xact(), destination.xact(): ... with copyman.CopyManager(producer, receiver) as copy: ... while copy.receivers: ... try: ... for num_messages, num_bytes in copy: ... update_rate(num_bytes) ... except copyman.ReceiverFault as cf: ... if isinstance(cf.faults[receiver], socket.timeout): ... copy.reconcile(receiver) ... else: ... raise Recovering from Faults does add significant complexity to a COPY operation, so it's often best to avoid conditions in which reconcilable Faults may occur. Producer Faults --------------- Producer faults are normally fatal to the COPY operation and should rarely be trapped. However, the Manager makes no state changes when a Producer faults, so, unlike Receiver Faults, no reconciliation process is necessary; rather, if it's safe to continue, the Manager's iterator should continue to be processed. ProducerFault Properties ~~~~~~~~~~~~~~~~~~~~~~~~ The following attributes exist on `postgresql.copyman.ProducerFault` instances: ``ProducerFault.manager`` The subject `postgresql.copyman.CopyManager`. ``ProducerFault.__context__`` The original exception raised by the Producer. Failures ======== When a COPY operation is aborted, either by an exception or by the iterator being broken, a `postgresql.copyman.CopyFail` exception will be raised by the Manager's ``__exit__()`` method. The `postgresql.copyman.CopyFail` exception records any exceptions that occur during the exit of the context managers of the Producer and the Receivers. CopyFail Properties ------------------- The following properties exist on `postgresql.copyman.CopyFail` exceptions: ``CopyFail.manager`` The Manager whose COPY operation failed. ``CopyFail.receiver_faults`` A dictionary mapping a `postgresql.copyman.Receiver` to the exception raised by that Receiver's ``__exit__``. `None` if no exceptions were raised by the Receivers. ``CopyFail.producer_fault`` The exception raised by the `postgresql.copyman.Producer`. `None` if none. Producers ========= The following Producers are available: ``postgresql.copyman.StatementProducer(postgresql.api.Statement)`` Given a Statement producing COPY data, construct a Producer. ``postgresql.copyman.IteratorProducer(collections.abc.Iterator)`` Given an Iterator producing *chunks* of COPY lines, construct a Producer to manage the data coming from the iterator. Receivers ========= ``postgresql.copyman.StatementReceiver(postgresql.api.Statement)`` Given a Statement that accepts COPY data, construct a Receiver. ``postgresql.copyman.CallReceiver(callable)`` Given a callable, construct a Receiver that will transmit COPY data in chunks of lines. That is, the callable will be given a list of COPY lines for each transfer cycle. Terminology =========== The following terms are regularly used to describe the implementation and processes of the `postgresql.copyman` module: Manager The object used to manage data coming from a Producer and being given to the Receivers. It also manages the necessary initialization and finalization steps required by those participants.
Producer The object used to produce the COPY data to be given to the Receivers. The source. Receiver An object that consumes COPY data. A target. Fault Specifically, `postgresql.copyman.Fault` exceptions. A Fault is raised when a Receiver or a Producer raises an exception during the COPY operation. Reconciliation Generally, the steps performed by the "reconcile" method on `postgresql.copyman.CopyManager` instances. More precisely, the necessary steps for a Receiver's reintroduction into the COPY operation after a Fault. Failed Copy A failed copy is an aborted COPY operation. This occurs in situations of untrapped exceptions or an incomplete COPY. Specifically, the COPY will be noted as failed in cases where the Manager's iterator is *not* run until exhaustion. Realignment The process of providing compensating data to the Receivers so that the connection will be on a message boundary. Occurs when the COPY operation is aborted.
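For completeness, the iterator and callable interfaces described under Producers and Receivers can be exercised without any database connection at all. The following is a hypothetical sketch — the chunk contents are invented, and only `IteratorProducer`, `CallReceiver`, and `CopyManager.run()` are taken from the interfaces documented above::

   >>> from postgresql import copyman
   >>> # Each chunk is a sequence of COPY text lines.
   >>> chunks = iter([
   ...     [b'1\tone\n', b'2\ttwo\n'],
   ...     [b'3\tthree\n'],
   ... ])
   >>> producer = copyman.IteratorProducer(chunks)
   >>> received = []
   >>> # The callable is handed a list of COPY lines each transfer cycle.
   >>> receiver = copyman.CallReceiver(received.extend)
   >>> copyman.CopyManager(producer, receiver).run()
   >>> len(received)
   3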
PypiClean
/pyeasysql-3.3.3-py3-none-any.whl/EasySQL/EasyInstances.py
from typing import Optional from .ABC import CHARSET from .Classes import EasyDatabase, EasyColumn from .Types import * __all__ = ['EasyLocalHost', 'EasyInt64Column', 'EasyInt32Column', 'EasyInt24Column', 'EasyInt16Column', 'EasyInt08Column', 'EasyCharColumn', 'EasyBitColumn', 'EasyBoolColumn', 'EasyFloatColumn', 'EasyDoubleColumn', 'EasyDecimalColumn', 'EasyStringColumn'] class EasyLocalHost(EasyDatabase): _host = "127.0.0.1" _port = 3306 _user = "root" _password = "" def __init__(self, database, charset: Optional[CHARSET]): self._database = database self._charset = charset # "charset if charset is not None else None" is just charset super().__init__() class EasyInt64Column(EasyColumn): def __init__(self, name: str): super().__init__(name, INT64) class EasyInt32Column(EasyColumn): def __init__(self, name: str): super().__init__(name, INT32) class EasyInt24Column(EasyColumn): def __init__(self, name: str): super().__init__(name, INT24) class EasyInt16Column(EasyColumn): def __init__(self, name: str): super().__init__(name, INT16) class EasyInt08Column(EasyColumn): def __init__(self, name: str): super().__init__(name, INT8) class EasyBitColumn(EasyColumn): def __init__(self, name: str): super().__init__(name, BIT) class EasyBoolColumn(EasyColumn): def __init__(self, name: str): super().__init__(name, BOOL) class EasyFloatColumn(EasyColumn): def __init__(self, name: str): super().__init__(name, FLOAT) class EasyDoubleColumn(EasyColumn): def __init__(self, name: str): super().__init__(name, DOUBLE) class EasyDecimalColumn(EasyColumn): def __init__(self, name: str): super().__init__(name, DECIMAL) class EasyStringColumn(EasyColumn): def __init__(self, name: str): super().__init__(name, STRING) # was BIGINT, an apparent copy-paste error; STRING assumed in .Types class EasyCharColumn(EasyColumn): def __init__(self, name: str): super().__init__(name, CHAR) # was BIGINT, an apparent copy-paste error; CHAR assumed in .Types
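# These classes are thin one-liners over EasyColumn, so another fixed-type
# convenience column follows the same pattern. A hypothetical sketch:
# the TEXT type (and whether .Types exports it) is an assumption, as is
# importing EasyColumn from EasySQL.Classes -- verify both against your
# installed package.
from EasySQL.Classes import EasyColumn
from EasySQL.Types import TEXT


class EasyTextColumn(EasyColumn):
    def __init__(self, name: str):
        # Only the column name varies; the SQL type is fixed by the subclass.
        super().__init__(name, TEXT)


# Usage mirrors the built-in instances above.
description = EasyTextColumn('description')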
PypiClean
/qlogging-1.3.1.tar.gz/qlogging-1.3.1/README.md
# Python Quick Logging | QLogging Beautifully colored, quick and simple Python logging. This logger is based on [Python logging package](https://docs.python.org/3/library/logging.html) [![version](https://img.shields.io/badge/Version-1.3.1-lightgrey)](https://github.com/sinkingtitanic/qlogging) [![build](https://img.shields.io/badge/Pypi%20Build-Stable-blue)](https://pypi.org/project/qlogging/) [![python-version](https://img.shields.io/badge/Python-3^-success)](https://www.python.org/downloads/) [![coverage](https://img.shields.io/badge/coverage-%25100-success)](https://pypi.org/project/qlogging/) ## Screenshots: ### Terminal/CMD ![Preview](https://raw.githubusercontent.com/sinkingtitanic/qlogging/main/screenshots/terminal.png) ### Notebooks: ![Preview](https://raw.githubusercontent.com/sinkingtitanic/qlogging/main/screenshots/notebook.png) ### Windows: ![Preview](https://raw.githubusercontent.com/sinkingtitanic/qlogging/main/screenshots/windows.png) ## Features * Color logging in Terminal and CMD * Color logging in Jupyter Notebook and Jupyter Lab * Color logging in Kaggle Notebook * Color logging in Google Colab Notebook * Know which function the logger was called from * Know which line number the logger was called from * Support logging to a file * Simple and clean one-liner * Customizable ## Installation ``` $ pip install qlogging ``` ## Examples ### Logging only to console/notebook: ``` import qlogging logger = qlogging.get_logger(level='debug') logger.debug("This is debug") logger.info("This is info") logger.warning("This is warning") logger.error("This is an error") logger.critical("This is a critical") ``` Output (output format: `<time> <function_name>,<line#>| <log_message>`): ``` 12:21:37 foo(),3| This is debug 12:21:37 foo(),4| This is info 12:21:37 foo(),5| This is warning 12:21:37 foo(),6| This is an error 12:21:37 foo(),7| This is a critical ``` ____ ### Logging to console/terminal and a log file (append if log file exists): ``` import qlogging logger = qlogging.get_logger(level='debug', logfile='my_log.log') ``` ____ ### Logging to console/terminal and a log file (overwrite if log file exists): ``` import qlogging logger = qlogging.get_logger(level='debug', logfile='my_log.log', logfilemode='w') ``` ____ ### Logging with `loggingmode='long'` (default is `loggingmode='short'`): ``` import qlogging logger = qlogging.get_logger(level='debug', loggingmode='long') logger.debug("This is debug") ``` Output (output format: `<date> <time> | <file_name> | <function_name>,<line#>| <log_message>`): ``` 2021-05-18 12:38:22 | <main.py> | <foo()>,4 | This is debug ``` ## Easy Customization Customize your logger based on the following `get_logger()` function parameters ``` def get_logger( level="info", logfile=None, logfilemode="a", loggingmode="short", format_str=None, file_format_str=None, format_date=None, colors=None, logger_config=None, ): """ returns Python logging based logger formatted with colors :param level: (DEFAULT='info') str of logging level, each str option is mapped to Python logging levels, str options: 'info': logging.INFO, 'debug': logging.DEBUG, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, 'notset': logging.NOTSET :param logfile: (DEFAULT=None) str path where to save log file, example: '/tmp/my_log.log' :param logfilemode: (DEFAULT='a') str of log file writing mode, same as the ones documented at Python logging package.
options: 'a': appends to logfile 'w': overwrites logfile :param loggingmode: (DEFAULT='short') str logging mode to be selected. options: 'short': will use short str format ('%(asctime)s %(funcName)s,%(lineno)s| %(message)s') and short date format ('%H:%M:%S') 'long': will use long str format ('%(asctime)s | %(filename)s | %(funcName)s,%(lineno)s | %(message)s') and long date format ('%Y-%m-%d %H:%M:%S') 'manual': you need to set :param format_str: and :param format_date: yourself :param format_str: (DEFAULT=None) str of format logging string for console, only set this if you selected :param loggingmode: as 'manual'. example (the style is always '%', see python logging module for more info): '%(asctime)s | %(filename)s | %(funcName)s,%(lineno)s | %(message)s' :param file_format_str: (DEFAULT=None) str of format logging string for logfile (if you keep it None, we will use what you passed in :param format_str:), only set this if you selected :param loggingmode: as 'manual'. example (the style is always '%', see python logging module for more info): '%(asctime)s | %(filename)s | %(funcName)s,%(lineno)s | %(message)s' :param format_date: (DEFAULT=None) str of date logging string, only set this if you selected :param loggingmode: as 'manual'. example: '%Y-%m-%d %H:%M:%S' :param colors: (DEFAULT=None) dict of color settings, only set this if you selected :param loggingmode: as 'manual'. example: { 'DEBUG': Fore.CYAN + Style.BRIGHT, 'INFO': Fore.GREEN + Style.BRIGHT, 'WARNING': Fore.YELLOW + Style.BRIGHT, 'ERROR': Fore.RED + Style.BRIGHT, 'CRITICAL': Fore.RED + Back.WHITE + Style.BRIGHT, } :param logger_config: (DEFAULT=None) dict python logger config if you want to fully overwrite configs. example: { "version": 1, "disable_existing_loggers": False, "formatters": { "qlog": { "()": "qlogging.qlogging.ColoredFormatter", "colors": { 'DEBUG': Fore.CYAN + Style.BRIGHT, 'INFO': Fore.GREEN + Style.BRIGHT, 'WARNING': Fore.YELLOW + Style.BRIGHT, 'ERROR': Fore.RED + Style.BRIGHT, 'CRITICAL': Fore.RED + Back.WHITE + Style.BRIGHT, }, "format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s", "datefmt":'%H:%M:%S' }, }, "handlers": { "console": { "level": "DEBUG", "formatter": "qlog", "class": "logging.StreamHandler", "stream": "ext://sys.stdout", }, }, "loggers": { "": { "handlers": ["console"], "level": "DEBUG", "propagate": True, }, }, } :return: formatted Python logging instance """ ``` ## Alternatives * [coloredlogs 15.0](https://pypi.org/project/coloredlogs/): does not support coloring in notebooks. * [colorlog 5.0.1](https://pypi.org/project/colorlog/): does not support coloring in notebooks. ## Credit: * [colorama](https://github.com/tartley/colorama) * [joshbode](https://gist.github.com/joshbode/58fac7ababc700f51e2a9ecdebe563ad) ## License MIT License Copyright (c) 2021 Github Account SinkingTitanic Owner Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
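## Manual Mode Example

The `loggingmode='manual'` option described in the customization section requires supplying the format strings yourself. A minimal sketch — the format and date strings below are illustrative, not required values:

```
import qlogging

logger = qlogging.get_logger(
    level='debug',
    loggingmode='manual',
    format_str='%(asctime)s [%(levelname)s] %(funcName)s,%(lineno)s | %(message)s',
    format_date='%H:%M:%S',
)
logger.info("manual formatting")
```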
PypiClean
/ais_dom-2023.7.2-py3-none-any.whl/homeassistant/components/octoprint/sensor.py
"""Support for monitoring OctoPrint sensors.""" from __future__ import annotations from datetime import datetime, timedelta import logging from pyoctoprintapi import OctoprintJobInfo, OctoprintPrinterInfo from homeassistant.components.sensor import ( SensorDeviceClass, SensorEntity, SensorStateClass, ) from homeassistant.config_entries import ConfigEntry from homeassistant.const import PERCENTAGE, UnitOfTemperature from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.update_coordinator import CoordinatorEntity from . import OctoprintDataUpdateCoordinator from .const import DOMAIN _LOGGER = logging.getLogger(__name__) JOB_PRINTING_STATES = ["Printing from SD", "Printing"] def _is_printer_printing(printer: OctoprintPrinterInfo) -> bool: return ( printer and printer.state and printer.state.flags and printer.state.flags.printing ) async def async_setup_entry( hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback, ) -> None: """Set up the available OctoPrint sensors.""" coordinator: OctoprintDataUpdateCoordinator = hass.data[DOMAIN][ config_entry.entry_id ]["coordinator"] device_id = config_entry.unique_id assert device_id is not None known_tools = set() @callback def async_add_tool_sensors() -> None: if not coordinator.data["printer"]: return new_tools = [] for tool in [ tool for tool in coordinator.data["printer"].temperatures if tool.name not in known_tools ]: assert device_id is not None known_tools.add(tool.name) for temp_type in ("actual", "target"): new_tools.append( OctoPrintTemperatureSensor( coordinator, tool.name, temp_type, device_id, ) ) async_add_entities(new_tools) config_entry.async_on_unload(coordinator.async_add_listener(async_add_tool_sensors)) if coordinator.data["printer"]: async_add_tool_sensors() entities: list[SensorEntity] = [ OctoPrintStatusSensor(coordinator, device_id), OctoPrintJobPercentageSensor(coordinator, device_id), OctoPrintEstimatedFinishTimeSensor(coordinator, device_id), OctoPrintStartTimeSensor(coordinator, device_id), ] async_add_entities(entities) class OctoPrintSensorBase( CoordinatorEntity[OctoprintDataUpdateCoordinator], SensorEntity ): """Representation of an OctoPrint sensor.""" def __init__( self, coordinator: OctoprintDataUpdateCoordinator, sensor_type: str, device_id: str, ) -> None: """Initialize a new OctoPrint sensor.""" super().__init__(coordinator) self._device_id = device_id self._attr_name = f"OctoPrint {sensor_type}" self._attr_unique_id = f"{sensor_type}-{device_id}" @property def device_info(self): """Device info.""" return self.coordinator.device_info class OctoPrintStatusSensor(OctoPrintSensorBase): """Representation of an OctoPrint sensor.""" _attr_icon = "mdi:printer-3d" def __init__( self, coordinator: OctoprintDataUpdateCoordinator, device_id: str ) -> None: """Initialize a new OctoPrint sensor.""" super().__init__(coordinator, "Current State", device_id) @property def native_value(self): """Return sensor state.""" printer: OctoprintPrinterInfo = self.coordinator.data["printer"] if not printer: return None return printer.state.text @property def available(self) -> bool: """Return if entity is available.""" return self.coordinator.last_update_success and self.coordinator.data["printer"] class OctoPrintJobPercentageSensor(OctoPrintSensorBase): """Representation of an OctoPrint sensor.""" _attr_native_unit_of_measurement = PERCENTAGE _attr_icon = "mdi:file-percent" def __init__( self, coordinator: OctoprintDataUpdateCoordinator, device_id:
str ) -> None: """Initialize a new OctoPrint sensor.""" super().__init__(coordinator, "Job Percentage", device_id) @property def native_value(self): """Return sensor state.""" job: OctoprintJobInfo = self.coordinator.data["job"] if not job: return None if not (state := job.progress.completion): return 0 return round(state, 2) class OctoPrintEstimatedFinishTimeSensor(OctoPrintSensorBase): """Representation of an OctoPrint sensor.""" _attr_device_class = SensorDeviceClass.TIMESTAMP def __init__( self, coordinator: OctoprintDataUpdateCoordinator, device_id: str ) -> None: """Initialize a new OctoPrint sensor.""" super().__init__(coordinator, "Estimated Finish Time", device_id) @property def native_value(self) -> datetime | None: """Return sensor state.""" job: OctoprintJobInfo = self.coordinator.data["job"] if ( not job or not job.progress.print_time_left or not _is_printer_printing(self.coordinator.data["printer"]) ): return None read_time = self.coordinator.data["last_read_time"] return (read_time + timedelta(seconds=job.progress.print_time_left)).replace( second=0 ) class OctoPrintStartTimeSensor(OctoPrintSensorBase): """Representation of an OctoPrint sensor.""" _attr_device_class = SensorDeviceClass.TIMESTAMP def __init__( self, coordinator: OctoprintDataUpdateCoordinator, device_id: str ) -> None: """Initialize a new OctoPrint sensor.""" super().__init__(coordinator, "Start Time", device_id) @property def native_value(self) -> datetime | None: """Return sensor state.""" job: OctoprintJobInfo = self.coordinator.data["job"] if ( not job or not job.progress.print_time or not _is_printer_printing(self.coordinator.data["printer"]) ): return None read_time = self.coordinator.data["last_read_time"] return (read_time - timedelta(seconds=job.progress.print_time)).replace( second=0 ) class OctoPrintTemperatureSensor(OctoPrintSensorBase): """Representation of an OctoPrint sensor.""" _attr_native_unit_of_measurement = UnitOfTemperature.CELSIUS _attr_device_class = SensorDeviceClass.TEMPERATURE _attr_state_class = SensorStateClass.MEASUREMENT def __init__( self, coordinator: OctoprintDataUpdateCoordinator, tool: str, temp_type: str, device_id: str, ) -> None: """Initialize a new OctoPrint sensor.""" super().__init__(coordinator, f"{temp_type} {tool} temp", device_id) self._temp_type = temp_type self._api_tool = tool @property def native_value(self): """Return sensor state.""" printer: OctoprintPrinterInfo = self.coordinator.data["printer"] if not printer: return None for temp in printer.temperatures: if temp.name == self._api_tool: val = ( temp.actual_temp if self._temp_type == "actual" else temp.target_temp ) if val is None: return None return round(val, 2) return None @property def available(self) -> bool: """Return if entity is available.""" return self.coordinator.last_update_success and self.coordinator.data["printer"]
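# The two timestamp sensors above derive their values purely from the job
# progress counters and the coordinator's last read time. The standalone
# sketch below reproduces that arithmetic outside Home Assistant; the
# sample values are invented.
from datetime import datetime, timedelta, timezone

last_read_time = datetime(2023, 7, 1, 12, 30, 45, tzinfo=timezone.utc)
print_time = 600        # seconds already spent printing
print_time_left = 1800  # seconds estimated to remain

# Mirrors OctoPrintStartTimeSensor.native_value: subtract elapsed time,
# then zero the seconds so the timestamp doesn't jitter on every update.
start_time = (last_read_time - timedelta(seconds=print_time)).replace(second=0)

# Mirrors OctoPrintEstimatedFinishTimeSensor.native_value.
finish_time = (last_read_time + timedelta(seconds=print_time_left)).replace(second=0)

print(start_time.isoformat())   # 2023-07-01T12:20:00+00:00
print(finish_time.isoformat())  # 2023-07-01T13:00:00+00:00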
PypiClean
/sonatype_nexus_sdk-0.1.3-py3-none-any.whl/nexus_sdk/paths/v1_repositories_nuget_hosted/post.py
import decimal # noqa: F401 import functools # noqa: F401 import io # noqa: F401 import re # noqa: F401 import typing # noqa: F401 import uuid # noqa: F401 from dataclasses import dataclass from datetime import date, datetime # noqa: F401 import frozendict # noqa: F401 import typing_extensions # noqa: F401 import urllib3 from urllib3._collections import HTTPHeaderDict from nexus_sdk import schemas # noqa: F401 from nexus_sdk import api_client, exceptions from nexus_sdk.model.nuget_hosted_repository_api_request import ( NugetHostedRepositoryApiRequest, ) from . import path # body param SchemaForRequestBodyApplicationJson = NugetHostedRepositoryApiRequest request_body_body = api_client.RequestBody( content={ 'application/json': api_client.MediaType( schema=SchemaForRequestBodyApplicationJson), }, ) @dataclass class ApiResponseFor201(api_client.ApiResponse): response: urllib3.HTTPResponse body: schemas.Unset = schemas.unset headers: schemas.Unset = schemas.unset _response_for_201 = api_client.OpenApiResponse( response_cls=ApiResponseFor201, ) @dataclass class ApiResponseFor401(api_client.ApiResponse): response: urllib3.HTTPResponse body: schemas.Unset = schemas.unset headers: schemas.Unset = schemas.unset _response_for_401 = api_client.OpenApiResponse( response_cls=ApiResponseFor401, ) @dataclass class ApiResponseFor403(api_client.ApiResponse): response: urllib3.HTTPResponse body: schemas.Unset = schemas.unset headers: schemas.Unset = schemas.unset _response_for_403 = api_client.OpenApiResponse( response_cls=ApiResponseFor403, ) _status_code_to_response = { '201': _response_for_201, '401': _response_for_401, '403': _response_for_403, } class BaseApi(api_client.Api): @typing.overload def _create_repository12_oapg( self, content_type: typing_extensions.Literal["application/json"] = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor201, ]: ... @typing.overload def _create_repository12_oapg( self, content_type: str = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor201, ]: ... @typing.overload def _create_repository12_oapg( self, skip_deserialization: typing_extensions.Literal[True], content_type: str = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _create_repository12_oapg( self, content_type: str = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor201, api_client.ApiResponseWithoutDeserialization, ]: ...
def _create_repository12_oapg( self, content_type: str = 'application/json', body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): """ Create NuGet hosted repository :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances """ used_path = path.value _headers = HTTPHeaderDict() # TODO add cookie handling _fields = None _body = None if body is not schemas.unset: serialized_data = request_body_body.serialize(body, content_type) _headers.add('Content-Type', content_type) if 'fields' in serialized_data: _fields = serialized_data['fields'] elif 'body' in serialized_data: _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, method='post'.upper(), headers=_headers, fields=_fields, body=_body, stream=stream, timeout=timeout, ) if skip_deserialization: api_response = api_client.ApiResponseWithoutDeserialization(response=response) else: response_for_status = _status_code_to_response.get(str(response.status)) if response_for_status: api_response = response_for_status.deserialize(response, self.api_client.configuration) else: api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: raise exceptions.ApiException( status=response.status, reason=response.reason, api_response=api_response ) return api_response class CreateRepository12(BaseApi): # this class is used by api classes that refer to endpoints with operationId fn names @typing.overload def create_repository12( self, content_type: typing_extensions.Literal["application/json"] = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor201, ]: ... @typing.overload def create_repository12( self, content_type: str = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor201, ]: ... @typing.overload def create_repository12( self, skip_deserialization: typing_extensions.Literal[True], content_type: str = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_repository12( self, content_type: str = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor201, api_client.ApiResponseWithoutDeserialization, ]: ... 
def create_repository12( self, content_type: str = 'application/json', body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): return self._create_repository12_oapg( body=body, content_type=content_type, stream=stream, timeout=timeout, skip_deserialization=skip_deserialization ) class ApiForpost(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @typing.overload def post( self, content_type: typing_extensions.Literal["application/json"] = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor201, ]: ... @typing.overload def post( self, content_type: str = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., ) -> typing.Union[ ApiResponseFor201, ]: ... @typing.overload def post( self, skip_deserialization: typing_extensions.Literal[True], content_type: str = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( self, content_type: str = ..., body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., ) -> typing.Union[ ApiResponseFor201, api_client.ApiResponseWithoutDeserialization, ]: ... def post( self, content_type: str = 'application/json', body: typing.Union[SchemaForRequestBodyApplicationJson, schemas.Unset] = schemas.unset, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): return self._create_repository12_oapg( body=body, content_type=content_type, stream=stream, timeout=timeout, skip_deserialization=skip_deserialization )
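# For orientation, a hypothetical sketch of driving the endpoint class
# defined in this module. Everything outside this file is an assumption to
# verify against your installed nexus_sdk: the top-level ApiClient and
# Configuration entry points, the auth attributes, and the request-model
# field names (taken from Nexus's public repository API documentation).
from nexus_sdk import ApiClient, Configuration  # assumed top-level exports
from nexus_sdk.model.nuget_hosted_repository_api_request import (
    NugetHostedRepositoryApiRequest,
)
from nexus_sdk.paths.v1_repositories_nuget_hosted.post import ApiForpost

config = Configuration(host="https://nexus.example.com/service/rest")
config.username = "admin"   # assumed auth attributes
config.password = "secret"

with ApiClient(config) as client:
    endpoint = ApiForpost(client)
    # Field names follow the documented Nexus repository API payload.
    body = NugetHostedRepositoryApiRequest(
        name="nuget-hosted-example",
        online=True,
        storage={
            "blobStoreName": "default",
            "strictContentTypeValidation": True,
            "writePolicy": "ALLOW_ONCE",
        },
    )
    api_response = endpoint.post(body=body)
    print(api_response.response.status)  # 201 on success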
PypiClean
/Brian2-2.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/brian2/memory/dynamicarray.py
import numpy as np __all__ = ["DynamicArray", "DynamicArray1D"] def getslices(shape, from_start=True): if from_start: return tuple(slice(0, x) for x in shape) else: return tuple(slice(x, None) for x in shape) class DynamicArray: """ An N-dimensional dynamic array class The array can be resized in any dimension, and the class will handle allocating a new block of data and copying when necessary. .. warning:: The data will NOT be contiguous for >1D arrays. To ensure this, you will either need to use 1D arrays, or to copy the data, or use the shrink method with the current size (although note that in both cases you negate the memory and efficiency benefits of the dynamic array). Initialisation arguments: ``shape``, ``dtype`` The shape and dtype of the array to initialise, as in Numpy. For 1D arrays, shape can be a single int, for ND arrays it should be a tuple. ``factor`` The resizing factor (see notes below). Larger values tend to lead to more wasted memory, but more computationally efficient code. ``use_numpy_resize``, ``refcheck`` Normally, when you resize the array it creates a new array and copies the data. Sometimes, it is possible to resize an array without a copy, and if this option is set it will attempt to do this. However, this can cause memory problems if you are not careful so the option is off by default. You need to ensure that you do not create slices of the array so that no references to the memory exist other than the main array object. If you are sure you know what you're doing, you can switch this reference check off. Note that resizing in this way is only done if you resize in the first dimension. The array is initialised with zeros. The data is stored in the attribute ``data`` which is a Numpy array. Some numpy methods are implemented and can work directly on the array object, including ``len(arr)``, ``arr[...]`` and ``arr[...]=...``. In other cases, use the ``data`` attribute. Examples -------- >>> x = DynamicArray((2, 3), dtype=int) >>> x[:] = 1 >>> x.resize((3, 3)) >>> x[:] += 1 >>> x.resize((3, 4)) >>> x[:] += 1 >>> x.resize((4, 4)) >>> x[:] += 1 >>> x.data[:] = x.data**2 >>> x.data array([[16, 16, 16, 4], [16, 16, 16, 4], [ 9, 9, 9, 4], [ 1, 1, 1, 1]]) Notes ----- The dynamic array returns a ``data`` attribute which is a view on the larger ``_data`` attribute. When a resize operation is performed, and a specific dimension is enlarged beyond the size in the ``_data`` attribute, the size is increased to the larger of ``cursize*factor`` and ``newsize``. This ensures that the amortized cost of increasing the size of the array is O(1). """ def __init__( self, shape, dtype=float, factor=2, use_numpy_resize=False, refcheck=True ): if isinstance(shape, int): shape = (shape,) self._data = np.zeros(shape, dtype=dtype) self.data = self._data self.dtype = dtype self.shape = self._data.shape self.factor = factor self.use_numpy_resize = use_numpy_resize self.refcheck = refcheck def resize(self, newshape): """ Resizes the data to the new shape, which can be a different size to the current data, but should have the same rank, i.e. same number of dimensions. 
""" datashapearr = np.array(self._data.shape) newshapearr = np.array(newshape) resizedimensions = newshapearr > datashapearr if resizedimensions.any(): # resize of the data is needed minnewshapearr = datashapearr # .copy() dimstoinc = minnewshapearr[resizedimensions] incdims = np.array(dimstoinc * self.factor, dtype=int) newdims = np.maximum(incdims, dimstoinc + 1) minnewshapearr[resizedimensions] = newdims newshapearr = np.maximum(newshapearr, minnewshapearr) do_resize = False if self.use_numpy_resize and self._data.flags["C_CONTIGUOUS"]: if sum(resizedimensions) == resizedimensions[0]: do_resize = True if do_resize: self.data = None self._data.resize(tuple(newshapearr), refcheck=self.refcheck) else: newdata = np.zeros(tuple(newshapearr), dtype=self.dtype) slices = getslices(self._data.shape) newdata[slices] = self._data self._data = newdata elif (newshapearr < self.shape).any(): # If we reduced the size, set the no longer used memory to 0 self._data[getslices(newshape, from_start=False)] = 0 # Reduce our view to the requested size if necessary self.data = self._data[getslices(newshape, from_start=True)] self.shape = self.data.shape def resize_along_first(self, newshape): new_dimension = newshape[0] if new_dimension > self._data.shape[0]: new_size = np.maximum(self._data.shape[0] * self.factor, new_dimension + 1) final_new_shape = np.array(self._data.shape) final_new_shape[0] = new_size if self.use_numpy_resize and self._data.flags["C_CONTIGUOUS"]: self.data = None self._data.resize(tuple(final_new_shape), refcheck=self.refcheck) else: newdata = np.zeros(tuple(final_new_shape), dtype=self.dtype) slices = getslices(self._data.shape) newdata[slices] = self._data self._data = newdata elif newshape < self.shape: # If we reduced the size, set the no longer used memory to 0 self._data[new_dimension:] = 0 # Reduce our view to the requested size if necessary self.data = self._data[:new_dimension] self.shape = newshape def shrink(self, newshape): """ Reduces the data to the given shape, which should be smaller than the current shape. `resize` can also be used with smaller values, but it will not shrink the allocated memory, whereas `shrink` will reallocate the memory. This method should only be used infrequently, as if it is used frequently it will negate the computational efficiency benefits of the DynamicArray. """ if isinstance(newshape, int): newshape = (newshape,) shapearr = np.array(self.shape) newshapearr = np.array(newshape) if (newshapearr <= shapearr).all(): newdata = np.zeros(newshapearr, dtype=self.dtype) newdata[:] = self._data[getslices(newshapearr)] self._data = newdata self.shape = tuple(newshapearr) self.data = self._data def __getitem__(self, item): return self.data.__getitem__(item) def __setitem__(self, item, val): self.data.__setitem__(item, val) def __len__(self): return len(self.data) def __str__(self): return self.data.__str__() def __repr__(self): return self.data.__repr__() class DynamicArray1D(DynamicArray): """ Version of `DynamicArray` with specialised ``resize`` method designed to be more efficient. 
""" def resize(self, newshape): (datashape,) = self._data.shape if newshape > datashape: (shape,) = self.shape # we work with int shapes only newdatashape = max(newshape, int(shape * self.factor) + 1) if self.use_numpy_resize and self._data.flags["C_CONTIGUOUS"]: self.data = None self._data.resize(newdatashape, refcheck=self.refcheck) else: newdata = np.zeros(newdatashape, dtype=self.dtype) newdata[:shape] = self.data self._data = newdata elif newshape < self.shape[0]: # If we reduced the size, set the no longer used memory to 0 self._data[newshape:] = 0 # Reduce our view to the requested size if necessary self.data = self._data[:newshape] self.shape = (newshape,)
PypiClean
/Dabo-0.9.16.tar.gz/Dabo-0.9.16/dabo/lib/specParser.py
import xml.sax from StringIO import StringIO import os.path class specHandler(xml.sax.ContentHandler): _IsContainer = False def __init__(self): self.appDict = {} self.relaDict = {} self.currTableDict = {} self.currTable = "" # self.currFieldDict = {} def startElement(self, name, attrs): if name == "table": # New table starting self.currTable = attrs.getValue("name") self.currTableDict = {} #self.currFieldDict = {} self.currRelaDict = {} elif name == "field": fieldDict = {} for att in attrs.keys(): if att == "name": fldName = attrs.getValue("name") else: fieldDict[att] = attrs.getValue(att) self.currTableDict[fldName] = fieldDict.copy() elif name == "join": joinDict = {} for att in attrs.keys(): if att == "order": name = "_join%s" % attrs["order"] joinDict[att] = attrs[att] self.currTableDict[name] = joinDict.copy() elif name == "relation": relType = attrs.getValue("relationType") if relType == "1M": nm = attrs.getValue("name") self.relaDict[nm] = {} self.relaDict[nm]["relationType"] = "1M" self.relaDict[nm]["source"] = nm.split(":")[0].strip() self.relaDict[nm]["target"] = attrs.getValue("target") self.relaDict[nm]["sourceField"] = attrs.getValue("sourceField") self.relaDict[nm]["targetField"] = attrs.getValue("targetField") def endElement(self, name): if name == "table": # Save it to the app dict self.appDict[self.currTable] = self.currTableDict.copy() def getFieldDict(self): return self.appDict def getRelationDict(self): return self.relaDict def importFieldSpecs(file=None, tbl=None): if file is None: return None file = fileRef(file) sh = specHandler() xml.sax.parse(file, sh) ret = sh.getFieldDict() # Limit it to a specific table if requested if tbl is not None: ret = ret[tbl] return ret def importRelationSpecs(file=None): if file is None: return None file = fileRef(file) sh = specHandler() xml.sax.parse(file, sh) ret = sh.getRelationDict() return ret def fileRef(ref=""): """ Handles the passing of file names, file objects, or raw XML to the parser. Returns a file-like object, or None. """ ret = None if isinstance(ref, basestring): if os.path.exists(ref): ret = file(ref) else: ret = StringIO(ref) return ret
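# A small end-to-end illustration of the parser above (Python 2, matching
# the module). The XML snippet is invented to exercise exactly the
# attributes specHandler reads: table/field names plus arbitrary field
# attributes, and the "1M" relation attributes. Passing raw XML works
# because fileRef wraps non-file strings in a StringIO.
from dabo.lib.specParser import importFieldSpecs, importRelationSpecs

spec = """<specs>
  <table name="customers">
    <field name="id" type="int" pk="true"/>
    <field name="name" type="char"/>
  </table>
  <relation name="customers:orders" relationType="1M"
            target="orders" sourceField="id" targetField="custid"/>
</specs>"""

# Field specs for one table: {field name: {attribute: value}}.
print importFieldSpecs(spec, tbl="customers")

# 1M relations keyed by the relation's name attribute.
print importRelationSpecs(spec)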
PypiClean
/verse16-0.1.2.tar.gz/verse16-0.1.2/docs/installation.rst
.. highlight:: shell ============ Installation ============ Stable release -------------- To install verse16, run this command in your terminal: .. code-block:: console $ pip install verse16 This is the preferred method to install verse16, as it will always install the most recent stable release. If you don't have `pip`_ installed, this `Python installation guide`_ can guide you through the process. .. _pip: https://pip.pypa.io .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ From sources ------------ The sources for verse16 can be downloaded from the `Github repo`_. You can either clone the public repository: .. code-block:: console $ git clone git://github.com/pelgo14/verse16 Or download the `tarball`_: .. code-block:: console $ curl -OJL https://github.com/pelgo14/verse16/tarball/master Once you have a copy of the source, you can install it with: .. code-block:: console $ python setup.py install .. _Github repo: https://github.com/pelgo14/verse16 .. _tarball: https://github.com/pelgo14/verse16/tarball/master
PypiClean
/Charm-Crypto-0.43.tar.gz/Charm-Crypto-0.43/charm/toolbox/matrixops.py
def GaussEliminationinGroups(m): # The code was originally found at: http://ine.scripts.mit.edu/blog/2011/05/gaussian-elimination-in-python/ # Here is an example: suppose you have A = [[1,2], # [3,4]] # and you want AX = I. # if X = [[x1,x2],[x3,x4]] and I = [[1,0],[0,1]] # GaussEliminationinGroups([[1,2,1],[3,4,0]]) --> [x1,x3] # GaussEliminationinGroups([[1,2,0],[3,4,1]]) --> [x2,x4] # then X = MatrixTransGroups([[x1,x3],[x2,x4]]) #eliminate columns for col in range(len(m[0])): for row in range(col+1, len(m)): r = [(rowValue * (-(m[row][col] / m[col][col]))) for rowValue in m[col]] m[row] = [ (pair[0]+pair[1]) for pair in zip(m[row], r)] #now backsolve by substitution ans = [] m.reverse() #makes it easier to backsolve for sol in range(len(m)): if sol == 0: ans.append(m[sol][-1] / m[sol][-2]) else: inner = 0 #substitute in all known coefficients for x in range(sol): inner += (ans[x]*m[sol][-2-x]) #the equation is now reduced to ax + b = c form #solve with (c - b) / a ans.append((m[sol][-1]-inner)/m[sol][-sol-2]) ans.reverse() return ans def MatrixMulGroups(matrix1,matrix2): # Matrix multiplication if len(matrix1[0]) != len(matrix2): # Check matrix dimensions print('Matrices must be m*n and n*p to multiply!') else: # Multiply if correct dimensions new_matrix = [[0 for row in range(len(matrix2[0]))] for col in range(len(matrix1))] for i in range(len(matrix1)): for j in range(len(matrix2[0])): for k in range(len(matrix2)): new_matrix[i][j] += matrix1[i][k]*matrix2[k][j] return new_matrix def MatrixAddGroups(matrix1,matrix2): # Matrix Addition if (len(matrix1[0]) != len(matrix2[0]) or len(matrix1) != len(matrix2)): # Check matrix dimensions print('Matrices must have the same dimensions to add!') else: # Add if correct dimensions rows = len(matrix1) columns = len(matrix1[0]) result = [[matrix1[row][col] + matrix2[row][col] for col in range(columns)] for row in range(rows)] return result def MatrixScalarMulGroups(lamda, matrix): # Matrix Scalar Mul rows = len(matrix) columns = len(matrix[0]) result = [[matrix[row][col] * lamda for col in range(columns)] for row in range(rows)] return result def MatrixTransGroups(matrix): # Matrix transpose result = [[r[col] for r in matrix] for col in range(len(matrix[0]))] return result
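# The inversion recipe in the docstring comments above can be checked end
# to end. A minimal worked example with plain Python numbers (no group
# elements): solve A.x = e_i for each unit vector e_i, then transpose.
from charm.toolbox.matrixops import (
    GaussEliminationinGroups,
    MatrixMulGroups,
    MatrixTransGroups,
)

col1 = GaussEliminationinGroups([[1, 2, 1], [3, 4, 0]])  # [x1, x3] = [-2.0, 1.5]
col2 = GaussEliminationinGroups([[1, 2, 0], [3, 4, 1]])  # [x2, x4] = [1.0, -0.5]

# The rows [x1, x3] and [x2, x4] are the *columns* of X, hence the transpose.
X = MatrixTransGroups([col1, col2])
print(X)  # [[-2.0, 1.0], [1.5, -0.5]]

# Sanity check: A . X should be the identity.
A = [[1, 2], [3, 4]]
print(MatrixMulGroups(A, X))  # [[1.0, 0.0], [0.0, 1.0]]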
PypiClean
/fastly-3.0.0.tar.gz/fastly-3.0.0/docs/Results.md
# Results The [results](#results-data-model) of the query, grouped by service (and optionally, region), and aggregated over the appropriate time span. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **requests** | **int** | Number of requests processed. | [optional] **hits** | **int** | Number of cache hits. | [optional] **hits_time** | **float** | Total amount of time spent processing cache hits (in seconds). | [optional] **miss** | **int** | Number of cache misses. | [optional] **miss_time** | **float** | Total amount of time spent processing cache misses (in seconds). | [optional] **_pass** | **int** | Number of requests that passed through the CDN without being cached. | [optional] **pass_time** | **float** | Total amount of time spent processing cache passes (in seconds). | [optional] **errors** | **int** | Number of cache errors. | [optional] **restarts** | **int** | Number of restarts performed. | [optional] **hit_ratio** | **float, none_type** | Ratio of cache hits to cache misses (between 0 and 1). | [optional] **bandwidth** | **int** | Total bytes delivered (`resp_header_bytes` + `resp_body_bytes` + `bereq_header_bytes` + `bereq_body_bytes` + `compute_resp_header_bytes` + `compute_resp_body_bytes` + `compute_bereq_header_bytes` + `compute_bereq_body_bytes` + `websocket_resp_header_bytes` + `websocket_resp_body_bytes` + `websocket_bereq_header_bytes` + `websocket_bereq_body_bytes` + `fanout_resp_header_bytes` + `fanout_resp_body_bytes` + `fanout_bereq_header_bytes` + `fanout_bereq_body_bytes`). | [optional] **body_size** | **int** | Total body bytes delivered (alias for resp_body_bytes). | [optional] **header_size** | **int** | Total header bytes delivered (alias for resp_header_bytes). | [optional] **req_body_bytes** | **int** | Total body bytes received. | [optional] **req_header_bytes** | **int** | Total header bytes received. | [optional] **resp_body_bytes** | **int** | Total body bytes delivered (edge_resp_body_bytes + shield_resp_body_bytes). | [optional] **resp_header_bytes** | **int** | Total header bytes delivered (edge_resp_header_bytes + shield_resp_header_bytes). | [optional] **bereq_body_bytes** | **int** | Total body bytes sent to origin. | [optional] **bereq_header_bytes** | **int** | Total header bytes sent to origin. | [optional] **uncacheable** | **int** | Number of requests that were designated uncachable. | [optional] **pipe** | **int** | Optional. Pipe operations performed (legacy feature). | [optional] **synth** | **int** | Number of requests that returned a synthetic response (i.e., response objects created with the `synthetic` VCL statement). | [optional] **tls** | **int** | Number of requests that were received over TLS. | [optional] **tls_v10** | **int** | Number of requests received over TLS 1.0. | [optional] **tls_v11** | **int** | Number of requests received over TLS 1.1. | [optional] **tls_v12** | **int** | Number of requests received over TLS 1.2. | [optional] **tls_v13** | **int** | Number of requests received over TLS 1.3. | [optional] **edge_requests** | **int** | Number of requests sent by end users to Fastly. | [optional] **edge_resp_header_bytes** | **int** | Total header bytes delivered from Fastly to the end user. | [optional] **edge_resp_body_bytes** | **int** | Total body bytes delivered from Fastly to the end user. | [optional] **edge_hit_requests** | **int** | Number of requests sent by end users to Fastly that resulted in a hit at the edge. 
| [optional] **edge_miss_requests** | **int** | Number of requests sent by end users to Fastly that resulted in a miss at the edge. | [optional] **origin_fetches** | **int** | Number of requests sent to origin. | [optional] **origin_fetch_header_bytes** | **int** | Total request header bytes sent to origin. | [optional] **origin_fetch_body_bytes** | **int** | Total request body bytes sent to origin. | [optional] **origin_fetch_resp_header_bytes** | **int** | Total header bytes received from origin. | [optional] **origin_fetch_resp_body_bytes** | **int** | Total body bytes received from origin. | [optional] **origin_revalidations** | **int** | Number of responses received from origin with a `304` status code in response to an `If-Modified-Since` or `If-None-Match` request. Under regular scenarios, a revalidation will imply a cache hit. However, if using Fastly Image Optimizer or segmented caching this may result in a cache miss. | [optional] **origin_cache_fetches** | **int** | The total number of completed requests made to backends (origins) that returned cacheable content. | [optional] **shield** | **int** | Number of requests from edge to the shield POP. | [optional] **shield_resp_body_bytes** | **int** | Total body bytes delivered via a shield. | [optional] **shield_resp_header_bytes** | **int** | Total header bytes delivered via a shield. | [optional] **shield_fetches** | **int** | Number of requests made from one Fastly POP to another, as part of shielding. | [optional] **shield_fetch_header_bytes** | **int** | Total request header bytes sent to a shield. | [optional] **shield_fetch_body_bytes** | **int** | Total request body bytes sent to a shield. | [optional] **shield_fetch_resp_header_bytes** | **int** | Total response header bytes sent from a shield to the edge. | [optional] **shield_fetch_resp_body_bytes** | **int** | Total response body bytes sent from a shield to the edge. | [optional] **shield_revalidations** | **int** | Number of responses received from origin with a `304` status code, in response to an `If-Modified-Since` or `If-None-Match` request to a shield. Under regular scenarios, a revalidation will imply a cache hit. However, if using segmented caching this may result in a cache miss. | [optional] **shield_cache_fetches** | **int** | The total number of completed requests made to shields that returned cacheable content. | [optional] **ipv6** | **int** | Number of requests that were received over IPv6. | [optional] **otfp** | **int** | Number of responses that came from the Fastly On-the-Fly Packaging service for video-on-demand. | [optional] **otfp_resp_body_bytes** | **int** | Total body bytes delivered from the Fastly On-the-Fly Packaging service for video-on-demand. | [optional] **otfp_resp_header_bytes** | **int** | Total header bytes delivered from the Fastly On-the-Fly Packaging service for video-on-demand. | [optional] **otfp_shield_resp_body_bytes** | **int** | Total body bytes delivered via a shield for the Fastly On-the-Fly Packaging service for video-on-demand. | [optional] **otfp_shield_resp_header_bytes** | **int** | Total header bytes delivered via a shield for the Fastly On-the-Fly Packaging service for video-on-demand. | [optional] **otfp_manifests** | **int** | Number of responses that were manifest files from the Fastly On-the-Fly Packaging service for video-on-demand. | [optional] **otfp_deliver_time** | **float** | Total amount of time spent delivering a response from the Fastly On-the-Fly Packaging service for video-on-demand (in seconds). 
| [optional] **otfp_shield_time** | **float** | Total amount of time spent delivering a response via a shield from the Fastly On-the-Fly Packaging service for video-on-demand (in seconds). | [optional] **video** | **int** | Number of responses with the video segment or video manifest MIME type (i.e., application/x-mpegurl, application/vnd.apple.mpegurl, application/f4m, application/dash+xml, application/vnd.ms-sstr+xml, video/mp2t, audio/aac, video/f4f, video/x-flv, video/mp4, audio/mp4). | [optional] **pci** | **int** | Number of responses with the PCI flag turned on. | [optional] **log** | **int** | Number of log lines sent. | [optional] **log_bytes** | **int** | Total log bytes sent. | [optional] **http2** | **int** | Number of requests received over HTTP/2. | [optional] **http3** | **int** | Number of requests received over HTTP/3. | [optional] **waf_logged** | **int** | Number of requests that triggered a WAF rule and were logged. | [optional] **waf_blocked** | **int** | Number of requests that triggered a WAF rule and were blocked. | [optional] **waf_passed** | **int** | Number of requests that triggered a WAF rule and were passed. | [optional] **attack_req_body_bytes** | **int** | Total body bytes received from requests that triggered a WAF rule. | [optional] **attack_req_header_bytes** | **int** | Total header bytes received from requests that triggered a WAF rule. | [optional] **attack_logged_req_body_bytes** | **int** | Total body bytes received from requests that triggered a WAF rule that was logged. | [optional] **attack_logged_req_header_bytes** | **int** | Total header bytes received from requests that triggered a WAF rule that was logged. | [optional] **attack_blocked_req_body_bytes** | **int** | Total body bytes received from requests that triggered a WAF rule that was blocked. | [optional] **attack_blocked_req_header_bytes** | **int** | Total header bytes received from requests that triggered a WAF rule that was blocked. | [optional] **attack_passed_req_body_bytes** | **int** | Total body bytes received from requests that triggered a WAF rule that was passed. | [optional] **attack_passed_req_header_bytes** | **int** | Total header bytes received from requests that triggered a WAF rule that was passed. | [optional] **attack_resp_synth_bytes** | **int** | Total bytes delivered for requests that triggered a WAF rule and returned a synthetic response. | [optional] **imgopto** | **int** | Number of responses that came from the Fastly Image Optimizer service. If the service receives 10 requests for an image, this stat will be 10 regardless of how many times the image was transformed. | [optional] **imgopto_resp_body_bytes** | **int** | Total body bytes delivered from the Fastly Image Optimizer service, including shield traffic. | [optional] **imgopto_resp_header_bytes** | **int** | Total header bytes delivered from the Fastly Image Optimizer service, including shield traffic. | [optional] **imgopto_shield_resp_body_bytes** | **int** | Total body bytes delivered via a shield from the Fastly Image Optimizer service. | [optional] **imgopto_shield_resp_header_bytes** | **int** | Total header bytes delivered via a shield from the Fastly Image Optimizer service. | [optional] **imgvideo** | **int** | Number of video responses that came from the Fastly Image Optimizer service. | [optional] **imgvideo_frames** | **int** | Number of video frames that came from the Fastly Image Optimizer service. A video frame is an individual image within a sequence of video.
| [optional] **imgvideo_resp_header_bytes** | **int** | Total header bytes of video delivered from the Fastly Image Optimizer service. | [optional] **imgvideo_resp_body_bytes** | **int** | Total body bytes of video delivered from the Fastly Image Optimizer service. | [optional] **imgvideo_shield_resp_header_bytes** | **int** | Total header bytes of video delivered via a shield from the Fastly Image Optimizer service. | [optional] **imgvideo_shield_resp_body_bytes** | **int** | Total body bytes of video delivered via a shield from the Fastly Image Optimizer service. | [optional] **imgvideo_shield** | **int** | Number of video responses delivered via a shield that came from the Fastly Image Optimizer service. | [optional] **imgvideo_shield_frames** | **int** | Number of video frames delivered via a shield that came from the Fastly Image Optimizer service. A video frame is an individual image within a sequence of video. | [optional] **status_200** | **int** | Number of responses sent with status code 200 (Success). | [optional] **status_204** | **int** | Number of responses sent with status code 204 (No Content). | [optional] **status_206** | **int** | Number of responses sent with status code 206 (Partial Content). | [optional] **status_301** | **int** | Number of responses sent with status code 301 (Moved Permanently). | [optional] **status_302** | **int** | Number of responses sent with status code 302 (Found). | [optional] **status_304** | **int** | Number of responses sent with status code 304 (Not Modified). | [optional] **status_400** | **int** | Number of responses sent with status code 400 (Bad Request). | [optional] **status_401** | **int** | Number of responses sent with status code 401 (Unauthorized). | [optional] **status_403** | **int** | Number of responses sent with status code 403 (Forbidden). | [optional] **status_404** | **int** | Number of responses sent with status code 404 (Not Found). | [optional] **status_406** | **int** | Number of responses sent with status code 406 (Not Acceptable). | [optional] **status_416** | **int** | Number of responses sent with status code 416 (Range Not Satisfiable). | [optional] **status_429** | **int** | Number of responses sent with status code 429 (Too Many Requests). | [optional] **status_500** | **int** | Number of responses sent with status code 500 (Internal Server Error). | [optional] **status_501** | **int** | Number of responses sent with status code 501 (Not Implemented). | [optional] **status_502** | **int** | Number of responses sent with status code 502 (Bad Gateway). | [optional] **status_503** | **int** | Number of responses sent with status code 503 (Service Unavailable). | [optional] **status_504** | **int** | Number of responses sent with status code 504 (Gateway Timeout). | [optional] **status_505** | **int** | Number of responses sent with status code 505 (HTTP Version Not Supported). | [optional] **status_1xx** | **int** | Number of "Informational" category status codes delivered. | [optional] **status_2xx** | **int** | Number of "Success" status codes delivered. | [optional] **status_3xx** | **int** | Number of "Redirection" codes delivered. | [optional] **status_4xx** | **int** | Number of "Client Error" codes delivered. | [optional] **status_5xx** | **int** | Number of "Server Error" codes delivered. | [optional] **object_size_1k** | **int** | Number of objects served that were under 1KB in size.
| [optional] **object_size_10k** | **int** | Number of objects served that were between 1KB and 10KB in size. | [optional] **object_size_100k** | **int** | Number of objects served that were between 10KB and 100KB in size. | [optional] **object_size_1m** | **int** | Number of objects served that were between 100KB and 1MB in size. | [optional] **object_size_10m** | **int** | Number of objects served that were between 1MB and 10MB in size. | [optional] **object_size_100m** | **int** | Number of objects served that were between 10MB and 100MB in size. | [optional] **object_size_1g** | **int** | Number of objects served that were between 100MB and 1GB in size. | [optional] **recv_sub_time** | **float** | Time spent inside the `vcl_recv` Varnish subroutine (in seconds). | [optional] **recv_sub_count** | **int** | Number of executions of the `vcl_recv` Varnish subroutine. | [optional] **hash_sub_time** | **float** | Time spent inside the `vcl_hash` Varnish subroutine (in seconds). | [optional] **hash_sub_count** | **int** | Number of executions of the `vcl_hash` Varnish subroutine. | [optional] **miss_sub_time** | **float** | Time spent inside the `vcl_miss` Varnish subroutine (in seconds). | [optional] **miss_sub_count** | **int** | Number of executions of the `vcl_miss` Varnish subroutine. | [optional] **fetch_sub_time** | **float** | Time spent inside the `vcl_fetch` Varnish subroutine (in seconds). | [optional] **fetch_sub_count** | **int** | Number of executions of the `vcl_fetch` Varnish subroutine. | [optional] **pass_sub_time** | **float** | Time spent inside the `vcl_pass` Varnish subroutine (in seconds). | [optional] **pass_sub_count** | **int** | Number of executions of the `vcl_pass` Varnish subroutine. | [optional] **pipe_sub_time** | **float** | Time spent inside the `vcl_pipe` Varnish subroutine (in seconds). | [optional] **pipe_sub_count** | **int** | Number of executions of the `vcl_pipe` Varnish subroutine. | [optional] **deliver_sub_time** | **float** | Time spent inside the `vcl_deliver` Varnish subroutine (in seconds). | [optional] **deliver_sub_count** | **int** | Number of executions of the `vcl_deliver` Varnish subroutine. | [optional] **error_sub_time** | **float** | Time spent inside the `vcl_error` Varnish subroutine (in seconds). | [optional] **error_sub_count** | **int** | Number of executions of the `vcl_error` Varnish subroutine. | [optional] **hit_sub_time** | **float** | Time spent inside the `vcl_hit` Varnish subroutine (in seconds). | [optional] **hit_sub_count** | **int** | Number of executions of the `vcl_hit` Varnish subroutine. | [optional] **prehash_sub_time** | **float** | Time spent inside the `vcl_prehash` Varnish subroutine (in seconds). | [optional] **prehash_sub_count** | **int** | Number of executions of the `vcl_prehash` Varnish subroutine. | [optional] **predeliver_sub_time** | **float** | Time spent inside the `vcl_predeliver` Varnish subroutine (in seconds). | [optional] **predeliver_sub_count** | **int** | Number of executions of the `vcl_predeliver` Varnish subroutine. | [optional] **tls_handshake_sent_bytes** | **int** | Number of bytes transferred during TLS handshake. | [optional] **hit_resp_body_bytes** | **int** | Total body bytes delivered for cache hits. | [optional] **miss_resp_body_bytes** | **int** | Total body bytes delivered for cache misses. | [optional] **pass_resp_body_bytes** | **int** | Total body bytes delivered for cache passes. 
| [optional] **segblock_origin_fetches** | **int** | Number of `Range` requests to origin for segments of resources when using segmented caching. | [optional] **segblock_shield_fetches** | **int** | Number of `Range` requests to a shield for segments of resources when using segmented caching. | [optional] **compute_requests** | **int** | The total number of requests that were received for your service by Fastly. | [optional] **compute_request_time_ms** | **float** | The total, actual amount of time used to process your requests, including active CPU time (in milliseconds). | [optional] **compute_request_time_billed_ms** | **float** | The total amount of request processing time you will be billed for, measured in 50 millisecond increments. | [optional] **compute_ram_used** | **int** | The amount of RAM used for your service by Fastly (in bytes). | [optional] **compute_execution_time_ms** | **float** | The amount of active CPU time used to process your requests (in milliseconds). | [optional] **compute_req_header_bytes** | **int** | Total header bytes received by Compute@Edge. | [optional] **compute_req_body_bytes** | **int** | Total body bytes received by Compute@Edge. | [optional] **compute_resp_header_bytes** | **int** | Total header bytes sent from Compute@Edge to end user. | [optional] **compute_resp_body_bytes** | **int** | Total body bytes sent from Compute@Edge to end user. | [optional] **compute_resp_status_1xx** | **int** | Number of "Informational" category status codes delivered by Compute@Edge. | [optional] **compute_resp_status_2xx** | **int** | Number of "Success" category status codes delivered by Compute@Edge. | [optional] **compute_resp_status_3xx** | **int** | Number of "Redirection" category status codes delivered by Compute@Edge. | [optional] **compute_resp_status_4xx** | **int** | Number of "Client Error" category status codes delivered by Compute@Edge. | [optional] **compute_resp_status_5xx** | **int** | Number of "Server Error" category status codes delivered by Compute@Edge. | [optional] **compute_bereq_header_bytes** | **int** | Total header bytes sent to backends (origins) by Compute@Edge. | [optional] **compute_bereq_body_bytes** | **int** | Total body bytes sent to backends (origins) by Compute@Edge. | [optional] **compute_beresp_header_bytes** | **int** | Total header bytes received from backends (origins) by Compute@Edge. | [optional] **compute_beresp_body_bytes** | **int** | Total body bytes received from backends (origins) by Compute@Edge. | [optional] **compute_bereqs** | **int** | Number of backend requests started. | [optional] **compute_bereq_errors** | **int** | Number of backend request errors, including timeouts. | [optional] **compute_resource_limit_exceeded** | **int** | Number of times a guest exceeded its resource limit, includes heap, stack, globals, and code execution timeout. | [optional] **compute_heap_limit_exceeded** | **int** | Number of times a guest exceeded its heap limit. | [optional] **compute_stack_limit_exceeded** | **int** | Number of times a guest exceeded its stack limit. | [optional] **compute_globals_limit_exceeded** | **int** | Number of times a guest exceeded its globals limit. | [optional] **compute_guest_errors** | **int** | Number of times a service experienced a guest code error. | [optional] **compute_runtime_errors** | **int** | Number of times a service experienced a guest runtime error.
| [optional] **edge_hit_resp_body_bytes** | **int** | Body bytes delivered for edge hits. | [optional] **edge_hit_resp_header_bytes** | **int** | Header bytes delivered for edge hits. | [optional] **edge_miss_resp_body_bytes** | **int** | Body bytes delivered for edge misses. | [optional] **edge_miss_resp_header_bytes** | **int** | Header bytes delivered for edge misses. | [optional] **origin_cache_fetch_resp_body_bytes** | **int** | Body bytes received from origin for cacheable content. | [optional] **origin_cache_fetch_resp_header_bytes** | **int** | Header bytes received from an origin for cacheable content. | [optional] **shield_hit_requests** | **int** | Number of requests that resulted in a hit at a shield. | [optional] **shield_miss_requests** | **int** | Number of requests that resulted in a miss at a shield. | [optional] **shield_hit_resp_header_bytes** | **int** | Header bytes delivered for shield hits. | [optional] **shield_hit_resp_body_bytes** | **int** | Body bytes delivered for shield hits. | [optional] **shield_miss_resp_header_bytes** | **int** | Header bytes delivered for shield misses. | [optional] **shield_miss_resp_body_bytes** | **int** | Body bytes delivered for shield misses. | [optional] **websocket_req_header_bytes** | **int** | Total header bytes received from end users over passthrough WebSocket connections. | [optional] **websocket_req_body_bytes** | **int** | Total message content bytes received from end users over passthrough WebSocket connections. | [optional] **websocket_resp_header_bytes** | **int** | Total header bytes sent to end users over passthrough WebSocket connections. | [optional] **websocket_resp_body_bytes** | **int** | Total message content bytes sent to end users over passthrough WebSocket connections. | [optional] **websocket_bereq_header_bytes** | **int** | Total header bytes sent to backends over passthrough WebSocket connections. | [optional] **websocket_bereq_body_bytes** | **int** | Total message content bytes sent to backends over passthrough WebSocket connections. | [optional] **websocket_beresp_header_bytes** | **int** | Total header bytes received from backends over passthrough WebSocket connections. | [optional] **websocket_beresp_body_bytes** | **int** | Total message content bytes received from backends over passthrough WebSocket connections. | [optional] **websocket_conn_time_ms** | **int** | Total duration of passthrough WebSocket connections with end users. | [optional] **fanout_recv_publishes** | **int** | Total published messages received from the publish API endpoint. | [optional] **fanout_send_publishes** | **int** | Total published messages sent to end users. | [optional] **kv_store_class_a_operations** | **int** | The total number of class a operations for the KV store. | [optional] **kv_store_class_b_operations** | **int** | The total number of class b operations for the KV store. | [optional] **object_store_class_a_operations** | **int** | Use kv_store_class_a_operations. | [optional] **object_store_class_b_operations** | **int** | Use kv_store_class_b_operations. | [optional] **fanout_req_header_bytes** | **int** | Total header bytes received from end users over Fanout connections. | [optional] **fanout_req_body_bytes** | **int** | Total body or message content bytes received from end users over Fanout connections. | [optional] **fanout_resp_header_bytes** | **int** | Total header bytes sent to end users over Fanout connections. 
| [optional] **fanout_resp_body_bytes** | **int** | Total body or message content bytes sent to end users over Fanout connections, excluding published message content. | [optional] **fanout_bereq_header_bytes** | **int** | Total header bytes sent to backends over Fanout connections. | [optional] **fanout_bereq_body_bytes** | **int** | Total body or message content bytes sent to backends over Fanout connections. | [optional] **fanout_beresp_header_bytes** | **int** | Total header bytes received from backends over Fanout connections. | [optional] **fanout_beresp_body_bytes** | **int** | Total body or message content bytes received from backends over Fanout connections. | [optional] **fanout_conn_time_ms** | **int** | Total duration of Fanout connections with end users. | [optional] **ddos_action_limit_streams_connections** | **int** | For HTTP/2, the number of connections the limit-streams action was applied to. The limit-streams action caps the allowed number of concurrent streams in a connection. | [optional] **ddos_action_limit_streams_requests** | **int** | For HTTP/2, the number of requests made on a connection for which the limit-streams action was taken. The limit-streams action caps the allowed number of concurrent streams in a connection. | [optional] **ddos_action_tarpit_accept** | **int** | The number of times the tarpit-accept action was taken. The tarpit-accept action adds a delay when accepting future connections. | [optional] **ddos_action_tarpit** | **int** | The number of times the tarpit action was taken. The tarpit action delays writing the response to the client. | [optional] **ddos_action_close** | **int** | The number of times the close action was taken. The close action aborts the connection as soon as possible. The close action takes effect either right after accept, right after the client hello, or right after the response was sent. | [optional] **ddos_action_blackhole** | **int** | The number of times the blackhole action was taken. The blackhole action quietly closes a TCP connection without sending a reset or otherwise notifying its peer (all TCP state is dropped). | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
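As a quick illustration of how the counters above compose, here is a minimal sketch (not part of the generated Fastly client) that derives a few summary ratios from one historical-stats record, assuming the record is available as a plain dict keyed by the field names documented above; the helper name `summarize` is hypothetical.

def summarize(record: dict) -> dict:
    """Derive a few summary ratios from one historical-stats record (plain dict)."""
    # Total responses across the documented status-class counters.
    total = sum(record.get("status_%dxx" % i, 0) for i in range(1, 6))
    waf_total = sum(record.get(k, 0) for k in ("waf_logged", "waf_blocked", "waf_passed"))
    return {
        "error_rate": record.get("status_5xx", 0) / total if total else 0.0,
        "waf_block_ratio": record.get("waf_blocked", 0) / waf_total if waf_total else 0.0,
        # Note: http2 counts *requests* while status_* count *responses*, so
        # this share is only approximate.
        "http2_share": record.get("http2", 0) / total if total else 0.0,
    }

print(summarize({"status_2xx": 90, "status_5xx": 10, "http2": 40, "waf_blocked": 1, "waf_logged": 3}))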
PypiClean
/availsim4-2.0.2.tar.gz/availsim4-2.0.2/availsim4core/src/discrete_event_simulation/event/b_event/failure_event/detectable_failure_event.py
from __future__ import annotations from typing import Set, TYPE_CHECKING, Tuple, List if TYPE_CHECKING: from availsim4core.src.context.system.component_tree.basic import Basic from availsim4core.src.discrete_event_simulation.event.b_event.b_event import BEventPriority from availsim4core.src.discrete_event_simulation.event.c_event.c_event import CEvent, CEventPriority from availsim4core.src.context.context import Context from availsim4core.src.context.system.component_tree.status import Status from availsim4core.src.context.system.failure_mode import FailureMode from availsim4core.src.discrete_event_simulation.event.b_event.failure_event.failure_event import FailureEvent from availsim4core.src.context.phase.phase_manager import PhaseManager from availsim4core.src.discrete_event_simulation.event.c_event.held_event.order_end_holding_event import \ OrderEndHoldingEvent from availsim4core.src.discrete_event_simulation.event.c_event.repair_event.order_repair_event import OrderRepairEvent class DetectableFailureEvent(FailureEvent): """ Class dealing with detectable failure events. A detectable failure can trigger a repair because that failure is, in real life, detected by the monitoring system. """ def __init__(self, absolute_occurrence_time: float, context: Context, basic: Basic, failure_mode: FailureMode): super().__init__(absolute_occurrence_time, context, basic, failure_mode, priority=BEventPriority.DETECTABLE_FAILURE_EVENT) def postpone(self, duration): return DetectableFailureEvent(self.absolute_occurrence_time + duration, self.context, self.basic, self.failure_mode) def execute(self): return self.basic.update_status(Status.FAILED, self.absolute_occurrence_time, self.context.phase_manager.current_phase, # or phase of the failure mode?! f"{self.failure_mode.name} failure mode of component {self.basic.name}_{self.basic.local_id}_{self.basic.global_id}", self.context) def update_b_event_collection(self, event_set: Set, types_of_event_to_clean: List[type]) -> Tuple[Set, Set]: """ The input event_set is cleaned from all the events of types provided by types_of_event_to_clean. Returns a clean set of Events and a set containing the removed events.
""" if self.failure_mode.phase_change_trigger in ["AFTER_REPAIR", "AFTER_FAILURE"]: # we clean the flow of the phases as for failures, the tye_of_event_to_clean are only the phase event b_event_to_remove_set = {event for event in event_set if type(event) in types_of_event_to_clean} return event_set - b_event_to_remove_set, b_event_to_remove_set elif self.failure_mode.phase_change_trigger in ["NEVER"]: # we do not change the normal flow of the phases return event_set, set() def generate_c_event(self, **kwargs) -> Set[CEvent]: c_event_set = set() order_repair_event = OrderRepairEvent( priority=CEventPriority.ORDER_REPAIR_EVENT, context=self.context, component=self.basic, event=self, failure_mode=self.failure_mode ) if self.failure_mode.held_before_repair_phase_set != {PhaseManager.HELD_FOREVER}: if self.context.phase_manager.current_phase not in self.failure_mode.held_before_repair_phase_set: c_event_set.add(OrderEndHoldingEvent( priority=CEventPriority.ORDER_END_HOLDING_EVENT, context=self.context, component=self.basic, event=self, failure_mode=self.failure_mode, held_event=order_repair_event, held_until_phase_set=self.failure_mode.held_before_repair_phase_set )) else: c_event_set.add(order_repair_event) if self.failure_mode.phase_change_trigger in ["AFTER_FAILURE"]: from availsim4core.src.discrete_event_simulation.event.c_event.phase_event.order_next_phase_if_failure_event import \ OrderNextPhaseIfFailureEvent c_event_set.add( OrderNextPhaseIfFailureEvent( priority=CEventPriority.ORDER_NEXT_PHASE_EVENT, context=self.context, failure_mode=self.failure_mode ) ) return c_event_set
PypiClean
/collective.jqueryui-1.6rc5.tar.gz/collective.jqueryui-1.6rc5/collective/jqueryui/jquery.ui/ui/minified/ui.slider.min.js
(function(a){a.widget("ui.slider",a.extend({},a.ui.mouse,{_init:function(){var b=this,c=this.options;this._keySliding=false;this._handleIndex=null;this.orientation=c.orientation=="auto"?(this.element[0].offsetWidth/this.element[0].offsetHeight>1?"horizontal":"vertical"):c.orientation;this._mouseInit();this.element.addClass("ui-slider ui-slider-"+this.orientation+" ui-widget ui-widget-content ui-corner-all");this.range=a([]);if(c.range){if(c.range===true){this.range=a("<div></div>");if(!c.values){c.values=[this._valueMin(),this._valueMin()]}if(c.values.length&&c.values.length!=2){c.values=[c.values[0],c.values[0]]}}else{this.range=a("<div></div>")}this.range.appendTo(this.element).addClass("ui-slider-range ui-widget-header");(c.range=="min")&&(this.orientation=="horizontal")&&this.range.css({left:0});(c.range=="max")&&(this.orientation=="horizontal")&&this.range.css({right:0});(c.range=="min")&&(this.orientation=="vertical")&&this.range.css({bottom:0});(c.range=="max")&&(this.orientation=="vertical")&&this.range.css({top:0})}if(a(".ui-slider-handle",this.element).length==0){a('<a href="#"></a>').appendTo(this.element).addClass("ui-slider-handle")}if(c.values&&c.values.length){while(a(".ui-slider-handle",this.element).length<c.values.length){a('<a href="#"></a>').appendTo(this.element).addClass("ui-slider-handle")}}this.handles=a(".ui-slider-handle",this.element).addClass("ui-state-default ui-corner-all");this.handle=this.handles.eq(0);this.handles.add(this.range).filter("a").click(function(d){d.preventDefault()}).hover(function(){a(this).addClass("ui-state-hover")},function(){a(this).removeClass("ui-state-hover")}).focus(function(){b.handles.removeClass("ui-state-focus");a(this).addClass("ui-state-focus")}).blur(function(){a(this).removeClass("ui-state-focus")});this.handles.each(function(d){a(this).data("index.ui-slider-handle",d)});this.handles.keydown(function(h){var e=a(this).data("index.ui-slider-handle");if(b.options.disabled){return}switch(h.keyCode){case a.ui.keyCode.HOME:case a.ui.keyCode.END:case a.ui.keyCode.UP:case a.ui.keyCode.RIGHT:case a.ui.keyCode.DOWN:case a.ui.keyCode.LEFT:if(!b._keySliding){b._keySliding=true;a(this).addClass("ui-state-active");b._start(h)}break}var f,d,g=b._step();if(b.options.values&&b.options.values.length){f=d=b.values(e)}else{f=d=b.value()}switch(h.keyCode){case a.ui.keyCode.HOME:d=b._valueMin();break;case a.ui.keyCode.END:d=b._valueMax();break;case a.ui.keyCode.UP:case a.ui.keyCode.RIGHT:d=f+g;break;case a.ui.keyCode.DOWN:case a.ui.keyCode.LEFT:d=f-g;break}b._slide(h,e,d)}).keyup(function(d){if(b._keySliding){b._stop(d);b._change(d);b._keySliding=false;a(this).removeClass("ui-state-active")}});this._refreshValue()},destroy:function(){this.handles.remove();this.element.removeClass("ui-slider ui-slider-horizontal ui-slider-vertical ui-slider-disabled ui-widget ui-widget-content ui-corner-all").removeData("slider").unbind(".slider");this._mouseDestroy()},_mouseCapture:function(g){var h=this.options;if(h.disabled){return false}this._start(g);this.elementSize={width:this.element.outerWidth(),height:this.element.outerHeight()};this.elementOffset=this.element.offset();var b={x:g.pageX,y:g.pageY};var e=this._normValueFromMouse(b);var i=this._valueMax(),c;var d=this,f;this.handles.each(function(j){var k=Math.abs(e-d.values(j));if(i>k){i=k;c=a(this);f=j}});d._handleIndex=f;c.addClass("ui-state-active").focus();this._slide(g,f,e);return true},_mouseStart:function(b){return true},_mouseDrag:function(d){var b={x:d.pageX,y:d.pageY};var 
c=this._normValueFromMouse(b);this._slide(d,this._handleIndex,c);return false},_mouseStop:function(b){this.handles.removeClass("ui-state-active");this._stop(b);this._change(b);this._handleIndex=null;return false},_normValueFromMouse:function(d){var c,h;if("horizontal"==this.orientation){c=this.elementSize.width;h=d.x-this.elementOffset.left}else{c=this.elementSize.height;h=d.y-this.elementOffset.top}var f=(h/c);if(f>1){f=1}if(f<0){f=0}if("vertical"==this.orientation){f=1-f}var e=this._valueMax()-this._valueMin(),i=f*e,b=i%this.options.step,g=this._valueMin()+i-b;if(b>(this.options.step/2)){g+=this.options.step}return g},_start:function(b){this._trigger("start",b,{value:this.value()})},_slide:function(f,e,d){if(this.options.values&&this.options.values.length){var g=this.handles[e];var b=this.values(e?0:1);if((e==0&&d>=b)||(e==1&&d<=b)){d=b}if(d!=this.values(e)){var c=this.values();c[e]=d;var h=this._trigger("slide",f,{handle:g,value:d,values:c});var b=this.values(e?0:1);if(h!==false){this.values(e,d)}}}else{if(d!=this.value()){var h=this._trigger("slide",f,{value:d});if(h!==false){this._setData("value",d)}}}},_stop:function(b){this._trigger("stop",b,{value:this.value()})},_change:function(b){this._trigger("change",b,{value:this.value()})},value:function(b){if(arguments.length){this._setData("value",b);this._change()}return this._value()},values:function(b,c){if(arguments.length>1){this.options.values[b]=c;this._refreshValue();this._change()}if(arguments.length){if(this.options.values&&this.options.values.length){return this._values(b)}else{return this.value()}}else{return this._values()}},_setData:function(b,c){a.widget.prototype._setData.apply(this,arguments);switch(b){case"orientation":this.orientation=this.options.orientation=="auto"?(this.element[0].offsetWidth/this.element[0].offsetHeight>1?"horizontal":"vertical"):this.options.orientation;this.element.removeClass("ui-slider-horizontal ui-slider-vertical").addClass("ui-slider-"+this._orientation());this._refreshValue();break;case"value":this._refreshValue();break}},_step:function(){var b=this.options.step;return b},_value:function(){var b=this.options.value;if(b<this._valueMin()){b=this._valueMin()}if(b>this._valueMax()){b=this._valueMax()}return b},_values:function(b){if(arguments.length){var c=this.options.values[b];if(c<this._valueMin()){c=this._valueMin()}if(c>this._valueMax()){c=this._valueMax()}return c}else{return this.options.values}},_valueMin:function(){var b=this.options.min;return b},_valueMax:function(){var b=this.options.max;return b},_refreshValue:function(){var e=this.options.range;if(this.options.values&&this.options.values.length){var c=this,b,f;this.handles.each(function(k,g){var h=(c.values(k)-c._valueMin())/(c._valueMax()-c._valueMin())*100;a(this).css(c.orientation=="horizontal"?"left":"bottom",h+"%");if(c.options.range===true){if(c.orientation=="horizontal"){(k==0)&&c.range.css("left",h+"%");(k==1)&&c.range.css("width",(h-lastValPercent)+"%")}else{(k==0)&&c.range.css("bottom",(h)+"%");(k==1)&&c.range.css("height",(h-lastValPercent)+"%")}}lastValPercent=h})}else{var 
d=(this.value()-this._valueMin())/(this._valueMax()-this._valueMin())*100;this.handle.css(this.orientation=="horizontal"?"left":"bottom",d+"%");(e=="min")&&(this.orientation=="horizontal")&&this.range.css({left:0,width:d+"%"});(e=="max")&&(this.orientation=="horizontal")&&this.range.css({left:d+"%",width:(100-d)+"%"});(e=="min")&&(this.orientation=="vertical")&&this.range.css({top:(100-d)+"%",height:d+"%"});(e=="max")&&(this.orientation=="vertical")&&this.range.css({bottom:d+"%",height:(100-d)+"%"})}}}));a.extend(a.ui.slider,{getter:"value values",version:"1.6rc5",eventPrefix:"slide",defaults:{delay:0,distance:0,max:100,min:0,orientation:"auto",range:false,step:1,value:0,values:null}})})(jQuery);
PypiClean
/email-filter-0.2.12.linux-x86_64.tar.gz/usr/local/lib/python2.7/dist-packages/email_filter/migrations/0002_auto__add_field_emaillog_raw_email__add_field_emaillog_message_id__add.py
import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'EmailLog.raw_email' db.add_column(u'email_filter_emaillog', 'raw_email', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True), keep_default=False) # Adding field 'EmailLog.message_id' db.add_column(u'email_filter_emaillog', 'message_id', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True), keep_default=False) # Adding field 'EmailLog.in_reply_to' db.add_column(u'email_filter_emaillog', 'in_reply_to', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True), keep_default=False) # Adding unique constraint on 'EmailRedirect', fields ['email_in', 'email_redirect'] db.create_unique(u'email_filter_emailredirect', ['email_in', 'email_redirect']) def backwards(self, orm): # Removing unique constraint on 'EmailRedirect', fields ['email_in', 'email_redirect'] db.delete_unique(u'email_filter_emailredirect', ['email_in', 'email_redirect']) # Deleting field 'EmailLog.raw_email' db.delete_column(u'email_filter_emaillog', 'raw_email') # Deleting field 'EmailLog.message_id' db.delete_column(u'email_filter_emaillog', 'message_id') # Deleting field 'EmailLog.in_reply_to' db.delete_column(u'email_filter_emaillog', 'in_reply_to') models = { u'email_filter.emaillog': { 'Meta': {'object_name': 'EmailLog'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'in_reply_to': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}), 'message_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}), 'raw_body': ('django.db.models.fields.TextField', [], {}), 'raw_email': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}), 'recipient': ('django.db.models.fields.CharField', [], {'max_length': '750'}), 'sender': ('django.db.models.fields.EmailField', [], {'max_length': '250'}), 'sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'email_filter.emailredirect': { 'Meta': {'unique_together': "(('email_in', 'email_redirect'),)", 'object_name': 'EmailRedirect'}, 'email_in': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '250'}), 'email_redirect': ('django.db.models.fields.EmailField', [], {'max_length': '250'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) } } complete_apps = ['email_filter']
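For comparison only: a sketch of the same schema change written as a built-in Django (1.7+) migration rather than a South one. The dependency name "0001_initial" is an assumption, and this file is not part of the email_filter package.

from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ("email_filter", "0001_initial"),  # assumed name of the preceding migration
    ]

    operations = [
        migrations.AddField(
            model_name="emaillog",
            name="raw_email",
            field=models.FileField(max_length=100, null=True, upload_to=""),
        ),
        migrations.AddField(
            model_name="emaillog",
            name="message_id",
            field=models.CharField(default="", max_length=255, blank=True),
        ),
        migrations.AddField(
            model_name="emaillog",
            name="in_reply_to",
            field=models.CharField(default="", max_length=255, blank=True),
        ),
        migrations.AlterUniqueTogether(
            name="emailredirect",
            unique_together={("email_in", "email_redirect")},
        ),
    ]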
PypiClean
/hops-apache-beam-2.24.0.2.tar.gz/hops-apache-beam-2.24.0.2/apache_beam/typehints/trivial_inference.py
# pytype: skip-file from __future__ import absolute_import from __future__ import print_function import collections import dis import inspect import pprint import sys import traceback import types from builtins import object from builtins import zip from functools import reduce from apache_beam.typehints import Any from apache_beam.typehints import row_type from apache_beam.typehints import typehints # pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports try: # Python 2 import __builtin__ as builtins except ImportError: # Python 3 import builtins # type: ignore # pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports class TypeInferenceError(ValueError): """Error to raise when type inference failed.""" pass def instance_to_type(o): """Given a Python object o, return the corresponding type hint. """ t = type(o) if o is None: return type(None) elif t not in typehints.DISALLOWED_PRIMITIVE_TYPES: # pylint: disable=deprecated-types-field if sys.version_info[0] == 2 and t == types.InstanceType: return o.__class__ if t == BoundMethod: return types.MethodType return t elif t == tuple: return typehints.Tuple[[instance_to_type(item) for item in o]] elif t == list: if len(o) > 0: return typehints.List[typehints.Union[[ instance_to_type(item) for item in o ]]] else: return typehints.List[typehints.Any] elif t == set: if len(o) > 0: return typehints.Set[typehints.Union[[ instance_to_type(item) for item in o ]]] else: return typehints.Set[typehints.Any] elif t == frozenset: if len(o) > 0: return typehints.FrozenSet[typehints.Union[[ instance_to_type(item) for item in o ]]] else: return typehints.FrozenSet[typehints.Any] elif t == dict: if len(o) > 0: return typehints.Dict[ typehints.Union[[instance_to_type(k) for k, v in o.items()]], typehints.Union[[instance_to_type(v) for k, v in o.items()]], ] else: return typehints.Dict[typehints.Any, typehints.Any] else: raise TypeInferenceError('Unknown forbidden type: %s' % t) def union_list(xs, ys): assert len(xs) == len(ys) return [union(x, y) for x, y in zip(xs, ys)] class Const(object): def __init__(self, value): self.value = value self.type = instance_to_type(value) def __eq__(self, other): return isinstance(other, Const) and self.value == other.value def __ne__(self, other): # TODO(BEAM-5949): Needed for Python 2 compatibility. return not self == other def __hash__(self): return hash(self.value) def __repr__(self): return 'Const[%s]' % str(self.value)[:100] @staticmethod def unwrap(x): if isinstance(x, Const): return x.type return x @staticmethod def unwrap_all(xs): return [Const.unwrap(x) for x in xs] class FrameState(object): """Stores the state of the frame at a particular point of execution. """ def __init__(self, f, local_vars=None, stack=()): self.f = f self.co = f.__code__ self.vars = list(local_vars) self.stack = list(stack) def __eq__(self, other): return isinstance(other, FrameState) and self.__dict__ == other.__dict__ def __ne__(self, other): # TODO(BEAM-5949): Needed for Python 2 compatibility. 
return not self == other def __hash__(self): return hash(tuple(sorted(self.__dict__.items()))) def copy(self): return FrameState(self.f, self.vars, self.stack) def const_type(self, i): return Const(self.co.co_consts[i]) def get_closure(self, i): num_cellvars = len(self.co.co_cellvars) if i < num_cellvars: return self.vars[i] else: return self.f.__closure__[i - num_cellvars].cell_contents def closure_type(self, i): """Returns a TypeConstraint or Const.""" val = self.get_closure(i) if isinstance(val, typehints.TypeConstraint): return val else: return Const(val) def get_global(self, i): name = self.get_name(i) if name in self.f.__globals__: return Const(self.f.__globals__[name]) if name in builtins.__dict__: return Const(builtins.__dict__[name]) return Any def get_name(self, i): return self.co.co_names[i] def __repr__(self): return 'Stack: %s Vars: %s' % (self.stack, self.vars) def __or__(self, other): if self is None: return other.copy() elif other is None: return self.copy() return FrameState( self.f, union_list(self.vars, other.vars), union_list(self.stack, other.stack)) def __ror__(self, left): return self | left def union(a, b): """Returns the union of two types or Const values. """ if a == b: return a elif not a: return b elif not b: return a a = Const.unwrap(a) b = Const.unwrap(b) # TODO(robertwb): Work this into the Union code in a more generic way. if type(a) == type(b) and element_type(a) == typehints.Union[()]: return b elif type(a) == type(b) and element_type(b) == typehints.Union[()]: return a return typehints.Union[a, b] def finalize_hints(type_hint): """Sets type hint for empty data structures to Any.""" def visitor(tc, unused_arg): if isinstance(tc, typehints.DictConstraint): empty_union = typehints.Union[()] if tc.key_type == empty_union: tc.key_type = Any if tc.value_type == empty_union: tc.value_type = Any if isinstance(type_hint, typehints.TypeConstraint): type_hint.visit(visitor, None) def element_type(hint): """Returns the element type of a composite type. """ hint = Const.unwrap(hint) if isinstance(hint, typehints.SequenceTypeConstraint): return hint.inner_type elif isinstance(hint, typehints.TupleHint.TupleConstraint): return typehints.Union[hint.tuple_types] return Any def key_value_types(kv_type): """Returns the key and value type of a KV type. """ # TODO(robertwb): Unions of tuples, etc. # TODO(robertwb): Assert? if (isinstance(kv_type, typehints.TupleHint.TupleConstraint) and len(kv_type.tuple_types) == 2): return kv_type.tuple_types return Any, Any known_return_types = { len: int, hash: int, } class BoundMethod(object): """Used to create a bound method when we only know the type of the instance. """ def __init__(self, func, type): """Instantiates a bound method object. Args: func (types.FunctionType): The method's underlying function type (type): The class of the method. """ self.func = func self.type = type def hashable(c): try: hash(c) return True except TypeError: return False def infer_return_type(c, input_types, debug=False, depth=5): """Analyses a callable to deduce its return type. Args: c: A Python callable to infer the return type of. input_types: A sequence of inputs corresponding to the input types. debug: Whether to print verbose debugging information. depth: Maximum inspection depth during type inference. Returns: A TypeConstraint that the return value of this function will (likely) satisfy given the specified inputs.
""" try: if hashable(c) and c in known_return_types: return known_return_types[c] elif isinstance(c, types.FunctionType): return infer_return_type_func(c, input_types, debug, depth) elif isinstance(c, types.MethodType): if c.__self__ is not None: input_types = [Const(c.__self__)] + input_types return infer_return_type_func(c.__func__, input_types, debug, depth) elif isinstance(c, BoundMethod): input_types = [c.type] + input_types return infer_return_type_func(c.func, input_types, debug, depth) elif inspect.isclass(c): if c in typehints.DISALLOWED_PRIMITIVE_TYPES: return { list: typehints.List[Any], set: typehints.Set[Any], frozenset: typehints.FrozenSet[Any], tuple: typehints.Tuple[Any, ...], dict: typehints.Dict[Any, Any] }[c] return c else: return Any except TypeInferenceError: if debug: traceback.print_exc() return Any except Exception: if debug: sys.stdout.flush() raise else: return Any def infer_return_type_func(f, input_types, debug=False, depth=0): """Analyses a function to deduce its return type. Args: f: A Python function object to infer the return type of. input_types: A sequence of inputs corresponding to the input types. debug: Whether to print verbose debugging information. depth: Maximum inspection depth during type inference. Returns: A TypeConstraint that that the return value of this function will (likely) satisfy given the specified inputs. Raises: TypeInferenceError: if no type can be inferred. """ if debug: print() print(f, id(f), input_types) dis.dis(f) from . import opcodes simple_ops = dict((k.upper(), v) for k, v in opcodes.__dict__.items()) co = f.__code__ code = co.co_code end = len(code) pc = 0 extended_arg = 0 # Python 2 only. free = None yields = set() returns = set() # TODO(robertwb): Default args via inspect module. local_vars = list(input_types) + [typehints.Union[()]] * ( len(co.co_varnames) - len(input_types)) state = FrameState(f, local_vars) states = collections.defaultdict(lambda: None) jumps = collections.defaultdict(int) # In Python 3, use dis library functions to disassemble bytecode and handle # EXTENDED_ARGs. is_py3 = sys.version_info[0] == 3 if is_py3: ofs_table = {} # offset -> instruction for instruction in dis.get_instructions(f): ofs_table[instruction.offset] = instruction # Python 2 - 3.5: 1 byte opcode + optional 2 byte arg (1 or 3 bytes). # Python 3.6+: 1 byte opcode + 1 byte arg (2 bytes, arg may be ignored). 
if sys.version_info >= (3, 6): inst_size = 2 opt_arg_size = 0 else: inst_size = 1 opt_arg_size = 2 last_pc = -1 while pc < end: # pylint: disable=too-many-nested-blocks start = pc if is_py3: instruction = ofs_table[pc] op = instruction.opcode else: op = ord(code[pc]) if debug: print('-->' if pc == last_pc else ' ', end=' ') print(repr(pc).rjust(4), end=' ') print(dis.opname[op].ljust(20), end=' ') pc += inst_size if op >= dis.HAVE_ARGUMENT: if is_py3: arg = instruction.arg else: arg = ord(code[pc]) + ord(code[pc + 1]) * 256 + extended_arg extended_arg = 0 pc += opt_arg_size if op == dis.EXTENDED_ARG: extended_arg = arg * 65536 if debug: print(str(arg).rjust(5), end=' ') if op in dis.hasconst: print('(' + repr(co.co_consts[arg]) + ')', end=' ') elif op in dis.hasname: print('(' + co.co_names[arg] + ')', end=' ') elif op in dis.hasjrel: print('(to ' + repr(pc + arg) + ')', end=' ') elif op in dis.haslocal: print('(' + co.co_varnames[arg] + ')', end=' ') elif op in dis.hascompare: print('(' + dis.cmp_op[arg] + ')', end=' ') elif op in dis.hasfree: if free is None: free = co.co_cellvars + co.co_freevars print('(' + free[arg] + ')', end=' ') # Actually emulate the op. if state is None and states[start] is None: # No control reaches here (yet). if debug: print() continue state |= states[start] opname = dis.opname[op] jmp = jmp_state = None if opname.startswith('CALL_FUNCTION'): if sys.version_info < (3, 6): # Each keyword takes up two arguments on the stack (name and value). standard_args = (arg & 0xFF) + 2 * (arg >> 8) var_args = 'VAR' in opname kw_args = 'KW' in opname pop_count = standard_args + var_args + kw_args + 1 if depth <= 0: return_type = Any elif arg >> 8: if not var_args and not kw_args and not arg & 0xFF: # Keywords only, maybe it's a call to Row. if isinstance(state.stack[-pop_count], Const): from apache_beam.pvalue import Row if state.stack[-pop_count].value == Row: fields = state.stack[-pop_count + 1::2] types = state.stack[-pop_count + 2::2] return_type = row_type.RowTypeConstraint( zip([fld.value for fld in fields], Const.unwrap_all(types))) else: return_type = Any else: # TODO(robertwb): Handle this case. return_type = Any elif isinstance(state.stack[-pop_count], Const): # TODO(robertwb): Handle this better. if var_args or kw_args: state.stack[-1] = Any state.stack[-var_args - kw_args] = Any return_type = infer_return_type( state.stack[-pop_count].value, state.stack[1 - pop_count:], debug=debug, depth=depth - 1) else: return_type = Any state.stack[-pop_count:] = [return_type] else: # Python 3.6+ if opname == 'CALL_FUNCTION': pop_count = arg + 1 if depth <= 0: return_type = Any elif isinstance(state.stack[-pop_count], Const): return_type = infer_return_type( state.stack[-pop_count].value, state.stack[1 - pop_count:], debug=debug, depth=depth - 1) else: return_type = Any elif opname == 'CALL_FUNCTION_KW': # TODO(udim): Handle keyword arguments. Requires passing them by name # to infer_return_type. pop_count = arg + 2 if isinstance(state.stack[-pop_count], Const): from apache_beam.pvalue import Row if state.stack[-pop_count].value == Row: fields = state.stack[-1].value return_type = row_type.RowTypeConstraint( zip(fields, Const.unwrap_all(state.stack[-pop_count + 1:-1]))) else: return_type = Any else: return_type = Any elif opname == 'CALL_FUNCTION_EX': # stack[-has_kwargs]: Map of keyword args. # stack[-1 - has_kwargs]: Iterable of positional args. # stack[-2 - has_kwargs]: Function to call. 
has_kwargs = arg & 1 # type: int pop_count = has_kwargs + 2 if has_kwargs: # TODO(udim): Unimplemented. Requires same functionality as a # CALL_FUNCTION_KW implementation. return_type = Any else: args = state.stack[-1] _callable = state.stack[-2] if isinstance(args, typehints.ListConstraint): # Case where there's a single var_arg argument. args = [args] elif isinstance(args, typehints.TupleConstraint): args = list(args._inner_types()) return_type = infer_return_type( _callable.value, args, debug=debug, depth=depth - 1) else: raise TypeInferenceError('unable to handle %s' % opname) state.stack[-pop_count:] = [return_type] elif opname == 'CALL_METHOD': pop_count = 1 + arg # LOAD_METHOD will return a non-Const (Any) if loading from an Any. if isinstance(state.stack[-pop_count], Const) and depth > 0: return_type = infer_return_type( state.stack[-pop_count].value, state.stack[1 - pop_count:], debug=debug, depth=depth - 1) else: return_type = typehints.Any state.stack[-pop_count:] = [return_type] elif opname in simple_ops: if debug: print("Executing simple op " + opname) simple_ops[opname](state, arg) elif opname == 'RETURN_VALUE': returns.add(state.stack[-1]) state = None elif opname == 'YIELD_VALUE': yields.add(state.stack[-1]) elif opname == 'JUMP_FORWARD': jmp = pc + arg jmp_state = state state = None elif opname == 'JUMP_ABSOLUTE': jmp = arg jmp_state = state state = None elif opname in ('POP_JUMP_IF_TRUE', 'POP_JUMP_IF_FALSE'): state.stack.pop() jmp = arg jmp_state = state.copy() elif opname in ('JUMP_IF_TRUE_OR_POP', 'JUMP_IF_FALSE_OR_POP'): jmp = arg jmp_state = state.copy() state.stack.pop() elif opname == 'FOR_ITER': jmp = pc + arg jmp_state = state.copy() jmp_state.stack.pop() state.stack.append(element_type(state.stack[-1])) else: raise TypeInferenceError('unable to handle %s' % opname) if jmp is not None: # TODO(robertwb): Is this guaranteed to converge? new_state = states[jmp] | jmp_state if jmp < pc and new_state != states[jmp] and jumps[pc] < 5: jumps[pc] += 1 pc = jmp states[jmp] = new_state if debug: print() print(state) pprint.pprint(dict(item for item in states.items() if item[1])) if yields: result = typehints.Iterable[reduce(union, Const.unwrap_all(yields))] else: result = reduce(union, Const.unwrap_all(returns)) finalize_hints(result) if debug: print(f, id(f), input_types, '->', result) return result
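A quick usage sketch of the entry point defined above. The expected results below cover the simple cases the inference handles (integer arithmetic and tuple construction); they assume apache_beam (this package) is importable and that the inference succeeds on the running Python version's bytecode.

from apache_beam.typehints import trivial_inference
from apache_beam.typehints import typehints

# Incrementing an int argument is inferred to return an int.
assert trivial_inference.infer_return_type(lambda x: x + 1, [int]) == int

# Building a 2-tuple from (int, str) inputs is inferred as Tuple[int, str].
assert (trivial_inference.infer_return_type(lambda x, y: (x, y), [int, str])
        == typehints.Tuple[int, str])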
PypiClean
/ansible-8.3.0-py3-none-any.whl/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_pkg_firewall_hyperscalepolicy.py
from __future__ import absolute_import, division, print_function # Copyright 2019-2023 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. __metaclass__ = type ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'metadata_version': '1.1'} DOCUMENTATION = ''' --- module: fmgr_pkg_firewall_hyperscalepolicy short_description: Configure IPv4/IPv6 policies. description: - This module is able to configure a FortiManager device. - Examples include all parameters and values which need to be adjusted to data sources before usage. version_added: "2.2.0" author: - Xinwei Du (@dux-fortinet) - Xing Li (@lix-fortinet) - Jie Xue (@JieX19) - Link Zheng (@chillancezen) - Frank Shen (@fshen01) - Hongbin Lu (@fgtdev-hblu) notes: - Running in workspace locking mode is supported in this FortiManager module, the top level parameters workspace_locking_adom and workspace_locking_timeout help do the work. - To create or update an object, use state present directive. - To delete an object, use state absent directive. - Normally, running one module can fail when a non-zero rc is returned. You can also override the conditions to fail or succeed with parameters rc_failed and rc_succeeded. options: access_token: description: The token to access FortiManager without using username and password. required: false type: str bypass_validation: description: Only set to True when module schema diffs with FortiManager API structure; the module continues to execute without validating parameters. required: false type: bool default: false enable_log: description: Enable/Disable logging for task. required: false type: bool default: false forticloud_access_token: description: Authenticate Ansible client with forticloud API access token. required: false type: str proposed_method: description: The overridden method for the underlying Json RPC request. required: false type: str choices: - update - set - add rc_succeeded: description: The rc codes list with which the conditions to succeed will be overridden. type: list required: false elements: int rc_failed: description: The rc codes list with which the conditions to fail will be overridden. type: list required: false elements: int state: description: The directive to create, update or delete an object. type: str required: true choices: - present - absent workspace_locking_adom: description: The adom to lock for FortiManager running in workspace mode, the value can be global and others including root. required: false type: str workspace_locking_timeout: description: The maximum time in seconds to wait for another user to release the workspace lock.
required: false type: int default: 300 adom: description: the parameter (adom) in requested url type: str required: true pkg: description: the parameter (pkg) in requested url type: str required: true pkg_firewall_hyperscalepolicy: description: the top level parameters set required: false type: dict suboptions: action: type: str description: Policy action choices: - 'deny' - 'accept' auto-asic-offload: type: str description: Enable/disable policy traffic ASIC offloading. choices: - 'disable' - 'enable' cgn-eif: type: str description: Enable/Disable CGN endpoint independent filtering. choices: - 'disable' - 'enable' cgn-eim: type: str description: Enable/Disable CGN endpoint independent mapping choices: - 'disable' - 'enable' cgn-log-server-grp: type: str description: NP log server group name cgn-resource-quota: type: int description: resource quota cgn-session-quota: type: int description: session quota comments: type: str description: Comment. delay-tcp-npu-session: type: str description: Enable TCP NPU session delay to guarantee packet order of 3-way handshake. choices: - 'disable' - 'enable' dstaddr: description: description type: str dstaddr-negate: type: str description: When enabled dstaddr/dstaddr6 specifies what the destination address must NOT be. choices: - 'disable' - 'enable' dstaddr6: description: description type: str dstintf: description: description type: str firewall-session-dirty: type: str description: How to handle sessions if the configuration of this firewall policy changes. choices: - 'check-all' - 'check-new' global-label: type: str description: Label for the policy that appears when the GUI is in Global View mode. ippool: type: str description: Enable to use IP Pools for source NAT. choices: - 'disable' - 'enable' label: type: str description: Label for the policy that appears when the GUI is in Section View mode. name: type: str description: Policy name. nat: type: str description: Enable/disable source NAT. choices: - 'disable' - 'enable' policy-offload: type: str description: Enable/Disable hardware session setup for CGNAT. choices: - 'disable' - 'enable' policyid: type: int description: Policy ID poolname: description: description type: str poolname6: description: description type: str send-deny-packet: type: str description: Enable to send a reply when a session is denied or blocked by a firewall policy. choices: - 'disable' - 'enable' service: description: description type: str service-negate: type: str description: When enabled service specifies what the service must NOT be. choices: - 'disable' - 'enable' srcaddr: description: description type: str srcaddr-negate: type: str description: When enabled srcaddr/srcaddr6 specifies what the source address must NOT be. choices: - 'disable' - 'enable' srcaddr6: description: description type: str srcintf: description: description type: str status: type: str description: Enable or disable this policy. choices: - 'disable' - 'enable' tcp-timeout-pid: type: str description: TCP timeout profile ID traffic-shaper: type: str description: Traffic shaper. traffic-shaper-reverse: type: str description: Reverse traffic shaper. udp-timeout-pid: type: str description: UDP timeout profile ID uuid: type: str description: Universally Unique Identifier ''' EXAMPLES = ''' - hosts: fortimanager-inventory collections: - fortinet.fortimanager connection: httpapi vars: ansible_httpapi_use_ssl: True ansible_httpapi_validate_certs: False ansible_httpapi_port: 443 tasks: - name: Configure IPv4/IPv6 policies. 
fmgr_pkg_firewall_hyperscalepolicy: bypass_validation: False workspace_locking_adom: <value in [global, custom adom including root]> workspace_locking_timeout: 300 rc_succeeded: [0, -2, -3, ...] rc_failed: [-2, -3, ...] adom: <your own value> pkg: <your own value> state: <value in [present, absent]> pkg_firewall_hyperscalepolicy: action: <value in [deny, accept]> auto-asic-offload: <value in [disable, enable]> cgn-eif: <value in [disable, enable]> cgn-eim: <value in [disable, enable]> cgn-log-server-grp: <value of string> cgn-resource-quota: <value of integer> cgn-session-quota: <value of integer> comments: <value of string> delay-tcp-npu-session: <value in [disable, enable]> dstaddr: <value of string> dstaddr-negate: <value in [disable, enable]> dstaddr6: <value of string> dstintf: <value of string> firewall-session-dirty: <value in [check-all, check-new]> global-label: <value of string> ippool: <value in [disable, enable]> label: <value of string> name: <value of string> nat: <value in [disable, enable]> policy-offload: <value in [disable, enable]> policyid: <value of integer> poolname: <value of string> poolname6: <value of string> send-deny-packet: <value in [disable, enable]> service: <value of string> service-negate: <value in [disable, enable]> srcaddr: <value of string> srcaddr-negate: <value in [disable, enable]> srcaddr6: <value of string> srcintf: <value of string> status: <value in [disable, enable]> tcp-timeout-pid: <value of string> traffic-shaper: <value of string> traffic-shaper-reverse: <value of string> udp-timeout-pid: <value of string> uuid: <value of string> ''' RETURN = ''' meta: description: The result of the request. type: dict returned: always contains: request_url: description: The full url requested. returned: always type: str sample: /sys/login/user response_code: description: The status of api request. returned: always type: int sample: 0 response_data: description: The api response. type: list returned: always response_message: description: The descriptive message of the api response. type: str returned: always sample: OK. system_information: description: The information of the target system. type: dict returned: always rc: description: The status the request. type: int returned: always sample: 0 version_check_warning: description: Warning if the parameters used in the playbook are not supported by the current FortiManager version. 
type: list returned: complex ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass def main(): jrpc_urls = [ '/pm/config/adom/{adom}/pkg/{pkg}/firewall/hyperscale-policy' ] perobject_jrpc_urls = [ '/pm/config/adom/{adom}/pkg/{pkg}/firewall/hyperscale-policy/{hyperscale-policy}' ] url_params = ['adom', 'pkg'] module_primary_key = 'name' module_arg_spec = { 'access_token': { 'type': 'str', 'required': False, 'no_log': True }, 'bypass_validation': { 'type': 'bool', 'required': False, 'default': False }, 'enable_log': { 'type': 'bool', 'required': False, 'default': False }, 'forticloud_access_token': { 'type': 'str', 'required': False, 'no_log': True }, 'proposed_method': { 'type': 'str', 'required': False, 'choices': [ 'set', 'update', 'add' ] }, 'rc_succeeded': { 'required': False, 'type': 'list', 'elements': 'int' }, 'rc_failed': { 'required': False, 'type': 'list', 'elements': 'int' }, 'state': { 'type': 'str', 'required': True, 'choices': [ 'present', 'absent' ] }, 'workspace_locking_adom': { 'type': 'str', 'required': False }, 'workspace_locking_timeout': { 'type': 'int', 'required': False, 'default': 300 }, 'adom': { 'required': True, 'type': 'str' }, 'pkg': { 'required': True, 'type': 'str' }, 'pkg_firewall_hyperscalepolicy': { 'required': False, 'type': 'dict', 'revision': { '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.0': True }, 'options': { 'action': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'deny', 'accept' ], 'type': 'str' }, 'auto-asic-offload': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'cgn-eif': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': 
False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'cgn-eim': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'cgn-log-server-grp': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'cgn-resource-quota': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'int' }, 'cgn-session-quota': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'int' }, 'comments': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'delay-tcp-npu-session': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': False, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'dstaddr': { 
'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'dstaddr-negate': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'dstaddr6': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': False, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'dstintf': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'firewall-session-dirty': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': False, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'check-all', 'check-new' ], 'type': 'str' }, 'global-label': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'ippool': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': 
False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'label': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'name': { 'required': True, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'nat': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'policy-offload': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'policyid': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'int' }, 'poolname': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, 
'6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'poolname6': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': False, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'send-deny-packet': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': False, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'service': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'service-negate': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'srcaddr': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'srcaddr-negate': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': 
True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'srcaddr6': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': False, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'srcintf': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'status': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'choices': [ 'disable', 'enable' ], 'type': 'str' }, 'tcp-timeout-pid': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'traffic-shaper': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'traffic-shaper-reverse': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'udp-timeout-pid': { 
'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' }, 'uuid': { 'required': False, 'revision': { '7.2.0': True, '6.2.0': False, '6.2.2': False, '6.2.6': False, '6.2.7': False, '6.2.8': False, '6.2.9': False, '6.2.10': False, '6.2.11': False, '6.4.1': False, '6.4.3': False, '6.4.4': False, '6.4.6': False, '6.4.7': True, '6.4.8': True, '6.4.9': True, '6.4.10': True, '6.4.11': True, '6.4.12': True, '7.0.1': True, '7.0.2': True, '7.0.3': True, '7.0.4': True, '7.0.5': True, '7.0.6': True, '7.0.7': True, '7.0.8': True, '7.2.1': False, '7.2.2': False, '7.2.3': False, '7.4.0': False }, 'type': 'str' } } } } params_validation_blob = [] check_galaxy_version(module_arg_spec) module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'pkg_firewall_hyperscalepolicy'), supports_check_mode=False) fmgr = None if module._socket_path: connection = Connection(module._socket_path) connection.set_option('access_token', module.params['access_token'] if 'access_token' in module.params else None) connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False) connection.set_option('forticloud_access_token', module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None) fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data') fmgr.validate_parameters(params_validation_blob) fmgr.process_curd(argument_specs=module_arg_spec) else: module.fail_json(msg='MUST RUN IN HTTPAPI MODE') module.exit_json(meta=module.params) if __name__ == '__main__': main()
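The per-option 'revision' maps above encode which FortiManager releases expose each field ('6.4.7': True means the option exists in that build). A minimal sketch of reading one of those maps; the helper and the excerpted map below are illustrative only, not part of the module:

def supported_versions(revision_map):
    # Versions flagged True are the releases where the option is available.
    return sorted(version for version, available in revision_map.items() if available)

# Excerpt of the 'action' option's revision map defined above.
action_revisions = {'6.4.6': False, '6.4.7': True, '7.0.8': True, '7.2.0': True, '7.2.1': False}
print(supported_versions(action_revisions))  # ['6.4.7', '7.0.8', '7.2.0']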
/fhir.resources-7.0.2.tar.gz/fhir.resources-7.0.2/fhir/resources/DSTU2/location.py
from typing import List as ListType from pydantic import Field from . import fhirtypes from .backboneelement import BackboneElement from .domainresource import DomainResource class Location(DomainResource): """Details and position information for a physical place. Details and position information for a physical place where services are provided and resources and participants may be stored, found, contained or accommodated. """ resource_type = Field("Location", const=True) address: fhirtypes.AddressType = Field( None, alias="address", title="Type `Address` (represented as `dict` in JSON)", description="Physical location", ) description: fhirtypes.String = Field( None, alias="description", title="Type `String` (represented as `dict` in JSON)", description=( "Additional details about the location that could be displayed as " "further information to identify the location beyond its name" ), ) identifier: ListType[fhirtypes.IdentifierType] = Field( None, alias="identifier", title="List of `Identifier` items (represented as `dict` in JSON)", description="Unique code or number identifying the location to its users", ) managingOrganization: fhirtypes.ReferenceType = Field( None, alias="managingOrganization", title=( "Type `Reference` referencing `Organization` (represented as `dict` in " "JSON)" ), description="Organization responsible for provisioning and upkeep", ) mode: fhirtypes.Code = Field( None, alias="mode", title="Type `Code` (represented as `dict` in JSON)", description="instance | kind", ) name: fhirtypes.String = Field( None, alias="name", title="Type `String` (represented as `dict` in JSON)", description="Name of the location as used by humans", ) partOf: fhirtypes.ReferenceType = Field( None, alias="partOf", title=( "Type `Reference` referencing `Location` (represented as `dict` in " "JSON)" ), description="Another Location this one is physically part of", ) physicalType: fhirtypes.CodeableConceptType = Field( None, alias="physicalType", title="Type `CodeableConcept` (represented as `dict` in JSON)", description="Physical form of the location", ) position: fhirtypes.LocationPositionType = Field( None, alias="position", title="Type `LocationPosition` (represented as `dict` in JSON)", description="The absolute geographic location", ) status: fhirtypes.Code = Field( None, alias="status", title="Type `Code` (represented as `dict` in JSON)", description="active | suspended | inactive", ) telecom: ListType[fhirtypes.ContactPointType] = Field( None, alias="telecom", title="List of `ContactPoint` items (represented as `dict` in JSON)", description="Contact details of the location", ) type: fhirtypes.CodeableConceptType = Field( None, alias="type", title="Type `CodeableConcept` (represented as `dict` in JSON)", description="Type of function performed", ) class LocationPosition(BackboneElement): """The absolute geographic location. The absolute geographic location of the Location, expressed using the WGS84 datum (This is the same co-ordinate system used in KML). """ resource_type = Field("LocationPosition", const=True) altitude: fhirtypes.Decimal = Field( None, alias="altitude", title="Type `Decimal` (represented as `dict` in JSON)", description="Altitude with WGS84 datum", ) latitude: fhirtypes.Decimal = Field( ..., alias="latitude", title="Type `Decimal` (represented as `dict` in JSON)", description="Latitude with WGS84 datum", ) longitude: fhirtypes.Decimal = Field( ..., alias="longitude", title="Type `Decimal` (represented as `dict` in JSON)", description="Longitude with WGS84 datum", )
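A minimal construction sketch for these models. The import path is inferred from this file's location in the package, the field values are illustrative, and .json() is the serialization method of the underlying pydantic (v1) model:

from fhir.resources.DSTU2.location import Location, LocationPosition

loc = Location(
    name="South Wing, second floor",
    mode="instance",
    status="active",
    position=LocationPosition(latitude=42.2565, longitude=-83.6947),
)
print(loc.json(indent=2))  # serialize to a FHIR JSON document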
/ipp_toolkit-0.1.2.tar.gz/ipp_toolkit-0.1.2/ipp_toolkit/utils/rl/agents/DaggerAgent.py
from ipp_toolkit.utils.rl.agents.BaseAgent import BaseAgent from ipp_toolkit.utils.rl.agents.PerfectAgent import PerfectAgent from ipp_toolkit.utils.rl.agents.UCBAgent import UCBAgent from imitation.algorithms import bc from imitation.data import rollout from imitation.data.wrappers import RolloutInfoWrapper from stable_baselines3.common.vec_env import DummyVecEnv from stable_baselines3.common.evaluation import evaluate_policy import numpy as np from imitation.scripts.train_preference_comparisons import save_model from pathlib import Path import os from imitation.algorithms.bc import reconstruct_policy import torch import tempfile from imitation.algorithms.dagger import SimpleDAggerTrainer from imitation.data.types import TransitionsMinimal from tqdm import tqdm import imitation #from stable_baselines3.common.policies import ActorCriticPolicy class DaggerAgent(BaseAgent): def __init__(self, env): self.name = "DA" self.policy = None self.model_name = "da_model" def train( self, env, cfg, ): model_dir = cfg["model_dir"] savefile = Path(model_dir, self.model_name + ".zip") if not os.path.exists(model_dir): os.mkdir(model_dir) rng = np.random.default_rng(0) bc_trainer = bc.BC( observation_space=env.observation_space, action_space=env.action_space, rng=rng, batch_size=256, ) expert = PerfectAgent(env) #TODO #not really sure if wrapper is needed but it worked so keeping it #venv = DummyVecEnv([lambda: env]) venv = DummyVecEnv([lambda: RolloutInfoWrapper(env)]) with tempfile.TemporaryDirectory(prefix="dagger_example_") as tmpdir: print(tmpdir) dagger_trainer = SimpleDAggerTrainer( venv=venv, scratch_dir=tmpdir, expert_policy=expert, rng=rng, bc_trainer=bc_trainer, ) dagger_trainer.train(10000, rollout_round_min_episodes=100, bc_train_kwargs={"n_epochs": 100}, #policy=ActorCriticPolicy, ) dagger_trainer.save_policy(savefile) def load_model(self, model_dir): self.policy = reconstruct_policy(Path(model_dir, self.model_name + ".zip")) def get_action(self, observation, env): observation = torch.Tensor(observation).cuda() observation = torch.unsqueeze(observation, dim=0) action, _, _ = self.policy(observation) action = action.squeeze().detach().cpu().numpy() return action, None
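A rough usage sketch: `env` is assumed to be a Gym-style environment compatible with these agents, and `cfg` only needs the "model_dir" key that train() reads. Note that get_action() moves tensors to CUDA as written, so a GPU is required:

agent = DaggerAgent(env)                       # env is assumed to exist
agent.train(env, cfg={"model_dir": "models"})  # DAgger against the PerfectAgent expert
agent.load_model("models")                     # reload the saved behavioural-cloning policy

obs = env.reset()
action, _ = agent.get_action(obs, env)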
/torch_geometric-2.3.1.tar.gz/torch_geometric-2.3.1/torch_geometric/datasets/coma.py
import os.path as osp from glob import glob from typing import Callable, List, Optional import torch from torch_geometric.data import InMemoryDataset, extract_zip from torch_geometric.io import read_ply class CoMA(InMemoryDataset): r"""The CoMA 3D faces dataset from the `"Generating 3D faces using Convolutional Mesh Autoencoders" <https://arxiv.org/abs/1807.10267>`_ paper, containing 20,466 meshes of extreme expressions captured over 12 different subjects. .. note:: Data objects hold mesh faces instead of edge indices. To convert the mesh to a graph, use the :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`. To convert the mesh to a point cloud, use the :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to sample a fixed number of points on the mesh faces according to their face area. Args: root (str): Root directory where the dataset should be saved. train (bool, optional): If :obj:`True`, loads the training dataset, otherwise the test dataset. (default: :obj:`True`) transform (callable, optional): A function/transform that takes in an :obj:`torch_geometric.data.Data` object and returns a transformed version. The data object will be transformed before every access. (default: :obj:`None`) pre_transform (callable, optional): A function/transform that takes in an :obj:`torch_geometric.data.Data` object and returns a transformed version. The data object will be transformed before being saved to disk. (default: :obj:`None`) pre_filter (callable, optional): A function that takes in an :obj:`torch_geometric.data.Data` object and returns a boolean value, indicating whether the data object should be included in the final dataset. (default: :obj:`None`) **STATS:** .. list-table:: :widths: 10 10 10 10 10 :header-rows: 1 * - #graphs - #nodes - #edges - #features - #classes * - 20,465 - 5,023 - 29,990 - 3 - 12 """ url = 'https://coma.is.tue.mpg.de/' categories = [ 'bareteeth', 'cheeks_in', 'eyebrow', 'high_smile', 'lips_back', 'lips_up', 'mouth_down', 'mouth_extreme', 'mouth_middle', 'mouth_open', 'mouth_side', 'mouth_up', ] def __init__(self, root: str, train: bool = True, transform: Optional[Callable] = None, pre_transform: Optional[Callable] = None, pre_filter: Optional[Callable] = None): super().__init__(root, transform, pre_transform, pre_filter) path = self.processed_paths[0] if train else self.processed_paths[1] self.data, self.slices = torch.load(path) @property def raw_file_names(self) -> str: return 'COMA_data.zip' @property def processed_file_names(self) -> List[str]: return ['training.pt', 'test.pt'] def download(self): raise RuntimeError( f"Dataset not found. 
Please download 'COMA_data.zip' from " f"'{self.url}' and move it to '{self.raw_dir}'") def process(self): folders = sorted(glob(osp.join(self.raw_dir, 'FaceTalk_*'))) if len(folders) == 0: extract_zip(self.raw_paths[0], self.raw_dir, log=False) folders = sorted(glob(osp.join(self.raw_dir, 'FaceTalk_*'))) train_data_list, test_data_list = [], [] for folder in folders: for i, category in enumerate(self.categories): files = sorted(glob(osp.join(folder, category, '*.ply'))) for j, f in enumerate(files): data = read_ply(f) data.y = torch.tensor([i], dtype=torch.long) if self.pre_filter is not None and\ not self.pre_filter(data): continue if self.pre_transform is not None: data = self.pre_transform(data) if (j % 100) < 90: train_data_list.append(data) else: test_data_list.append(data) torch.save(self.collate(train_data_list), self.processed_paths[0]) torch.save(self.collate(test_data_list), self.processed_paths[1])
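As the class docstring suggests, the mesh faces can be turned into graph edges with a pre_transform. A short sketch using the public torch_geometric API; the root path is illustrative, and COMA_data.zip must already have been downloaded manually (see download() above):

import torch_geometric.transforms as T
from torch_geometric.datasets import CoMA

train_dataset = CoMA(root="data/CoMA", train=True, pre_transform=T.FaceToEdge())
data = train_dataset[0]
print(data)  # e.g. Data(pos=[5023, 3], edge_index=[2, ...], y=[1])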
/Yodine-0.4.0rc3.tar.gz/Yodine-0.4.0rc3/yodine/core/vector.py
import math

try:
    raise ImportError("blah")

    import numpy as np

    class _Vector(np.ndarray):
        def vsize(self) -> float:
            return np.linalg.norm(self)

        def sqsize(self) -> float:
            return np.linalg.norm(self) ** 2

        def fisrsize(self) -> float:
            # Inverse of the vector's size (1 / |v|), mirroring the pure-Python
            # fallback below; the original returned 1 / |v| ** 2 by mistake.
            return 1 / np.linalg.norm(self)

        def unit(self) -> "Vector":
            sz = self.vsize()

            if sz == 0:
                return Vector(0, 0)
            elif sz == 1:
                return self
            else:
                return self / sz

        def ints(self) -> "Vector":
            return Vector(int(self[0]), int(self[1]))

        def rotate(self, angle: float) -> "Vector":
            c = math.cos(angle)
            s = math.sin(angle)
            return Vector(self.x * c - self.y * s, self.x * s + self.y * c)

        @property
        def x(self) -> float:
            return self[0]

        @property
        def y(self) -> float:
            return self[1]

        @x.setter
        def x(self, v: float):
            self[0] = v

        @y.setter
        def y(self, v: float):
            self[1] = v

        def dot(self, b: "Vector") -> float:
            # Was: self[0] * b.vec[0] + self[1] * b.vec[1]; vectors have no
            # .vec attribute, so index the other vector directly.
            return self[0] * b[0] + self[1] * b[1]

        @classmethod
        def new(cls, _x=None, _y=None):
            # get initial values
            if _y is not None:
                x = _x
                y = _y
            elif _x is not None:
                try:
                    x, y = _x
                except TypeError:
                    raise ValueError("Non-vectorial value: {}".format(repr(_x)))
            else:
                x = 0.0
                y = 0.0

            return super(_Vector, cls).__new__(
                cls, (2,), dtype=np.float128, buffer=np.array([x, y], dtype=np.float128)
            )

        def __iadd__(self, b: "Vector") -> "Vector":
            self.x += b.x
            self.y += b.y
            return self

        def __isub__(self, b: "Vector") -> "Vector":
            # Was: self.x -= res.x, with res undefined; subtract b instead.
            self.x -= b.x
            self.y -= b.y
            return self

        def __imul__(self, b: "Vector") -> "Vector":
            try:
                self.x *= b.x
                self.y *= b.y
            except AttributeError:
                self.x *= b
                self.y *= b
            return self

        def __itruediv__(self, b: "Vector") -> "Vector":
            try:
                self.x /= b.x
                self.y /= b.y
            except AttributeError:
                self.x /= b
                self.y /= b
            return self

    def Vector(*args, **kwargs):
        return _Vector.new(*args, **kwargs)

except ImportError:
    import struct

    class _Vector(object):
        def __init__(self, x=None, y=None):
            if y is not None:
                self.x = x
                self.y = y
            elif x is not None:
                try:
                    self.x, self.y = x
                except TypeError:
                    raise ValueError("Non-vectorial value: {}".format(repr(x)))
            else:
                self.x = 0
                self.y = 0

        def __iter__(self):
            return iter((self.x, self.y))

        def __getitem__(self, co):
            if co in (0, "x"):
                return self.x
            if co in (1, "y"):
                return self.y
            raise KeyError("No such key in a vector: ", repr(co))

        def ints(self):
            return Vector(int(self[0]), int(self[1]))

        def rotate(self, angle: float) -> "Vector":
            c = math.cos(angle)
            s = math.sin(angle)
            return Vector(
                self.x * c - self.y * s,
                self.x * s + self.y * c,
            )

        def vsize(self) -> float:
            return math.sqrt(self[0] ** 2 + self[1] ** 2)

        def sqsize(self) -> float:
            return float(self[0] ** 2 + self[1] ** 2)

        def fisrsize(self, sqs=None) -> float:
            y = sqs or self.sqsize()
            threehalfs = 1.5
            x2 = y * 0.5

            packed_y = struct.pack("f", y)
            i = struct.unpack("i", packed_y)[0]  # treat float's bytes as int
            i = 0x5F3759DF - (i >> 1)  # arithmetic with magic number
            packed_i = struct.pack("i", i)
            y = struct.unpack("f", packed_i)[0]  # treat int's bytes as float

            y = y * (threehalfs - (x2 * y * y))  # Newton's method
            return y

        def unit(self) -> "Vector":
            sz = self.sqsize()
            fz = self.fisrsize(sz)

            if sz == 0:
                return Vector(0, 0)
            elif sz == 1:
                return self
            else:
                return self * fz

        def __add__(self, b: "Vector") -> "Vector":
            return Vector(self[0] + b.x, self[1] + b.y)

        def __neg__(self) -> "Vector":
            return Vector(-self[0], -self[1])

        def __mul__(self, b) -> "Vector":
            try:
                return Vector(self[0] * b.x, self[1] * b.y)
            except AttributeError:
                return Vector(self[0] * b, self[1] * b)

        def __truediv__(self, b) -> "Vector":
            try:
                return Vector(self[0] / b.x, self[1] / b.y)
            except AttributeError:
                return Vector(self[0] / b, self[1] / b)

        def __iadd__(self, b: "Vector") -> "Vector":
            res = self + b
            self.x = res.x
            self.y = res.y
            return self

        def __isub__(self, b: "Vector") -> "Vector":
            res = self - b
            self.x = res.x
            self.y = res.y
            return self

        def __imul__(self, b: "Vector") -> "Vector":
            res = self * b
            self.x = res.x
            self.y = res.y
            return self

        def __itruediv__(self, b: "Vector") -> "Vector":
            res = self / b
            self.x = res.x
            self.y = res.y
            return self

        def dot(self, b: "Vector") -> float:
            # Was: self[0] * b.vec[0] + self[1] * b.vec[1]; vectors have no
            # .vec attribute, so index the other vector directly.
            return self[0] * b[0] + self[1] * b[1]

        def __sub__(self, b: "Vector") -> "Vector":
            return self + (-b)

        def __lshift__(self, b: "Vector") -> "Vector":
            self.x = b.x
            self.y = b.y
            return self

        def __repr__(self) -> str:
            return "{}(x={},y={})".format(type(self).__name__, self[0], self[1])

    Vector = _Vector


def ComponentVector(component: "Component"):
    res = component.get()

    if not (hasattr(res, "x") and hasattr(res, "y")):
        from .entity import VectorComponent

        res = component.entity.create_component(
            component.name, component.value, VectorComponent
        )
        return res.get()

    return res
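A small usage sketch of the pure-Python fallback (the branch that actually runs, given the forced ImportError above). Note that unit() relies on the fast-inverse-square-root approximation, so expect a small numerical error:

import math
from yodine.core.vector import Vector  # import path per this file's location

v = Vector(3, 4)
print(v.vsize())            # 5.0
print(v.dot(Vector(1, 2)))  # 3*1 + 4*2 = 11
print(v.unit())             # approximately Vector(x=0.6, y=0.8)
print(v.rotate(math.pi))    # approximately Vector(x=-3, y=-4)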
/Products.ResourceRegistries-3.0.8.tar.gz/Products.ResourceRegistries-3.0.8/Products/ResourceRegistries/tools/KSSRegistry.py
from .packer import CSSPacker from AccessControl import ClassSecurityInfo from App.class_init import InitializeClass from Products.PageTemplates.PageTemplateFile import PageTemplateFile from Products.ResourceRegistries import config from Products.ResourceRegistries import permissions from Products.ResourceRegistries.interfaces import IKSSRegistry from Products.ResourceRegistries.tools.BaseRegistry import BaseRegistryTool from Products.ResourceRegistries.tools.BaseRegistry import Resource from zope.interface import implementer class KineticStylesheet(Resource): security = ClassSecurityInfo() def __init__(self, id, **kwargs): Resource.__init__(self, id, **kwargs) self._data['compression'] = kwargs.get('compression', 'safe') if self.isExternal: self._data['compression'] = 'none' #External resources are not compressable security.declarePublic('getCompression') def getCompression(self): # as this is a new property, old instance might not have that value, so # return 'safe' as default compression = self._data.get('compression', 'safe') if compression in config.KSS_COMPRESSION_METHODS: return compression return 'none' security.declareProtected(permissions.ManagePortal, 'setCompression') def setCompression(self, compression): if self.isExternalResource() and compression not in config.KSS_EXTERNAL_COMPRESSION_METHODS: raise ValueError("Compression method %s must be one of: %s for External Resources" % ( compression, ', '.join(config.KSS_EXTERNAL_COMPRESSION_METHODS))) self._data['compression'] = compression InitializeClass(KineticStylesheet) @implementer(IKSSRegistry) class KSSRegistryTool(BaseRegistryTool): """A Plone registry for managing the linking to kss files.""" id = config.KSSTOOLNAME meta_type = config.KSSTOOLTYPE title = 'KSS Registry' security = ClassSecurityInfo() # # ZMI stuff # manage_kssForm = PageTemplateFile('www/kssconfig', config.GLOBALS) manage_kssComposition = PageTemplateFile('www/ksscomposition', config.GLOBALS) manage_options = ( { 'label': 'KSS Registry', 'action': 'manage_kssForm', }, { 'label': 'Merged KSS Composition', 'action': 'manage_kssComposition', }, ) + BaseRegistryTool.manage_options attributes_to_compare = ('getAuthenticated', 'getExpression', 'getCookable', 'getCacheable', 'getConditionalcomment') filename_base = 'ploneStyles' filename_appendix = '.kss' merged_output_prefix = u'' cache_duration = config.KSS_CACHE_DURATION resource_class = KineticStylesheet @property def manage_workspace_url(self): return "%s/manage_workspace" % self.absolute_url_path() # # Private Methods # security.declarePrivate('clearKineticStylesheets') def clearKineticStylesheets(self): self.clearResources() def _compressKSS(self, content, level='safe'): if level == 'full': return CSSPacker('full').pack(content) elif level == 'safe': return CSSPacker('safe').pack(content) else: return content security.declarePrivate('finalizeContent') def finalizeContent(self, resource, content): """Finalize the resource content.""" compression = resource.getCompression() if compression != 'none' and not self.getDebugMode(): orig_url = "%s/%s?original=1" % (self.absolute_url(), resource.getId()) content = "/* %s */\n%s" % (orig_url, self._compressKSS(content, compression)) return content # # ZMI Methods # security.declareProtected(permissions.ManagePortal, 'manage_addKineticStylesheet') def manage_addKineticStylesheet(self, id, expression='', media='screen', rel='stylesheet', title='', rendering='import', enabled=False, cookable=True, compression='safe', cacheable=True, conditionalcomment='', 
authenticated=False, bundle='default', REQUEST=None): """Register a kineticstylesheet from a TTW request.""" self.registerKineticStylesheet(id, expression, enabled, cookable, compression, cacheable, conditionalcomment, authenticated, bundle=bundle) if REQUEST: REQUEST.RESPONSE.redirect("manage_workspace") security.declareProtected(permissions.ManagePortal, 'manage_saveKineticStylesheets') def manage_saveKineticStylesheets(self, REQUEST=None): """Save kineticstylesheets from the ZMI. Updates the whole sequence. For editing and reordering. """ if REQUEST and not REQUEST.form: REQUEST.RESPONSE.redirect("manage_workspace") return debugmode = REQUEST.get('debugmode', False) self.setDebugMode(debugmode) records = REQUEST.get('kineticstylesheets', []) records.sort(lambda a, b: a.sort - b.sort) self.resources = () kineticstylesheets = [] for r in records: kss = self.resource_class( r.get('id'), expression=r.get('expression', ''), enabled=r.get('enabled', True), cookable=r.get('cookable', True), cacheable=r.get('cacheable', True), compression=r.get('compression', 'safe'), conditionalcomment=r.get('conditionalcomment',''), authenticated=r.get('authenticated', False), bundle=r.get('bundle', 'default')) kineticstylesheets.append(kss) self.resources = tuple(kineticstylesheets) self.cookResources() if REQUEST: REQUEST.RESPONSE.redirect("manage_workspace") security.declareProtected(permissions.ManagePortal, 'manage_removeKineticStylesheet') def manage_removeKineticStylesheet(self, id, REQUEST=None): """Remove kineticstylesheet from the ZMI.""" self.unregisterResource(id) if REQUEST: REQUEST.RESPONSE.redirect("manage_workspace") # # Protected Methods # security.declareProtected(permissions.ManagePortal, 'registerKineticStylesheet') def registerKineticStylesheet(self, id, expression='', enabled=1, cookable=True, compression='safe', cacheable=True, conditionalcomment='', authenticated=False, skipCooking=False, bundle='default'): """Register a kineticstylesheet.""" kineticstylesheet = self.resource_class(id, expression=expression, enabled=enabled, cookable=cookable, compression=compression, cacheable=cacheable, conditionalcomment=conditionalcomment, authenticated=authenticated, bundle=bundle) self.storeResource(kineticstylesheet, skipCooking=skipCooking) security.declareProtected(permissions.ManagePortal, 'updateKineticStylesheet') def updateKineticStylesheet(self, id, **data): kineticstylesheet = self.getResourcesDict().get(id, None) if kineticstylesheet is None: raise ValueError('Invalid resource id %s' % (id)) if data.get('expression', None) is not None: kineticstylesheet.setExpression(data['expression']) if data.get('authenticated', None) is not None: kineticstylesheet.setAuthenticated(data['authenticated']) if data.get('enabled', None) is not None: kineticstylesheet.setEnabled(data['enabled']) if data.get('cookable', None) is not None: kineticstylesheet.setCookable(data['cookable']) if data.get('compression', None) is not None: kineticstylesheet.setCompression(data['compression']) if data.get('cacheable', None) is not None: kineticstylesheet.setCacheable(data['cacheable']) if data.get('conditionalcomment', None) is not None: kineticstylesheet.setConditionalcomment(data['conditionalcomment']) if data.get('bundle', None) is not None: kineticstylesheet.setBundle(data['bundle']) security.declareProtected(permissions.ManagePortal, 'getCompressionOptions') def getCompressionOptions(self): """Compression methods for use in ZMI forms.""" return config.KSS_COMPRESSION_METHODS 
security.declareProtected(permissions.ManagePortal, 'getExternalCompressionOptions') def getExternalCompressionOptions(self): """Compression methods for use in ZMI forms.""" return config.KSS_EXTERNAL_COMPRESSION_METHODS security.declareProtected(permissions.View, 'getContentType') def getContentType(self): """Return the registry content type.""" return 'text/css;charset=utf-8' InitializeClass(KSSRegistryTool)
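A registration sketch against a Plone site where this registry is installed. The tool id comes from config.KSSTOOLNAME; 'portal_kss' is an assumption, as is the `portal` site-root object:

from Products.CMFCore.utils import getToolByName

tool = getToolByName(portal, 'portal_kss')  # tool id assumed; portal is your site root
tool.registerKineticStylesheet(
    'site.kss',
    expression='',       # empty expression: always included
    compression='safe',  # see _compressKSS() above
    enabled=True,
)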
/Transcrypt-3.7.16.tar.gz/Transcrypt-3.7.16/transcrypt/demos/parcel_demo/node_modules/cosmiconfig/dist/createExplorer.js
'use strict'; const path = require('path'); const loaders = require('./loaders'); const readFile = require('./readFile'); const cacheWrapper = require('./cacheWrapper'); const getDirectory = require('./getDirectory'); const MODE_SYNC = 'sync'; // An object value represents a config object. // null represents that the loader did not find anything relevant. // undefined represents that the loader found something relevant // but it was empty. class Explorer { constructor(options ) { this.loadCache = options.cache ? new Map() : null; this.loadSyncCache = options.cache ? new Map() : null; this.searchCache = options.cache ? new Map() : null; this.searchSyncCache = options.cache ? new Map() : null; this.config = options; this.validateConfig(); } clearLoadCache() { if (this.loadCache) { this.loadCache.clear(); } if (this.loadSyncCache) { this.loadSyncCache.clear(); } } clearSearchCache() { if (this.searchCache) { this.searchCache.clear(); } if (this.searchSyncCache) { this.searchSyncCache.clear(); } } clearCaches() { this.clearLoadCache(); this.clearSearchCache(); } validateConfig() { const config = this.config; config.searchPlaces.forEach(place => { const loaderKey = path.extname(place) || 'noExt'; const loader = config.loaders[loaderKey]; if (!loader) { throw new Error( `No loader specified for ${getExtensionDescription( place )}, so searchPlaces item "${place}" is invalid` ); } }); } search(searchFrom ) { searchFrom = searchFrom || process.cwd(); return getDirectory(searchFrom).then(dir => { return this.searchFromDirectory(dir); }); } searchFromDirectory(dir ) { const absoluteDir = path.resolve(process.cwd(), dir); const run = () => { return this.searchDirectory(absoluteDir).then(result => { const nextDir = this.nextDirectoryToSearch(absoluteDir, result); if (nextDir) { return this.searchFromDirectory(nextDir); } return this.config.transform(result); }); }; if (this.searchCache) { return cacheWrapper(this.searchCache, absoluteDir, run); } return run(); } searchSync(searchFrom ) { searchFrom = searchFrom || process.cwd(); const dir = getDirectory.sync(searchFrom); return this.searchFromDirectorySync(dir); } searchFromDirectorySync(dir ) { const absoluteDir = path.resolve(process.cwd(), dir); const run = () => { const result = this.searchDirectorySync(absoluteDir); const nextDir = this.nextDirectoryToSearch(absoluteDir, result); if (nextDir) { return this.searchFromDirectorySync(nextDir); } return this.config.transform(result); }; if (this.searchSyncCache) { return cacheWrapper(this.searchSyncCache, absoluteDir, run); } return run(); } searchDirectory(dir ) { return this.config.searchPlaces.reduce((prevResultPromise, place) => { return prevResultPromise.then(prevResult => { if (this.shouldSearchStopWithResult(prevResult)) { return prevResult; } return this.loadSearchPlace(dir, place); }); }, Promise.resolve(null)); } searchDirectorySync(dir ) { let result = null; for (const place of this.config.searchPlaces) { result = this.loadSearchPlaceSync(dir, place); if (this.shouldSearchStopWithResult(result)) break; } return result; } shouldSearchStopWithResult(result ) { if (result === null) return false; if (result.isEmpty && this.config.ignoreEmptySearchPlaces) return false; return true; } loadSearchPlace(dir , place ) { const filepath = path.join(dir, place); return readFile(filepath).then(content => { return this.createCosmiconfigResult(filepath, content); }); } loadSearchPlaceSync(dir , place ) { const filepath = path.join(dir, place); const content = readFile.sync(filepath); return 
this.createCosmiconfigResultSync(filepath, content); } nextDirectoryToSearch( currentDir , currentResult ) { if (this.shouldSearchStopWithResult(currentResult)) { return null; } const nextDir = nextDirUp(currentDir); if (nextDir === currentDir || currentDir === this.config.stopDir) { return null; } return nextDir; } loadPackageProp(filepath , content ) { const parsedContent = loaders.loadJson(filepath, content); const packagePropValue = parsedContent[this.config.packageProp]; return packagePropValue || null; } getLoaderEntryForFile(filepath ) { if (path.basename(filepath) === 'package.json') { const loader = this.loadPackageProp.bind(this); return { sync: loader, async: loader }; } const loaderKey = path.extname(filepath) || 'noExt'; return this.config.loaders[loaderKey] || {}; } getSyncLoaderForFile(filepath ) { const entry = this.getLoaderEntryForFile(filepath); if (!entry.sync) { throw new Error( `No sync loader specified for ${getExtensionDescription(filepath)}` ); } return entry.sync; } getAsyncLoaderForFile(filepath ) { const entry = this.getLoaderEntryForFile(filepath); const loader = entry.async || entry.sync; if (!loader) { throw new Error( `No async loader specified for ${getExtensionDescription(filepath)}` ); } return loader; } loadFileContent( mode , filepath , content ) { if (content === null) { return null; } if (content.trim() === '') { return undefined; } const loader = mode === MODE_SYNC ? this.getSyncLoaderForFile(filepath) : this.getAsyncLoaderForFile(filepath); return loader(filepath, content); } loadedContentToCosmiconfigResult( filepath , loadedContent ) { if (loadedContent === null) { return null; } if (loadedContent === undefined) { return { filepath, config: undefined, isEmpty: true }; } return { config: loadedContent, filepath }; } createCosmiconfigResult( filepath , content ) { return Promise.resolve() .then(() => { return this.loadFileContent('async', filepath, content); }) .then(loaderResult => { return this.loadedContentToCosmiconfigResult(filepath, loaderResult); }); } createCosmiconfigResultSync( filepath , content ) { const loaderResult = this.loadFileContent('sync', filepath, content); return this.loadedContentToCosmiconfigResult(filepath, loaderResult); } validateFilePath(filepath ) { if (!filepath) { throw new Error('load and loadSync must pass a non-empty string'); } } load(filepath ) { return Promise.resolve().then(() => { this.validateFilePath(filepath); const absoluteFilePath = path.resolve(process.cwd(), filepath); return cacheWrapper(this.loadCache, absoluteFilePath, () => { return readFile(absoluteFilePath, { throwNotFound: true }) .then(content => { return this.createCosmiconfigResult(absoluteFilePath, content); }) .then(this.config.transform); }); }); } loadSync(filepath ) { this.validateFilePath(filepath); const absoluteFilePath = path.resolve(process.cwd(), filepath); return cacheWrapper(this.loadSyncCache, absoluteFilePath, () => { const content = readFile.sync(absoluteFilePath, { throwNotFound: true }); const result = this.createCosmiconfigResultSync( absoluteFilePath, content ); return this.config.transform(result); }); } } module.exports = function createExplorer(options ) { const explorer = new Explorer(options); return { search: explorer.search.bind(explorer), searchSync: explorer.searchSync.bind(explorer), load: explorer.load.bind(explorer), loadSync: explorer.loadSync.bind(explorer), clearLoadCache: explorer.clearLoadCache.bind(explorer), clearSearchCache: explorer.clearSearchCache.bind(explorer), clearCaches: 
explorer.clearCaches.bind(explorer), }; }; function nextDirUp(dir ) { return path.dirname(dir); } function getExtensionDescription(filepath ) { const ext = path.extname(filepath); return ext ? `extension "${ext}"` : 'files without extensions'; }
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flasharray/FA_2_22/models/network_interface_port_details_static_tx_power_thresholds.py
import pprint import re import six import typing from ....properties import Property if typing.TYPE_CHECKING: from pypureclient.flasharray.FA_2_22 import models class NetworkInterfacePortDetailsStaticTxPowerThresholds(object): """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'alarm_low': 'float', 'warn_low': 'float', 'warn_high': 'float', 'alarm_high': 'float' } attribute_map = { 'alarm_low': 'alarm_low', 'warn_low': 'warn_low', 'warn_high': 'warn_high', 'alarm_high': 'alarm_high' } required_args = { } def __init__( self, alarm_low=None, # type: float warn_low=None, # type: float warn_high=None, # type: float alarm_high=None, # type: float ): """ Keyword args: alarm_low (float): The alarm low threshold for Tx output power in mW. warn_low (float): The warn low threshold for Tx output power in mW. warn_high (float): The warn high threshold for Tx output power in mW. alarm_high (float): The alarm high threshold for Tx output power in mW. """ if alarm_low is not None: self.alarm_low = alarm_low if warn_low is not None: self.warn_low = warn_low if warn_high is not None: self.warn_high = warn_high if alarm_high is not None: self.alarm_high = alarm_high def __setattr__(self, key, value): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `NetworkInterfacePortDetailsStaticTxPowerThresholds`".format(key)) self.__dict__[key] = value def __getattribute__(self, item): value = object.__getattribute__(self, item) if isinstance(value, Property): raise AttributeError else: return value def __getitem__(self, key): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `NetworkInterfacePortDetailsStaticTxPowerThresholds`".format(key)) return object.__getattribute__(self, key) def __setitem__(self, key, value): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `NetworkInterfacePortDetailsStaticTxPowerThresholds`".format(key)) object.__setattr__(self, key, value) def __delitem__(self, key): if key not in self.attribute_map: raise KeyError("Invalid key `{}` for `NetworkInterfacePortDetailsStaticTxPowerThresholds`".format(key)) object.__delattr__(self, key) def keys(self): return self.attribute_map.keys() def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): if hasattr(self, attr): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(NetworkInterfacePortDetailsStaticTxPowerThresholds, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, NetworkInterfacePortDetailsStaticTxPowerThresholds): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
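A minimal construction sketch; the keyword arguments mirror swagger_types above, and per the docstrings all values are in mW (the numbers below are illustrative):

thresholds = NetworkInterfacePortDetailsStaticTxPowerThresholds(
    alarm_low=0.1, warn_low=0.2, warn_high=1.5, alarm_high=2.0,
)
print(thresholds.to_dict())
# {'alarm_low': 0.1, 'warn_low': 0.2, 'warn_high': 1.5, 'alarm_high': 2.0}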
/Pyccuracy-1.2.47.tar.gz/Pyccuracy-1.2.47/pyccuracy/help.py
import os import re CURR_DIR = os.path.dirname(__file__) or '.' class LanguageViewer(object): ACTIONS = ['page', 'button', 'checkbox', 'div', 'image', 'link', 'radio', 'select', 'textbox', 'element', ] def __init__(self, language='en-us'): self.languages_dir = CURR_DIR + '/languages/data' self.language = language self.actions = {} self._set_all_actions() def _set_all_actions(self): language_filename = os.path.join(self.languages_dir, '%s.txt' % self.language) if not os.path.exists(language_filename): raise Exception, 'Language file not found: %s' % language_filename language_file = open(language_filename) possible_action_lines = [] for line in language_file: line = line.strip() if not line.startswith('#') and '=' in line: values = line.split('=') left = values[0].strip() right = "=".join(values[1:]).strip() splitted_left_operand = left.split('_') if splitted_left_operand[-1] == 'regex' and splitted_left_operand[0] in self.ACTIONS: action_name = '_'.join(splitted_left_operand[:-1]) new_right_value = self.make_it_readable(right) self.actions[action_name] = new_right_value language_file.close() def make_it_readable(self, value): url_regex = "(?P<url>[\\\"](([\w:/._-]|\=|\?|\&|\\\"|\;|\%)+)[\\\"]|([\w\s_.-]+))$" value = value.replace(url_regex, '[page|"url"]') #replace urls value = value.replace('(?P<url>([\w\s_.-]+))', 'page') value = value.replace('(?P<parameters>.+)', 'parameters') value = re.sub(r'\(\?\P\<([\w\s]*)\>\<([\w\s]*)\>\)', r'[\1|\2]', value) value = re.sub(r'\(\?\P\<([\w\s]*)\>\[\^\"\]\+\)', r'\1', value) value = re.sub(r'\(\?\P\<([\w\s]*)\>\.\+\)', r'\1', value) value = re.sub(r'\(\?\P\<([\w\s]*)\>\\d\+\)', r'X', value) value = re.sub(r'\(\?\P\<\w\>(.*)\)', r'\1', value) value = re.sub(r'\(\?\P\<\w*\>\\d\+\(\[\.\]\\d\+\)\?\)', '[X|X.X]', value) value = re.sub(r'\P\<\w*\>', '', value) value = value.replace('[\\"]', '"') #replace quotes value = value.replace('(.+)', 'blah') #replace random text value = value.replace('\d+', 'X').replace('(X)', 'X') #replace digits value = value.replace('[\\\"\\\']', '"').replace('[\\\'\\\"]', '"') #replace quotes value = value.replace('X([.]X)?', '[X|X.X]') value = value.replace('?', '').replace('$', '').replace('^', '') value = value.replace('{1}', '') return value def get_actions(self, term): matches = {} for key in self.actions.keys(): if term in key: matches[key] = self.actions.get(key) return matches
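A usage sketch; it requires the bundled languages/data/<language>.txt file to exist next to this module, and 'button' is just one term that matches the action keys built above:

viewer = LanguageViewer(language='en-us')
for name, pattern in viewer.get_actions('button').items():
    print("%s -> %s" % (name, pattern))  # readable form of each matching action regex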
/pulumi_gcp-6.65.0a1693462587.tar.gz/pulumi_gcp-6.65.0a1693462587/pulumi_gcp/iap/get_web_backend_service_iam_policy.py
import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = [ 'GetWebBackendServiceIamPolicyResult', 'AwaitableGetWebBackendServiceIamPolicyResult', 'get_web_backend_service_iam_policy', 'get_web_backend_service_iam_policy_output', ] @pulumi.output_type class GetWebBackendServiceIamPolicyResult: """ A collection of values returned by getWebBackendServiceIamPolicy. """ def __init__(__self__, etag=None, id=None, policy_data=None, project=None, web_backend_service=None): if etag and not isinstance(etag, str): raise TypeError("Expected argument 'etag' to be a str") pulumi.set(__self__, "etag", etag) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if policy_data and not isinstance(policy_data, str): raise TypeError("Expected argument 'policy_data' to be a str") pulumi.set(__self__, "policy_data", policy_data) if project and not isinstance(project, str): raise TypeError("Expected argument 'project' to be a str") pulumi.set(__self__, "project", project) if web_backend_service and not isinstance(web_backend_service, str): raise TypeError("Expected argument 'web_backend_service' to be a str") pulumi.set(__self__, "web_backend_service", web_backend_service) @property @pulumi.getter def etag(self) -> str: """ (Computed) The etag of the IAM policy. """ return pulumi.get(self, "etag") @property @pulumi.getter def id(self) -> str: """ The provider-assigned unique ID for this managed resource. """ return pulumi.get(self, "id") @property @pulumi.getter(name="policyData") def policy_data(self) -> str: """ (Required only by `iap.WebBackendServiceIamPolicy`) The policy data generated by a `organizations_get_iam_policy` data source. """ return pulumi.get(self, "policy_data") @property @pulumi.getter def project(self) -> str: return pulumi.get(self, "project") @property @pulumi.getter(name="webBackendService") def web_backend_service(self) -> str: return pulumi.get(self, "web_backend_service") class AwaitableGetWebBackendServiceIamPolicyResult(GetWebBackendServiceIamPolicyResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetWebBackendServiceIamPolicyResult( etag=self.etag, id=self.id, policy_data=self.policy_data, project=self.project, web_backend_service=self.web_backend_service) def get_web_backend_service_iam_policy(project: Optional[str] = None, web_backend_service: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebBackendServiceIamPolicyResult: """ Retrieves the current IAM policy data for webbackendservice ## example ```python import pulumi import pulumi_gcp as gcp policy = gcp.iap.get_web_backend_service_iam_policy(project=google_compute_backend_service["default"]["project"], web_backend_service=google_compute_backend_service["default"]["name"]) ``` :param str project: The ID of the project in which the resource belongs. If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. 
:param str web_backend_service: Used to find the parent resource to bind the IAM policy to """ __args__ = dict() __args__['project'] = project __args__['webBackendService'] = web_backend_service opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('gcp:iap/getWebBackendServiceIamPolicy:getWebBackendServiceIamPolicy', __args__, opts=opts, typ=GetWebBackendServiceIamPolicyResult).value return AwaitableGetWebBackendServiceIamPolicyResult( etag=pulumi.get(__ret__, 'etag'), id=pulumi.get(__ret__, 'id'), policy_data=pulumi.get(__ret__, 'policy_data'), project=pulumi.get(__ret__, 'project'), web_backend_service=pulumi.get(__ret__, 'web_backend_service')) @_utilities.lift_output_func(get_web_backend_service_iam_policy) def get_web_backend_service_iam_policy_output(project: Optional[pulumi.Input[Optional[str]]] = None, web_backend_service: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebBackendServiceIamPolicyResult]: """ Retrieves the current IAM policy data for webbackendservice ## example ```python import pulumi import pulumi_gcp as gcp policy = gcp.iap.get_web_backend_service_iam_policy(project=google_compute_backend_service["default"]["project"], web_backend_service=google_compute_backend_service["default"]["name"]) ``` :param str project: The ID of the project in which the resource belongs. If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used. :param str web_backend_service: Used to find the parent resource to bind the IAM policy to """ ...
PypiClean
/recipe_scrapers-14.11.0.tar.gz/recipe_scrapers-14.11.0/recipe_scrapers/vegolosi.py
from ._abstract import AbstractScraper from ._utils import get_minutes, get_yields, normalize_string class Vegolosi(AbstractScraper): @classmethod def host(cls): return "vegolosi.it" def title(self): return self.soup.find("h1").get_text().strip() def preparation_time(self): possible_time_info_elements = self.soup.findAll( "span", {"class": "tasty-recipes-prep-time"} ) return sum([get_minutes(element) for element in possible_time_info_elements]) def cooking_time(self): possible_time_info_elements = self.soup.findAll( "span", {"class": "tasty-recipes-cook-time"} ) return sum([get_minutes(element) for element in possible_time_info_elements]) def total_time(self): possible_time_info_elements = self.soup.findAll( "span", {"class": "tasty-recipes-total-time"} ) return sum([get_minutes(element) for element in possible_time_info_elements]) def yields(self): possible_yields_info_elements = self.soup.findAll( "span", {"class": "tasty-recipes-yield"} ) for element in possible_yields_info_elements: if "persone" in element.get_text(): return get_yields(element) def ingredients(self): ingredients = self.soup.select(".tasty-recipe-ingredients > ul > li") if not ingredients: ingredients = self.soup.findAll("li", {"class": "ingredient"}) return [normalize_string(ingredient.get_text()) for ingredient in ingredients] def instructions(self): instructions = self.soup.findAll("div", {"class": "tasty-recipe-instructions"}) return "\n".join( [normalize_string(instruction.get_text()) for instruction in instructions] ) def ratings(self): return round( float( self.soup.find("div", {"class": "tasty-recipe-rating rating_panel"}) .get("data-content-rate") .replace(",", ".") ), 2, )
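# A minimal usage sketch, not part of the original module: with the package
# installed, ``scrape_me`` resolves vegolosi.it URLs to this class via host().
# The URL below is hypothetical, and fetching it requires network access:
#
#     from recipe_scrapers import scrape_me
#
#     scraper = scrape_me("https://www.vegolosi.it/ricette/example-recipe/")
#     print(scraper.title())
#     print(scraper.total_time(), "minutes")
#     print(scraper.ingredients())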
PypiClean
/volatility3-2.4.1.tar.gz/volatility3-2.4.1/doc/source/simple-plugin.rst
How to Write a Simple Plugin
============================

This guide will step through how to construct a simple plugin using Volatility 3.

The example plugin we'll use is :py:class:`~volatility3.plugins.windows.dlllist.DllList`, which features the main
traits of a normal plugin, and reuses other plugins appropriately.

.. note:: This document will not include the complete code necessary for a working plugin (such as imports, etc)
    since it's designed to focus on the necessary components for writing a plugin.  For complete and functioning
    plugins, the ``framework/plugins`` directory should be consulted.

Inherit from PluginInterface
----------------------------

The first step is to define a class that inherits from
:py:class:`~volatility3.framework.interfaces.plugins.PluginInterface`.  Volatility automatically finds all plugins
defined under the various plugin directories by importing them and then making use of any classes that inherit from
:py:class:`~volatility3.framework.interfaces.plugins.PluginInterface`.

::

    from volatility3.framework import interfaces

    class DllList(interfaces.plugins.PluginInterface):

The next step is to define the requirements of the plugin; these will be converted into options that the user can
provide, depending on the user interface.

Define the plugin requirements
------------------------------

These requirements are the names of variables that will need to be populated in the configuration tree for the plugin
to run properly.  Any that are defined as optional need not be provided.

::

    _version = (1, 0, 0)
    _required_framework_version = (2, 0, 0)

    @classmethod
    def get_requirements(cls):
        return [requirements.ModuleRequirement(name = 'kernel', description = 'Windows kernel',
                                               architectures = ["Intel32", "Intel64"]),
                requirements.ListRequirement(name = 'pid',
                                             element_type = int,
                                             description = "Process IDs to include (all other processes are excluded)",
                                             optional = True),
                requirements.PluginRequirement(name = 'pslist', plugin = pslist.PsList, version = (2, 0, 0))]

This is a classmethod, because it is called before the specific plugin object has been instantiated (in order to know
how to instantiate the plugin).  At the moment these requirements are fairly straightforward:

::

    requirements.ModuleRequirement(name = 'kernel', description = 'Windows kernel',
                                   architectures = ["Intel32", "Intel64"]),

This requirement specifies the need for a particular submodule.  Each module requires a
:py:class:`TranslationLayer <volatility3.framework.interfaces.layers.TranslationLayerInterface>` and a
:py:class:`SymbolTable <volatility3.framework.interfaces.symbols.SymbolTableInterface>`, which are fulfilled by two
subrequirements: a :py:class:`~volatility3.framework.configuration.requirements.TranslationLayerRequirement` and a
:py:class:`~volatility3.framework.configuration.requirements.SymbolTableRequirement`.

At the moment, the automagic only fills ``ModuleRequirements`` with kernels, so the requirement has relatively few
parameters: the architecture of the underlying TranslationLayer, and the offset of the module within that layer.
The name of the module will be stored in the ``kernel`` configuration option, and the module object itself can be
accessed from the ``context.modules`` collection.  This requirement is a Complex Requirement and therefore will not
be requested directly from the user.
.. note:: In previous versions of volatility 3, there was no `ModuleRequirement`; instead, two requirements were
    defined: a :py:class:`~volatility3.framework.configuration.requirements.TranslationLayerRequirement` and a
    :py:class:`~volatility3.framework.configuration.requirements.SymbolTableRequirement`.  These still exist and can
    be used, but most plugins now just define a single `ModuleRequirement` for the kernel, which the automagic will
    populate.  The `ModuleRequirement` has two automatic sub-requirements, a `TranslationLayerRequirement` and a
    `SymbolTableRequirement`, but the requirement also records the offset of the module, and will allow future
    expansion to specify particular modules when application level plugins become more common.  Below is how these
    requirements would be specified:

    ::

        requirements.TranslationLayerRequirement(name = 'primary',
                                                 description = 'Memory layer for the kernel',
                                                 architectures = ["Intel32", "Intel64"]),

    This requirement indicates that the plugin will operate on a single
    :py:class:`TranslationLayer <volatility3.framework.interfaces.layers.TranslationLayerInterface>`.  The name of
    the loaded layer will appear in the plugin's configuration under the name ``primary``.  Requirement values can
    be accessed within the plugin through the plugin's `config` attribute (for example ``self.config['pid']``).

    .. note:: The name itself is dynamic depending on the other layers already present in the Context.  Always use
        the value from the configuration rather than attempting to guess what the layer will be called.

    Finally, this defines that the translation layer must be based on the Intel architecture.  At the moment, this
    acts as a filter, failing to be satisfied by memory images that do not match the required architecture.  Most
    plugins will only operate on a single layer, but it is entirely possible for a plugin to request two different
    layers, for example a plugin that carries out some form of difference or statistics against multiple memory
    images.  This requirement (and the next two) are known as Complex Requirements, and user interfaces will likely
    not directly request a value for this from a user.  The value stored in the configuration tree for a
    :py:class:`~volatility3.framework.configuration.requirements.TranslationLayerRequirement` is the string name of
    a layer present in the context's memory that satisfies the requirement.

    ::

        requirements.SymbolTableRequirement(name = "nt_symbols", description = "Windows kernel symbols"),

    This requirement specifies the need for a particular
    :py:class:`SymbolTable <volatility3.framework.interfaces.symbols.SymbolTableInterface>` to be loaded.  This gets
    populated by various :py:class:`Automagic <volatility3.framework.interfaces.automagic.AutoMagicInterface>` as the
    nearest sibling to a particular
    :py:class:`~volatility3.framework.configuration.requirements.TranslationLayerRequirement`.  This means that if
    the :py:class:`~volatility3.framework.configuration.requirements.TranslationLayerRequirement` is satisfied and
    the :py:class:`Automagic <volatility3.framework.interfaces.automagic.AutoMagicInterface>` can determine the
    appropriate :py:class:`SymbolTable <volatility3.framework.interfaces.symbols.SymbolTableInterface>`, the name of
    the :py:class:`SymbolTable <volatility3.framework.interfaces.symbols.SymbolTableInterface>` will be stored in
    the configuration.  This requirement is also a Complex Requirement and therefore will not be requested directly
    from the user.
::

    requirements.ListRequirement(name = 'pid',
                                 description = 'Filter on specific process IDs',
                                 element_type = int,
                                 optional = True),

The next requirement is a List Requirement, populated by integers.  The description will be presented to the user to
describe what the value represents.  The optional flag indicates that the plugin can function without the ``pid``
value being defined within the configuration tree at all.

::

    requirements.PluginRequirement(name = 'pslist', plugin = pslist.PsList, version = (2, 0, 0))]

This requirement indicates that the plugin will make use of another plugin's code, and specifies the version
requirements on that plugin.  The version is specified in terms of Semantic Versioning, meaning that, to be
compatible, the major versions must be identical and the minor version must be equal to or higher than the one
provided.  This requirement does not make use of any data from the configuration, even if a value were provided; it
is merely a functional check carried out before running the plugin.

To define the version of a plugin, populate the `_version` class variable as a tuple of version numbers
`(major, minor, patch)`.  So for example:

::

    _version = (1, 0, 0)

The plugin may also require a specific version of the framework.  This also uses Semantic Versioning, and can be set
by defining the `_required_framework_version`.  The major version should match the version of volatility the plugin
is to be used with, which at the time of writing would be 2.2.0.  If, for example, only features from 2.0.0 are used,
then the lowest applicable version number should be used to support the greatest number of installations:

::

    _required_framework_version = (2, 0, 0)

Define the `run` method
-----------------------

The run method is the primary method called on a plugin.  It takes no parameters (these have been passed through the
context's configuration tree, and the context is provided at plugin initialization time) and returns an unpopulated
:py:class:`~volatility3.framework.interfaces.renderers.TreeGrid` object.  TreeGrids are typically constructed from a
generator that carries out the bulk of the plugin's processing.  The
:py:class:`~volatility3.framework.interfaces.renderers.TreeGrid` also specifies the names and types of the columns
it will output.

::

    def run(self):
        filter_func = pslist.PsList.create_pid_filter(self.config.get('pid', None))
        kernel = self.context.modules[self.config['kernel']]

        return renderers.TreeGrid([("PID", int),
                                   ("Process", str),
                                   ("Base", format_hints.Hex),
                                   ("Size", format_hints.Hex),
                                   ("Name", str),
                                   ("Path", str)],
                                  self._generator(pslist.PsList.list_processes(self.context,
                                                                               kernel.layer_name,
                                                                               kernel.symbol_table_name,
                                                                               filter_func = filter_func)))

In this instance, the plugin constructs a filter (using the PsList plugin's *classmethod* for creating filters).  It
checks the plugin's configuration for the ``pid`` value, and passes it in as a list if it finds it, or None if it
does not.  The :py:func:`~volatility3.plugins.windows.pslist.PsList.create_pid_filter` method accepts a list of
process identifiers to be included; if the list is empty, all processes are returned.

The next line specifies the columns by their name and type.  The types are simple types (int, str, bytes, float, and
bool) but can also provide hints as to how the output should be displayed (such as a hexadecimal number, using
:py:class:`volatility3.framework.renderers.format_hints.Hex`).
This indicates to user interfaces that the value should be displayed in a particular way, but does not guarantee that
the value will be displayed that way (for example, if it doesn't make sense to do so in a particular interface).

Finally, the generator is provided.  The generator accepts a list of processes, which is gathered using a different
plugin, the :py:class:`~volatility3.plugins.windows.pslist.PsList` plugin.  That plugin features a *classmethod*, so
that other plugins can call it.  As such, it takes all the necessary parameters rather than accessing them from a
configuration.  Since it must be portable code, it takes a context, as well as the layer name, symbol table and
optionally a filter.  In this instance, we always pass it the layer name and symbol table name taken from the kernel
module object, which is constructed from the ``kernel`` configuration requirement.  This will generate a list of
:py:class:`~volatility3.framework.symbols.windows.extensions.EPROCESS` objects, as provided by the
:py:class:`~volatility3.plugins.windows.pslist.PsList` plugin; the process listing itself is not covered here, but
it serves as an example of how to share code across plugins (both as the provider and the consumer of the shared
code).

Define the generator
--------------------

The :py:class:`~volatility3.framework.interfaces.renderers.TreeGrid` can be populated without a generator, but it is
quite a common model to use.  This is where the main processing for this plugin lives.

::

    def _generator(self, procs):

        for proc in procs:

            for entry in proc.load_order_modules():

                BaseDllName = FullDllName = renderers.UnreadableValue()
                try:
                    BaseDllName = entry.BaseDllName.get_string()
                    # We assume that if the BaseDllName points to an invalid buffer, so will FullDllName
                    FullDllName = entry.FullDllName.get_string()
                except exceptions.InvalidAddressException:
                    pass

                yield (0, (proc.UniqueProcessId,
                           proc.ImageFileName.cast("string", max_length = proc.ImageFileName.vol.count, errors = 'replace'),
                           format_hints.Hex(entry.DllBase),
                           format_hints.Hex(entry.SizeOfImage),
                           BaseDllName,
                           FullDllName))

This iterates through the list of processes and for each one calls the
:py:meth:`~volatility3.framework.symbols.windows.extensions.EPROCESS.load_order_modules` method on it.  This
provides a list of the loaded modules within the process.

The plugin then defaults the ``BaseDllName`` and ``FullDllName`` variables to an
:py:class:`~volatility3.framework.renderers.UnreadableValue`, which is a way of indicating to the user interface
that the value couldn't be read for some reason (but that it isn't fatal).  There are currently four different
reasons a value may be unreadable:

* **Unreadable**: values which are empty because the data cannot be read
* **Unparsable**: values which are empty because the data cannot be interpreted correctly
* **NotApplicable**: values which are empty because they don't make sense for this particular entry
* **NotAvailable**: values which cannot be provided now (but might in a future run, via new symbols or an updated plugin)

This is a safety provision to ensure that the data returned by the Volatility library is accurate and describes why
information may not be provided.

The plugin then takes each module entry's ``BaseDllName`` value, and calls
:py:meth:`~volatility3.framework.symbols.windows.extensions.UNICODE_STRING.get_string` on it.
All structure attributes, as defined by the symbols, are directly accessible and use the case-style of the symbol
library they came from (in Windows, attributes are CamelCase), such as ``entry.BaseDllName`` in this instance.  Any
attributes not defined by the symbol but added by Volatility extensions cannot be properties (in case they overlap
with the attributes defined in the symbol libraries) and are therefore always methods, prepended with ``get_``, in
this example ``BaseDllName.get_string()``.

``FullDllName`` is then populated.  These operations read from memory, and as such, the memory image may be unable
to provide the data at a particular offset.  This will cause an exception to be thrown.  In Volatility 3, exceptions
are thrown as a means of communicating when something exceptional happens.  It is the responsibility of the plugin
developer to appropriately catch and handle any non-fatal exceptions, and otherwise allow the exception to propagate
up to the user interface.  In this instance, the
:py:class:`~volatility3.framework.exceptions.InvalidAddressException` class is caught, which is thrown by any layer
which cannot access an offset requested of it.  Since we have already populated both values with
``UnreadableValue``, we do not need to write any code in the exception handler.

Finally, we yield the record in the format required by the
:py:class:`~volatility3.framework.interfaces.renderers.TreeGrid`: a tuple, listing the indentation level (for trees)
and then the list of values for each column.  This plugin demonstrates casting the value ``ImageFileName`` to ensure
it's returned as a string with a specific maximum length, rather than its original type (potentially an array of
characters, etc).  This is carried out using the
:py:meth:`~volatility3.framework.interfaces.objects.ObjectInterface.cast` method, which takes a type (either a
native type, such as string or pointer, or a structure type defined in a
:py:class:`SymbolTable <volatility3.framework.interfaces.symbols.SymbolTableInterface>` such as ``<table>!_UNICODE``)
and the parameters to that type.  Since the cast value must populate a string-typed column, it has to be a Python
string (such as the result of casting to the native type string) and cannot be a special structure such as
``_UNICODE``.  For columns with format hints, the value must be wrapped in the format hint type to ensure that the
renderer's type checking does not fail.
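As a brief recap, the pattern for producing a single safe column value combines the pieces above.  The following
sketch simply recombines calls already shown in this guide; ``entry`` is assumed to be a module entry, as in the
generator:

::

    BaseDllName = renderers.UnreadableValue()
    try:
        BaseDllName = entry.BaseDllName.get_string()
    except exceptions.InvalidAddressException:
        pass  # keep the UnreadableValue default
    yield (0, (format_hints.Hex(entry.DllBase), BaseDllName))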
PypiClean
/baiduads-sdk-auto-snapshot-2022.2.1.5.tar.gz/baiduads-sdk-auto-snapshot-2022.2.1.5/baiduads/extaudience/model/get_audience_list_response_wrapper.py
import re  # noqa: F401
import sys  # noqa: F401

from baiduads.model_utils import (  # noqa: F401
    ApiTypeError,
    ModelComposed,
    ModelNormal,
    ModelSimple,
    cached_property,
    change_keys_js_to_python,
    convert_js_args_to_python_args,
    date,
    datetime,
    file_type,
    none_type,
    validate_get_composed_info,
    OpenApiModel
)
from baiduads.exceptions import ApiAttributeError


def lazy_import():
    from baiduads.common.model.api_response_header import ApiResponseHeader
    from baiduads.extaudience.model.get_audience_list_response_wrapper_body import GetAudienceListResponseWrapperBody
    globals()['ApiResponseHeader'] = ApiResponseHeader
    globals()['GetAudienceListResponseWrapperBody'] = GetAudienceListResponseWrapperBody


class GetAudienceListResponseWrapper(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          (for a top-level attribute this is (var_name,)).  The value is
          a dict mapping a capitalized key that describes the allowed value
          to the allowed value itself.  These dicts store the allowed enum
          values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          (for a top-level attribute this is (var_name,)).  The value is
          a dict that stores validations for max_length, min_length,
          max_items, min_items, exclusive_maximum, inclusive_maximum,
          exclusive_minimum, inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self; this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self; this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'header': (ApiResponseHeader,),  # noqa: E501
            'body': (GetAudienceListResponseWrapperBody,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    attribute_map = {
        'header': 'header',  # noqa: E501
        'body': 'body',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """GetAudienceListResponseWrapper - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is traveled through is
                                added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            header (ApiResponseHeader): [optional]  # noqa: E501
            body (GetAudienceListResponseWrapperBody): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """GetAudienceListResponseWrapper - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is traveled through is
                                added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) header (ApiResponseHeader): [optional] # noqa: E501 body (GetAudienceListResponseWrapperBody): [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
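# A minimal construction sketch, not part of the generated file: models are
# built with keyword arguments named after ``attribute_map``; the no-argument
# ApiResponseHeader() below is a placeholder, not real API data:
#
#     from baiduads.common.model.api_response_header import ApiResponseHeader
#
#     wrapper = GetAudienceListResponseWrapper(header=ApiResponseHeader())
#     print(wrapper.header)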
PypiClean
/dag_workflows-1.0.5-py3-none-any.whl/workflows/dags.py
import importlib.util
import logging
import os
import re

import yaml

from workflows import bases

logger = logging.getLogger(__name__)


def register_directory(folder_path: str):
    """
    Given a folder, register all DAG folders inside it.
    """
    registry = bases.DagRegistry()
    for entry in os.scandir(folder_path):  # type: os.DirEntry
        if entry.is_dir():
            if entry.name in ("__pycache__",):
                continue  # skip
            try:
                registry[entry.name] = register_dag(entry.path)
                logger.debug("Registered DAG %s", entry.path)
            except Exception as exc:  # pylint: disable=broad-except
                registry[entry.name] = exc
                logger.warning("Could not register DAG in %s: %s", entry.path, exc)
    registry.validate()
    return registry


def register_dag(folder_path: str) -> bases.DAG:
    """
    Given a folder, register it if it has a workflow.yaml file.
    """
    spec_file = os.path.join(folder_path, "workflow.yaml")
    if not os.path.isfile(spec_file):
        raise bases.RegistrationError(f"No workflow.yaml file found in {folder_path}")
    with open(spec_file) as fh:
        spec = yaml.safe_load(fh)
    dag = bases.DAG(folder=folder_path, **spec)
    for entry in os.scandir(folder_path):  # type: os.DirEntry
        if entry.is_file():
            if entry.name in ("__init__.py", ".DS_Store", "workflow.yaml"):
                continue  # skip non-transforms
            register_task(dag, entry.path)
    dag.validate()
    return dag


SQL_COMMENT = re.compile(r"/\*(.*)\*/", re.MULTILINE | re.DOTALL)


def register_task(dag: bases.DAG, file_path: str) -> None:
    """
    Load a transform file and add it to the DAG
    """
    if file_path.endswith(".py"):
        module_spec = importlib.util.spec_from_file_location("task", file_path)
        module = importlib.util.module_from_spec(module_spec)
        module_spec.loader.exec_module(module)
        spec = yaml.safe_load(module.__doc__)
        spec["run"] = module.run
    elif file_path.endswith(".sql"):
        with open(file_path) as fh:
            sql = fh.read()
        spec_text = SQL_COMMENT.match(sql)
        if not spec_text:
            # f-strings here, because exceptions (unlike logging calls) do not
            # apply %-style formatting to extra arguments
            raise bases.RegistrationError(f"Cannot find a YAML spec in file {file_path}")
        spec = yaml.safe_load(spec_text.group(1))
        spec["sql"] = sql
    elif file_path.endswith(".yaml"):
        with open(file_path) as fh:
            spec = yaml.safe_load(fh)
    else:
        raise bases.RegistrationError(f"Unsupported file type: {file_path}")

    task_type = spec.pop("type", None)
    if not task_type:
        raise bases.RegistrationError(f"`type` is a required field in task {file_path}")
    task_class = bases.load_class(task_type, bases.TASK_TYPES)
    name = os.path.basename(file_path)
    task_class(dag=dag, name=name, **spec)
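# A hedged sketch of a SQL transform file that register_task() would accept;
# the YAML keys below are assumed for illustration (valid keys depend on the
# task classes registered in bases.TASK_TYPES):
#
#     /*
#     type: sql_task        # hypothetical type resolved via bases.load_class
#     depends_on: []        # assumed spec field
#     */
#     SELECT *
#     FROM some_source_table;
#
# The /* ... */ spec must open the file, because SQL_COMMENT.match() only
# matches at the start of the string.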
PypiClean