from google.cloud import bigquery
from google.cloud.bigquery import LoadJobConfig
from google.cloud.bigquery import SchemaField

from queryless.parser import BasicParser


class BigQuery(object):
    def __init__(self, project=None):
        self._client = bigquery.Client(project=project)

    @property
    def client(self):
        return self._client

    def create_table(self, path, table_from='uri'):
        bp = BQParser(path)
        dataset_name = bp.dataset_name
        table_name = bp.table_name
        skip_leading_rows = bp.skip_leading_rows
        schema = bp.schema

        table_ref = self.client.dataset(dataset_name).table(table_name)
        load_config = LoadJobConfig()
        load_config.skip_leading_rows = skip_leading_rows
        load_config.schema = schema
        file_source = bp.properties.get('inputPath')

        if table_from == 'uri':
            self.client.load_table_from_uri(source_uris=file_source,
                                            destination=table_ref,
                                            job_config=load_config)
        else:
            raise ValueError('Not supported')


class BQParser(BasicParser):
    def __init__(self, path: str):
        super().__init__(path=path)

    @property
    def dataset_name(self) -> str:
        return self.metadata.get('datasetName')

    @property
    def table_name(self) -> str:
        return self.metadata.get('tableName')

    @property
    def properties(self) -> dict:
        return self.metadata.get('srcProperty')

    @property
    def skip_leading_rows(self) -> int:
        return self.metadata.get('skipLeadingRows', 0)

    @property
    def schema(self) -> list:
        """
        SCHEMA = [
            SchemaField('full_name', 'STRING', mode='required'),
            SchemaField('age', 'INTEGER', mode='required'),
        ]
        :return: a list
        """
        schema = self.spec.get('schema')
        schema = [[SchemaField(k, i[k]['type'], i[k]['mode'])
                   for k in i.keys()] for i in schema]
        schema = [item for sublist in schema for item in sublist]
        return schema
python
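A minimal usage sketch for the wrapper above, assuming a table-definition file of the kind BQParser reads (the metadata keys mirror the properties above; the project ID and file name are hypothetical):

bq = BigQuery(project='my-gcp-project')  # hypothetical project ID
# 'bq_table.yml' (hypothetical) must provide datasetName, tableName,
# skipLeadingRows, srcProperty.inputPath and a schema spec for BQParser
bq.create_table('bq_table.yml', table_from='uri')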
# See in the Dark (SID) dataset
import torch
import os
import glob
import rawpy
import numpy as np
import random
from os.path import join
import data.torchdata as torchdata
import util.process as process
from util.util import loadmat
import h5py
import exifread
import pickle
import PIL.Image as Image
from scipy.io import loadmat

BaseDataset = torchdata.Dataset


def worker_init_fn(worker_id):
    np.random.seed(np.random.get_state()[1][0] + worker_id)


def metainfo(rawpath):
    with open(rawpath, 'rb') as f:
        tags = exifread.process_file(f)
        _, suffix = os.path.splitext(os.path.basename(rawpath))

        if suffix == '.dng':
            expo = eval(str(tags['Image ExposureTime']))
            iso = eval(str(tags['Image ISOSpeedRatings']))
        else:
            expo = eval(str(tags['EXIF ExposureTime']))
            iso = eval(str(tags['EXIF ISOSpeedRatings']))

        # print('ISO: {}, ExposureTime: {}'.format(iso, expo))
    return iso, expo


def crop_center(img, cropx, cropy):
    _, y, x = img.shape
    startx = x // 2 - (cropx // 2)
    starty = y // 2 - (cropy // 2)
    return img[:, starty:starty + cropy, startx:startx + cropx]


class SIDDataset(BaseDataset):
    def __init__(self, datadir, paired_fns, size=None, flag=None, augment=True, repeat=1, cfa='bayer',
                 memorize=True, stage_in='raw', stage_out='raw', gt_wb=False):
        super(SIDDataset, self).__init__()
        assert cfa == 'bayer' or cfa == 'xtrans'
        self.size = size
        self.datadir = datadir
        self.paired_fns = paired_fns
        self.flag = flag
        self.augment = augment
        self.patch_size = 512
        self.repeat = repeat
        self.cfa = cfa
        self.pack_raw = pack_raw_bayer if cfa == 'bayer' else pack_raw_xtrans

        assert stage_in in ['raw', 'srgb']
        assert stage_out in ['raw', 'srgb']
        self.stage_in = stage_in
        self.stage_out = stage_out
        self.gt_wb = gt_wb

        if size is not None:
            self.paired_fns = self.paired_fns[:size]

        self.memorize = memorize
        self.target_dict = {}
        self.target_dict_aux = {}
        self.input_dict = {}

    def __getitem__(self, i):
        i = i % len(self.paired_fns)
        input_fn, target_fn = self.paired_fns[i]

        input_path = join(self.datadir, 'short', input_fn)
        target_path = join(self.datadir, 'long', target_fn)

        ratio = compute_expo_ratio(input_fn, target_fn)

        if self.memorize:
            if target_fn not in self.target_dict:
                with rawpy.imread(target_path) as raw_target:
                    target_image = self.pack_raw(raw_target)
                    wb, ccm = process.read_wb_ccm(raw_target)
                    if self.stage_out == 'srgb':
                        target_image = process.raw2rgb(target_image, raw_target)
                    self.target_dict[target_fn] = target_image
                    self.target_dict_aux[target_fn] = (wb, ccm)

            if input_fn not in self.input_dict:
                with rawpy.imread(input_path) as raw_input:
                    input_image = self.pack_raw(raw_input) * ratio
                    if self.stage_in == 'srgb':
                        if self.gt_wb:
                            wb, ccm = self.target_dict_aux[target_fn]
                            input_image = process.raw2rgb_v2(input_image, wb, ccm)
                        else:
                            input_image = process.raw2rgb(input_image, raw_input)
                    self.input_dict[input_fn] = input_image

            input_image = self.input_dict[input_fn]
            target_image = self.target_dict[target_fn]
            (wb, ccm) = self.target_dict_aux[target_fn]
        else:
            with rawpy.imread(target_path) as raw_target:
                target_image = self.pack_raw(raw_target)
                wb, ccm = process.read_wb_ccm(raw_target)
                if self.stage_out == 'srgb':
                    target_image = process.raw2rgb(target_image, raw_target)

            with rawpy.imread(input_path) as raw_input:
                input_image = self.pack_raw(raw_input) * ratio
                if self.stage_in == 'srgb':
                    if self.gt_wb:
                        input_image = process.raw2rgb_v2(input_image, wb, ccm)
                    else:
                        input_image = process.raw2rgb(input_image, raw_input)

        if self.augment:
            H = input_image.shape[1]
            W = target_image.shape[2]

            ps = self.patch_size
            xx = np.random.randint(0, W - ps)
            yy = np.random.randint(0, H - ps)

            input = input_image[:, yy:yy + ps, xx:xx + ps]
            target = target_image[:, yy:yy + ps, xx:xx + ps]

            if np.random.randint(2, size=1)[0] == 1:  # random flip
                input = np.flip(input, axis=1)  # H
                target = np.flip(target, axis=1)
            if np.random.randint(2, size=1)[0] == 1:
                input = np.flip(input, axis=2)  # W
                target = np.flip(target, axis=2)
            if np.random.randint(2, size=1)[0] == 1:  # random transpose
                input = np.transpose(input, (0, 2, 1))
                target = np.transpose(target, (0, 2, 1))
        else:
            input = input_image
            target = target_image

        input = np.maximum(np.minimum(input, 1.0), 0)
        input = np.ascontiguousarray(input)
        target = np.ascontiguousarray(target)

        dic = {'input': input, 'target': target, 'fn': input_fn, 'cfa': self.cfa, 'rawpath': target_path}

        if self.flag is not None:
            dic.update(self.flag)

        return dic

    def __len__(self):
        return len(self.paired_fns) * self.repeat


def compute_expo_ratio(input_fn, target_fn):
    in_exposure = float(input_fn.split('_')[-1][:-5])
    gt_exposure = float(target_fn.split('_')[-1][:-5])
    ratio = min(gt_exposure / in_exposure, 300)
    return ratio


def pack_raw_bayer(raw):
    # pack Bayer image to 4 channels
    im = raw.raw_image_visible.astype(np.float32)
    raw_pattern = raw.raw_pattern
    R = np.where(raw_pattern == 0)
    G1 = np.where(raw_pattern == 1)
    B = np.where(raw_pattern == 2)
    G2 = np.where(raw_pattern == 3)

    white_point = 16383
    img_shape = im.shape
    H = img_shape[0]
    W = img_shape[1]

    out = np.stack((im[R[0][0]:H:2, R[1][0]:W:2],  # RGBG
                    im[G1[0][0]:H:2, G1[1][0]:W:2],
                    im[B[0][0]:H:2, B[1][0]:W:2],
                    im[G2[0][0]:H:2, G2[1][0]:W:2]), axis=0).astype(np.float32)

    black_level = np.array(raw.black_level_per_channel)[:, None, None].astype(np.float32)

    # if max(raw.black_level_per_channel) != min(raw.black_level_per_channel):
    #     black_level = 2**round(np.log2(np.max(black_level)))
    # print(black_level)

    out = (out - black_level) / (white_point - black_level)
    out = np.clip(out, 0, 1)

    return out


def pack_raw_xtrans(raw):
    # pack X-Trans image to 9 channels
    im = raw.raw_image_visible.astype(np.float32)
    im = (im - 1024) / (16383 - 1024)  # subtract the black level
    im = np.clip(im, 0, 1)

    img_shape = im.shape
    H = (img_shape[0] // 6) * 6
    W = (img_shape[1] // 6) * 6

    out = np.zeros((9, H // 3, W // 3), dtype=np.float32)

    # 0 R
    out[0, 0::2, 0::2] = im[0:H:6, 0:W:6]
    out[0, 0::2, 1::2] = im[0:H:6, 4:W:6]
    out[0, 1::2, 0::2] = im[3:H:6, 1:W:6]
    out[0, 1::2, 1::2] = im[3:H:6, 3:W:6]

    # 1 G
    out[1, 0::2, 0::2] = im[0:H:6, 2:W:6]
    out[1, 0::2, 1::2] = im[0:H:6, 5:W:6]
    out[1, 1::2, 0::2] = im[3:H:6, 2:W:6]
    out[1, 1::2, 1::2] = im[3:H:6, 5:W:6]

    # 2 B
    out[2, 0::2, 0::2] = im[0:H:6, 1:W:6]
    out[2, 0::2, 1::2] = im[0:H:6, 3:W:6]
    out[2, 1::2, 0::2] = im[3:H:6, 0:W:6]
    out[2, 1::2, 1::2] = im[3:H:6, 4:W:6]

    # 3 R
    out[3, 0::2, 0::2] = im[1:H:6, 2:W:6]
    out[3, 0::2, 1::2] = im[2:H:6, 5:W:6]
    out[3, 1::2, 0::2] = im[5:H:6, 2:W:6]
    out[3, 1::2, 1::2] = im[4:H:6, 5:W:6]

    # 4 B
    out[4, 0::2, 0::2] = im[2:H:6, 2:W:6]
    out[4, 0::2, 1::2] = im[1:H:6, 5:W:6]
    out[4, 1::2, 0::2] = im[4:H:6, 2:W:6]
    out[4, 1::2, 1::2] = im[5:H:6, 5:W:6]

    # remaining G
    out[5, :, :] = im[1:H:3, 0:W:3]
    out[6, :, :] = im[1:H:3, 1:W:3]
    out[7, :, :] = im[2:H:3, 0:W:3]
    out[8, :, :] = im[2:H:3, 1:W:3]

    return out


class ELDEvalDataset(BaseDataset):
    def __init__(self, basedir, camera_suffix, scenes=None, img_ids=None):
        super(ELDEvalDataset, self).__init__()
        self.basedir = basedir
        self.camera_suffix = camera_suffix  # ('Canon', '.CR2')
        self.scenes = scenes
        self.img_ids = img_ids
        # self.input_dict = {}
        # self.target_dict = {}

    def __getitem__(self, i):
        camera, suffix = self.camera_suffix

        scene_id = i // len(self.img_ids)
        img_id = i % len(self.img_ids)

        scene = 'scene-{}'.format(self.scenes[scene_id])

        datadir = join(self.basedir, camera, scene)

        input_path = join(datadir, 'IMG_{:04d}{}'.format(self.img_ids[img_id], suffix))

        gt_ids = np.array([1, 6, 11, 16])
        ind = np.argmin(np.abs(self.img_ids[img_id] - gt_ids))

        target_path = join(datadir, 'IMG_{:04d}{}'.format(gt_ids[ind], suffix))

        iso, expo = metainfo(target_path)
        target_expo = iso * expo
        iso, expo = metainfo(input_path)

        ratio = target_expo / (iso * expo)

        with rawpy.imread(input_path) as raw:
            input = pack_raw_bayer(raw) * ratio

        with rawpy.imread(target_path) as raw:
            target = pack_raw_bayer(raw)

        input = np.maximum(np.minimum(input, 1.0), 0)
        target = np.maximum(np.minimum(target, 1.0), 0)
        input = np.ascontiguousarray(input)
        target = np.ascontiguousarray(target)

        data = {'input': input, 'target': target, 'fn': input_path, 'rawpath': target_path}

        return data

    def __len__(self):
        return len(self.scenes) * len(self.img_ids)
python
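A minimal consumption sketch for the dataset above, assuming (short, long) filename pairs whose exposure suffix matches what compute_expo_ratio parses (e.g. '_0.1s.ARW'); the paths and pair below are hypothetical:

from torch.utils.data import DataLoader

pairs = [('00001_00_0.1s.ARW', '00001_00_10s.ARW')]  # hypothetical pair
dataset = SIDDataset(datadir='./SID/Sony', paired_fns=pairs)
loader = DataLoader(dataset, batch_size=1, shuffle=True,
                    num_workers=4, worker_init_fn=worker_init_fn)

for batch in loader:
    inp, tgt = batch['input'], batch['target']  # packed Bayer planes, (N, 4, H/2, W/2)
    break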
from task import CustomTask
from Agent import Agent

if __name__ == '__main__':
    goal_task = CustomTask("自定义任务")  # task name; Chinese for "custom task"
    aida = Agent()
    goal_task.set_agent(aida)
    goal_task.init_agent()
    # Collect 5 teams, waiting 5 seconds between collections
    goal_task.run_collection(collection_team=5, wait_sec=5)
python
# * Copyright (c) 2020-2021. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# *      http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.

from datetime import datetime
from functools import cached_property
from typing import Optional

import numpy as np
from pint import Quantity
from pyvips import Image as VIPSImage
from tifffile import TiffFile, TiffPageSeries, xml2dict

from pims.formats import AbstractFormat
from pims.formats.utils.abstract import CachedDataPath
from pims.formats.utils.engines.omexml import OMEXML
from pims.formats.utils.engines.tifffile import TifffileChecker, TifffileParser, cached_tifffile
from pims.formats.utils.engines.vips import VipsReader
from pims.formats.utils.histogram import DefaultHistogramReader
from pims.formats.utils.structures.metadata import ImageChannel, ImageMetadata, MetadataStore
from pims.formats.utils.structures.planes import PlanesInfo
from pims.formats.utils.structures.pyramid import Pyramid
from pims.utils import UNIT_REGISTRY
from pims.utils.color import infer_channel_color
from pims.utils.dict import flatten
from pims.utils.dtypes import dtype_to_bits


def clean_ome_dict(d: dict) -> dict:
    for k, v in d.items():
        if k.endswith('Settings') or k.endswith('Ref'):
            continue

        if type(v) is dict:
            if 'ID' in v.keys():
                id = ''.join([f"[{i}]" for i in v['ID'].split(':')[1:]])
                del v['ID']
                v = {id: v}
                d[k] = v
            d[k] = clean_ome_dict(v)
        elif type(v) is list:
            new_v = dict()
            for item in v:
                if 'ID' in item.keys():
                    id = ''.join([f"[{i}]" for i in item['ID'].split(':')[1:]])
                    del item['ID']
                    new_v[id] = item
            if len(new_v) == 0:
                new_v = v
            d[k] = new_v

    # TODO: original metadata from StructuredAnnotations
    return d


def parse_ome(omexml: str) -> OMEXML:
    return OMEXML(omexml)


def cached_omexml(format: AbstractFormat) -> OMEXML:
    tf = cached_tifffile(format)
    return format.get_cached('_omexml', parse_ome, tf.pages[0].description)


def cached_omedict(format: AbstractFormat) -> dict:
    tf = cached_tifffile(format)
    return format.get_cached('_omedict', xml2dict, tf.pages[0].description)


def cached_tifffile_baseseries(format: AbstractFormat) -> TiffPageSeries:
    tf = cached_tifffile(format)

    def get_baseseries(tf: TiffFile) -> TiffPageSeries:
        idx = np.argmax([np.prod(s.shape) for s in tf.series])
        return tf.series[idx]

    return format.get_cached('_tf_baseseries', get_baseseries, tf)


class OmeTiffChecker(TifffileChecker):
    @classmethod
    def match(cls, pathlike: CachedDataPath) -> bool:
        try:
            if super().match(pathlike):
                tf = cls.get_tifffile(pathlike)
                return tf.is_ome
            return False
        except RuntimeError:
            return False


class OmeTiffParser(TifffileParser):
    @property
    def base(self) -> TiffPageSeries:
        return cached_tifffile_baseseries(self.format)

    def parse_main_metadata(self) -> ImageMetadata:
        base = self.base
        shape = dict(zip(base.axes, base.shape))

        imd = ImageMetadata()
        imd.width = shape['X']
        imd.height = shape['Y']
        imd.depth = shape.get('Z', 1)
        imd.duration = shape.get('T', 1)

        imd.pixel_type = base.dtype
        imd.significant_bits = dtype_to_bits(imd.pixel_type)

        imd.n_channels = shape.get('C', 1) * shape.get('S', 1)
        imd.n_intrinsic_channels = shape.get('C', 1)
        imd.n_channels_per_read = shape.get('S', 1)

        omexml = cached_omexml(self.format)
        base = omexml.main_image

        if imd.n_channels == 3:
            default_names = ['R', 'G', 'B']
        elif imd.n_channels == 2:
            default_names = ['R', 'G']
        elif imd.n_channels == 1:
            default_names = ['L']
        else:
            default_names = None

        for c in range(imd.n_channels):
            ome_c = (c - (c % imd.n_channels_per_read)) // imd.n_channels_per_read
            channel = base.pixels.channel(ome_c)
            name = channel.name
            if not name and default_names is not None:
                name = default_names[c]
            color = infer_channel_color(channel.color, c, imd.n_channels)
            imd.set_channel(
                ImageChannel(
                    index=c,
                    emission_wavelength=channel.emission_wavelength,
                    excitation_wavelength=channel.excitation_wavelength,
                    suggested_name=name,
                    color=color
                )
            )

        return imd

    def parse_known_metadata(self) -> ImageMetadata:
        omexml = cached_omexml(self.format)
        base = omexml.main_image

        imd = super().parse_known_metadata()
        imd.description = base.description
        imd.acquisition_datetime = self.parse_ome_acquisition_date(
            base.acquisition_date
        )

        imd.physical_size_x = self.parse_ome_physical_size(
            base.pixels.physical_size_X, base.pixels.physical_size_X_unit
        )
        imd.physical_size_y = self.parse_ome_physical_size(
            base.pixels.physical_size_Y, base.pixels.physical_size_Y_unit
        )
        imd.physical_size_z = self.parse_ome_physical_size(
            base.pixels.physical_size_Z, base.pixels.physical_size_Z_unit
        )
        imd.frame_rate = self.parse_frame_rate(
            base.pixels.time_increment, base.pixels.time_increment_unit
        )

        if base.instrument is not None and \
                base.instrument.microscope is not None:
            imd.microscope.model = base.instrument.microscope.model

        if base.objective is not None:
            imd.objective.nominal_magnification = \
                base.objective.nominal_magnification
            imd.objective.calibrated_magnification = \
                base.objective.calibrated_magnification

        for i in range(omexml.image_count):
            base = omexml.image(i)
            name = base.name.lower() if base.name else None
            if name == "thumbnail":
                associated = imd.associated_thumb
            elif name == "label":
                associated = imd.associated_label
            elif name == "macro":
                associated = imd.associated_macro
            else:
                continue
            associated.width = base.pixels.size_X
            associated.height = base.pixels.size_Y
            associated.n_channels = base.pixels.size_C

        imd.is_complete = True
        return imd

    @staticmethod
    def parse_frame_rate(
        time_increment: Optional[float], unit: Optional[str]
    ) -> Optional[Quantity]:
        if unit is None:
            unit = 's'
        if time_increment in [None, 0]:
            return None
        return 1 / time_increment * UNIT_REGISTRY(unit)

    @staticmethod
    def parse_ome_physical_size(
        physical_size: Optional[float], unit: Optional[str]
    ) -> Optional[Quantity]:
        if unit is None:
            unit = 'µm'
        if physical_size in [None, 0] or unit in ['pixel', 'reference frame']:
            return None
        return physical_size * UNIT_REGISTRY(unit)

    @staticmethod
    def parse_ome_acquisition_date(date: Optional[str]) -> Optional[datetime]:
        if date is None:
            return None
        return datetime.fromisoformat(date)

    def parse_raw_metadata(self) -> MetadataStore:
        store = super().parse_raw_metadata()
        ome = flatten(clean_ome_dict(cached_omedict(self.format)))
        for full_key, value in ome.items():
            key = full_key.split('.')[-1]
            if key not in ('TiffData', 'BinData'):
                store.set(full_key, value)
        return store

    def parse_pyramid(self) -> Pyramid:
        base_series = cached_tifffile_baseseries(self.format)

        pyramid = Pyramid()
        for i, level in enumerate(base_series.levels):
            page = level[0]
            tilewidth = page.tilewidth if page.tilewidth != 0 else page.imagewidth
            tilelength = page.tilelength if page.tilelength != 0 else page.imagelength
            subifd = i - 1 if i > 0 else None
            pyramid.insert_tier(
                page.imagewidth, page.imagelength,
                (tilewidth, tilelength),
                subifd=subifd
            )

        return pyramid

    def parse_planes(self) -> PlanesInfo:
        omexml = cached_omexml(self.format)
        base = omexml.main_image

        imd = self.format.main_imd
        pi = PlanesInfo(
            imd.n_intrinsic_channels, imd.depth, imd.duration,
            ['page_index'], [int]  # np.int is a removed alias for the builtin int
        )

        for i in range(base.pixels.tiff_data_count):
            td = base.pixels.tiff_data(i)
            pi.set(td.first_c, td.first_z, td.first_t, page_index=td.ifd)

        return pi


class OmeTiffReader(VipsReader):
    def read_thumb(self, out_width, out_height, precomputed=None, c=None, z=None, t=None):
        # TODO: precomputed ?
        # Thumbnail already uses shrink-on-load feature in default VipsReader
        # (i.e. it loads the right pyramid level according to the requested dimensions)
        page = self.format.planes_info.get(c, z, t, 'page_index')
        im = self.vips_thumbnail(out_width, out_height, page=page)
        return im.flatten() if im.hasalpha() else im

    def read_window(self, region, out_width, out_height, c=None, z=None, t=None):
        tier = self.format.pyramid.most_appropriate_tier(
            region, (out_width, out_height)
        )
        region = region.scale_to_tier(tier)

        page = self.format.planes_info.get(c, z, t, 'page_index')
        subifd = tier.data.get('subifd')

        opts = dict(page=page)
        if subifd is not None:
            opts['subifd'] = subifd

        tiff_page = VIPSImage.tiffload(str(self.format.path), **opts)
        return tiff_page.extract_area(
            region.left, region.top, region.width, region.height
        )

    def read_tile(self, tile, c=None, z=None, t=None):
        tier = tile.tier
        page = self.format.planes_info.get(c, z, t, 'page_index')
        subifd = tier.data.get('subifd')

        opts = dict(page=page)
        if subifd is not None:
            opts['subifd'] = subifd

        tiff_page = VIPSImage.tiffload(str(self.format.path), **opts)
        return tiff_page.extract_area(
            tile.left, tile.top, tile.width, tile.height
        )


class OmeTiffFormat(AbstractFormat):
    """
    OME-TIFF format.

    Known limitations:
    *

    References:
    *
    """
    checker_class = OmeTiffChecker
    parser_class = OmeTiffParser
    reader_class = OmeTiffReader
    histogram_reader_class = DefaultHistogramReader

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._enabled = True

    @classmethod
    def get_name(cls):
        return "OME-TIFF"

    @classmethod
    def is_spatial(cls):
        return True

    @cached_property
    def need_conversion(self):
        return False

    @property
    def media_type(self):
        return "ome/ome-tiff"
python
import torch
from utils.helpers import *
import warnings
from PIL import Image
from torchvision import transforms
# from torchsummary import summary


def image_transform(imagepath):
    test_transforms = transforms.Compose([transforms.Resize(255),
                                          transforms.CenterCrop(224),
                                          transforms.ToTensor(),
                                          transforms.Normalize([0.485, 0.456, 0.406],
                                                               [0.229, 0.224, 0.225])])
    image = Image.open(imagepath)
    imagetensor = test_transforms(image)
    return imagetensor


def predict(imagepath, verbose=False):
    if not verbose:
        warnings.filterwarnings('ignore')
    model_path = './models/catvdog.pth'
    try:
        # cheap check: referencing 'model' raises NameError if it isn't loaded yet
        checks_if_model_is_loaded = type(model)
    except NameError:
        model = load_model(model_path)
    model.eval()
    # summary(model, input_size=(3, 244, 244))
    if verbose:
        print("Model Loaded..")
    image = image_transform(imagepath)
    image1 = image[None, :, :, :]
    ps = torch.exp(model(image1))
    topconf, topclass = ps.topk(1, dim=1)
    if topclass.item() == 1:
        return {'class': 'dog', 'confidence': str(topconf.item())}
    else:
        return {'class': 'cat', 'confidence': str(topconf.item())}

# print(predict('data/dog1.jpeg'))
# print(predict('data/cat1.jpeg'))
# print(predict('data/dog2.jpeg'))
# print(predict('data/cat2.jpeg'))
python
""" Compute or load tail cost of """ import scipy.io as sio import numpy as np class TailCost(object): def __init__(self, dyn_system, gamma): C = dyn_system.C self.P0 = C.T.dot(C) self.q0 = np.zeros(C.shape[1]) self.r0 = 0. self.gamma = gamma def load(self, name): tail_mat = sio.loadmat('examples/power_converter/tail_backups/'+name) self.P0 = tail_mat['P0'] self.q0 = tail_mat['q0'] self.r0 = tail_mat['r0'] def compute(self, dyn_system, N_tail): """ Compute tail cost by solving an SDP """ # Load samples mean and variance # TODO: Complete # Compute ADP tail by solving an SDP # TODO: Complete
python
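A tiny construction example for the class above, assuming any object that exposes an output matrix C (here a stand-in namespace, since the real dynamical-system type isn't shown):

import numpy as np
from types import SimpleNamespace

sys_stub = SimpleNamespace(C=np.eye(2))  # hypothetical 2-output system
tail = TailCost(sys_stub, gamma=0.95)
print(tail.P0)  # C'C, the identity for this stub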
from sqlalchemy import Column, Integer, String

from models.base import Base


class Tiered_Song(Base):
    __tablename__ = 'tiered_songs'
    id = Column(Integer, primary_key=True)
    name = Column(String(256), nullable=False)
    artist = Column(String(128), nullable=False)
    song_type = Column(String(256), nullable=True)

    def __repr__(self):
        return "Song: <id=%r, name=%r>" % \
               (self.id, self.name)


SONG_TYPE_BASIC = "basic"      # simple match - name + artist
SONG_TYPE_ELASTIC = "elastic"  # matched with close enough elastic search
SONG_TYPE_MANUAL = "manual"    # manually confirmed to be the same song

# We want basic match songs first, then we use those songs and match them to find more 'correct' songs
python
import getopt

args = ['-a', '-b', 'foo', '--exclude', 'bar', 'x1', 'x2']
# '-a' is a flag, '-b' takes an argument, '--exclude' takes an argument
opts, pargs = getopt.getopt(args, 'ab:', ['exclude='])
print(opts)
print(pargs)
python
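For reference, the fixed call above parses those arguments into option pairs plus the remaining positionals:

# opts  == [('-a', ''), ('-b', 'foo'), ('--exclude', 'bar')]
# pargs == ['x1', 'x2']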
from decimal import Decimal

from django.db.models import Sum

from trojsten.results.generator import (
    BonusColumnGeneratorMixin,
    PrimarySchoolGeneratorMixin,
    ResultsGenerator,
)

from .default import CompetitionRules
from .default import FinishedRoundsResultsRulesMixin as FinishedRounds


class UFOResultsGenerator(PrimarySchoolGeneratorMixin, BonusColumnGeneratorMixin, ResultsGenerator):
    def create_empty_results(self, request):
        res = super(UFOResultsGenerator, self).create_empty_results(request)
        request.max_points = sum(
            request.round.task_set.aggregate(
                x=Sum("description_points"), y=Sum("source_points")
            ).values()
        )
        return res

    def calculate_row_round_total(self, request, row, cols):
        super(UFOResultsGenerator, self).calculate_row_round_total(request, row, cols)
        r = 9 - (row.user.graduation - self.get_minimal_year_of_graduation(request, row.user))

        self.bonus = (
            row.round_total
            * (request.max_points - row.round_total)
            * (Decimal("0.000") if r == 9 else Decimal("0.008") if r == 8 else Decimal("0.015"))
        )
        # FIXME(generic_results_stage_2): Hacking backward compatibility, since there is no
        # results freezing yet.
        if request.round.semester.pk == 10:
            self.bonus = (request.max_points - row.round_total) * (
                Decimal("0.000") if r == 9 else Decimal("0.008") if r == 8 else Decimal("0.015")
            )
        row.round_total += self.bonus


class UFORules(FinishedRounds, CompetitionRules):
    RESULTS_GENERATOR_CLASS = UFOResultsGenerator
python
from .merchant_id import unique_order_id_generator
from django.db.models.signals import pre_save
from universal_billing_system.models import Bills  # was Merchant; the receiver below targets Bills


def pre_save_create_bill_id(sender, instance, *args, **kwargs):
    if not instance.bill_id:
        instance.bill_id = unique_order_id_generator(instance)


pre_save.connect(pre_save_create_bill_id, sender=Bills)
python
# -*- coding: utf-8 -*-
"""
Copy + Paste in OS X
"""

import subprocess

from .base import *


def copy(string):
    """Copy given string into system clipboard."""
    try:
        subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE).communicate(
            string.encode("utf-8"))
    except OSError as why:
        raise XcodeNotFound
    return


def paste():
    """Returns system clipboard contents."""
    try:
        return subprocess.check_output('pbpaste').decode("utf-8")
    except OSError as why:
        raise XcodeNotFound
python
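A quick round trip with the helpers above, assuming macOS (both shell out to pbcopy/pbpaste):

copy("hello from the clipboard")
assert paste() == "hello from the clipboard"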
# import discord
from discord.ext import commands
import configparser

config = configparser.ConfigParser()
config.read("config.ini")

server_owner = config['role_name']['server_owner']
admin = config['role_name']['admin']
whis = config['id']['whis_id']


def possible(ctx, user, victim):
    msg = f"{ctx.message.author.mention} you are not allowed to use this on the " \
          f"Omni-King, me, other moderators, or yourself"
    if victim.top_role.name == whis:
        return msg
    elif victim.top_role.name == server_owner:
        return msg
    elif victim.top_role.name == admin:
        return msg
    elif victim == user:
        return msg
    else:
        msg = ''
        return msg


def is_author(ctx):
    user = ctx.message.author.id
    owner = int(config['id']['author_id'])  # config values are strings; cast before comparing with the numeric ID
    if user == owner:
        return True
    else:
        return False


def is_server_owner(ctx):
    best_role = ctx.message.author.top_role
    if best_role.name == config['role_name']['server_owner']:
        return True
    else:
        return False


def is_mod(ctx):
    best_role = ctx.message.author.top_role
    if best_role.name == config['role_name']['admin']:
        return True
    else:
        return False


def is_whis(bot_info):
    if bot_info.id == int(whis):
        return True


def whis_check():
    def predicate(ctx):
        if is_whis(ctx):
            return True
        else:
            user = ctx.message.author
            msg = f"{user.mention}, only the {server_owner} has access, you can not use this command"
            raise commands.CheckFailure(msg)
    return commands.check(predicate)


def server_owner_check():
    def predicate(ctx):
        total = sum([is_author(ctx), is_server_owner(ctx)])
        if total > 0:
            return True
        else:
            user = ctx.message.author
            msg = f"{user.mention}, only the {server_owner} has access, you can not use this command"
            raise commands.CheckFailure(msg)
    return commands.check(predicate)


def mod_check():
    def predicate(ctx):
        total = sum([is_author(ctx), is_server_owner(ctx), is_mod(ctx)])
        if total > 0:
            return True
        else:
            user = ctx.message.author
            msg = f"{user.mention}, you don't have a power level that can rival the {admin}, much less the" \
                  f" {server_owner}, you can not use this command"
            raise commands.CheckFailure(msg)
    return commands.check(predicate)
python
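A sketch of attaching these predicates to a command, assuming a discord.py 1.x-style bot and that the commented-out 'import discord' at the top is re-enabled for the Member converter; the command itself is hypothetical:

bot = commands.Bot(command_prefix='!')

@bot.command(name='banish')  # hypothetical command
@mod_check()
async def banish(ctx, victim: discord.Member):
    denial = possible(ctx, ctx.message.author, victim)
    if denial:
        await ctx.send(denial)
    else:
        await ctx.send(f'{victim.mention} has been banished')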
# SPDX-License-Identifier: MIT
# (c) 2019 The TJHSST Director 4.0 Development Team & Contributors

import os
import re
import shutil
from typing import Any, Dict

import jinja2

from .. import settings
from ..exceptions import OrchestratorActionError
from ..files import get_site_directory_path

TEMPLATE_DIRECTORY = os.path.join(os.path.dirname(__file__), "templates")

jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_DIRECTORY))

nginx_template = jinja_env.get_template("nginx.conf")


def update_nginx_config(site_id: int, data: Dict[str, Any]) -> None:
    """Returns None on success or a message on failure."""
    new_data = {}

    for key in [
        "name",
        "no_redirect_domains",
        "primary_url_base",
        "type",
        "resource_limits",
        "is_being_served",
    ]:
        if key not in data:
            raise OrchestratorActionError("Missing key {!r}".format(key))

        new_data[key] = data[key]

    # Some basic validation
    if (
        not isinstance(new_data["name"], str)
        or re.search(r"^[a-z0-9]+(-[a-z0-9]+)*$", new_data["name"]) is None
    ):
        raise OrchestratorActionError("Invalid name")

    if new_data["primary_url_base"] is not None and (
        not isinstance(new_data["primary_url_base"], str)
        or re.search(
            r"^https?://[-a-zA-Z0-9.]+(:\d+)?(/([-_a-zA-Z0-9.~]+/)*[-_a-zA-Z0-9.~]*)?$",
            new_data["primary_url_base"],
        )
        is None
    ):
        raise OrchestratorActionError("Invalid primary URL")

    if not isinstance(new_data["no_redirect_domains"], list):
        raise OrchestratorActionError("Invalid 'no redirect' domains")

    for domain in new_data["no_redirect_domains"]:
        if not isinstance(domain, str) or (
            re.search(r"^[a-z0-9]*(-[a-z0-9]+)*(\.[a-z][a-z0-9]*(-[a-z0-9]+)*)+$", domain)
            is None
            and re.search(r"^((\d+\.){3}\d+|([0-9a-fA-F]|:):[0-9a-fA-F:]*)$", domain) is None
        ):
            raise OrchestratorActionError("Invalid 'no redirect' domain {!r}".format(domain))

    if re.search(r"^\d+[kKmM]?$", new_data["resource_limits"]["client_body_limit"]) is None:
        raise OrchestratorActionError("Invalid client body limit")

    variables = {
        "settings": settings,
        "id": site_id,
        "site_dir": get_site_directory_path(site_id),
        "client_body_limit": new_data["resource_limits"]["client_body_limit"],
        **new_data,
    }

    text = nginx_template.render(variables)

    nginx_config_path = os.path.join(
        settings.NGINX_CONFIG_DIRECTORY, "site-{}.conf".format(site_id)
    )

    if os.path.exists(nginx_config_path):
        try:
            shutil.move(nginx_config_path, nginx_config_path + ".bak")
        except OSError as ex:
            raise OrchestratorActionError(
                "Error backing up old Nginx config: {}".format(ex)
            ) from ex

    try:
        with open(nginx_config_path, "w") as f_obj:
            f_obj.write(text)
    except OSError as ex:
        raise OrchestratorActionError("Error writing Nginx config: {}".format(ex)) from ex


def disable_nginx_config(site_id: int) -> None:
    """Returns None on success or a message on failure."""
    nginx_config_path = os.path.join(
        settings.NGINX_CONFIG_DIRECTORY, "site-{}.conf".format(site_id)
    )

    if os.path.exists(nginx_config_path):
        try:
            shutil.move(nginx_config_path, nginx_config_path + ".bad")
        except OSError as ex:
            raise OrchestratorActionError(
                "Error moving old Nginx config out of the way: {}".format(ex)
            ) from ex


def remove_nginx_config(site_id: int) -> None:
    """Returns None on success or a message on failure."""
    nginx_config_path = os.path.join(
        settings.NGINX_CONFIG_DIRECTORY, "site-{}.conf".format(site_id)
    )

    if os.path.exists(nginx_config_path):
        try:
            os.remove(nginx_config_path)
        except OSError as ex:
            raise OrchestratorActionError(
                "Error moving old Nginx config out of the way: {}".format(ex)
            ) from ex
python
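For reference, a data payload that passes the validation above might look like this (values are hypothetical; 'type' and 'is_being_served' are only checked for presence and passed through to the template):

update_nginx_config(site_id=42, data={
    "name": "my-site",
    "type": "static",
    "no_redirect_domains": ["example.com"],
    "primary_url_base": "https://example.com",
    "is_being_served": True,
    "resource_limits": {"client_body_limit": "10M"},
})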
import re

import lorawanwrapper.LorawanWrapper as LorawanWrapper


def formatData(data):
    result = ""
    if data is None:
        return result
    else:
        search = re.search('(.*)"data":"(.*?)"(.*)', data)
        if search is not None:  # means that a PHYPayload was received
            result = "Parsed data: %s\n" % (LorawanWrapper.printPHYPayload(search.group(2), None))
    return result
python
import sqlalchemy
import sqlalchemy_utils
from sqlalchemy.orm import sessionmaker  # imported explicitly; the bare 'import sqlalchemy' does not guarantee the orm namespace

from rentomatic.repository.postgres_objects import Base, Room

# Just for development purposes. Never store a password in plain text or commit it to GitHub
setup = {
    "dbname": "rentomaticdb",
    "user": "postgres",
    "password": "rentomaticdb",
    "host": "localhost",
}

connection_string = (
    f"postgresql+psycopg2://{setup['user']}"
    f":{setup['password']}@{setup['host']}/{setup['dbname']}"
)

engine = sqlalchemy.create_engine(connection_string)
sqlalchemy_utils.create_database(engine.url)

conn = engine.connect()

Base.metadata.create_all(engine)
Base.metadata.bind = engine

DBSession = sessionmaker(bind=engine)
session = DBSession()

data = [
    {
        "code": "f853578c-fc0f-4e65-81b8-566c5dffa35a",
        "size": 215,
        "price": 39,
        "longitude": -0.09998975,
        "latitude": 51.75436293,
    },
    {
        "code": "fe2c3195-aeff-487a-a08f-e0bdc0ec6e9a",
        "size": 405,
        "price": 66,
        "longitude": 0.18228006,
        "latitude": 51.74640997,
    },
    {
        "code": "913694c6-435a-4366-ba0d-da5334a611b2",
        "size": 56,
        "price": 60,
        "longitude": 0.27891577,
        "latitude": 51.45994069,
    },
    {
        "code": "eed76e77-55c1-41ce-985d-ca49bf6c0585",
        "size": 93,
        "price": 48,
        "longitude": 0.33894476,
        "latitude": 51.39916678,
    },
]

for r in data:
    new_room = Room(
        code=r["code"],
        size=r["size"],
        price=r["price"],
        longitude=r["longitude"],
        latitude=r["latitude"],
    )
    session.add(new_room)

session.commit()
python
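After seeding, a quick sanity check against the same session could look like this (a sketch; it just reads back the rows inserted above):

for room in session.query(Room).filter(Room.price < 65):
    print(room.code, room.size, room.price)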
import http.server
import logging
from urllib.parse import urlparse

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)


class DefaultHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
    """Default HTTP Request Handler Interface class."""

    def do_OPTIONS(self):
        """Default OPTIONS function for the Request Handler"""
        try:
            # log the full request path (the original indexed a single character, 'self.path[1]')
            logger.debug("OPTIONS request from: {0} to {1}".format(self.client_address, self.path))
            self._handle_OPTIONS()
        except Exception as ex:
            self.send_response(500, ex)
            print("Exception in DefaultHTTPRequestHandler.do_OPTIONS(): {0}".format(ex))

    def do_HEAD(self):
        """Default HEAD function for the Request Handler"""
        try:
            logger.debug("HEAD request from: {0} to {1}".format(self.client_address, self.path))
            self._handle_HEAD()
        except Exception as ex:
            self.send_response(500, ex)
            print("Exception in DefaultHTTPRequestHandler.do_HEAD(): {0}".format(ex))

    def do_GET(self):
        """Default GET function for the Request Handler"""
        try:
            logger.debug("GET request from: {0} to {1}".format(self.client_address, self.path))
            self._handle_GET()
        except Exception as ex:
            self.send_response(500, ex)
            print("Exception in DefaultHTTPRequestHandler.do_GET(): {0}".format(ex))

    def do_PUT(self):
        """Default PUT function for the Request Handler"""
        try:
            logger.debug("PUT request from: {0} to {1}".format(self.client_address, self.path))
            self._handle_PUT()
        except Exception as ex:
            self.send_response(500, ex)
            print("Exception in DefaultHTTPRequestHandler.do_PUT(): {0}".format(ex))

    def do_POST(self):
        """Default POST function for the Request Handler"""
        try:
            logger.debug("POST request from: {0} to {1}".format(self.client_address, self.path))
            self._handle_POST()
        except Exception as ex:
            self.send_response(500, ex)
            print("Exception in DefaultHTTPRequestHandler.do_POST(): {0}".format(ex))

    def do_DELETE(self):
        """Default DELETE function for the Request Handler"""
        try:
            logger.debug("DELETE request from: {0} to {1}".format(self.client_address, self.path))
            self._handle_DELETE()
        except Exception as ex:
            self.send_response(500, ex)
            print("Exception in DefaultHTTPRequestHandler.do_DELETE(): {0}".format(ex))

    def _handle_OPTIONS(self):
        """Handle OPTIONS function. Override this method."""
        self.send_response(501, "Not implemented")

    def _handle_HEAD(self):
        """Handle HEAD function. Override this method."""
        self.send_response(501, "Not implemented")

    def _handle_GET(self):
        """Handle GET function. Override this method."""
        self.send_response(501, "Not implemented")

    def _handle_PUT(self):
        """Handle PUT function. Override this method."""
        self.send_response(501, "Not implemented")

    def _handle_POST(self):
        """Handle POST function. Override this method."""
        self.send_response(501, "Not implemented")

    def _handle_DELETE(self):
        """Handle DELETE function. Override this method."""
        self.send_response(501, "Not implemented")
python
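A minimal sketch of putting the interface to work: subclass it, override one handler, and mount it on the standard-library server (address, port, and payload are arbitrary):

class HelloHandler(DefaultHTTPRequestHandler):
    def _handle_GET(self):
        body = b"hello"
        self.send_response(200)
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)


if __name__ == "__main__":
    http.server.HTTPServer(("127.0.0.1", 8080), HelloHandler).serve_forever()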
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import meep as mp
import numpy as np
# import scipy as sp
# from scipy import optimize as op
from scipy import interpolate as itp
from matplotlib import pyplot as plt
from multiprocessing import Pool
# from mpl_toolkits.mplot3d import Axes3D
import meep_objects as mpo
import json
import io
import sys
import time
# from ipywidgets import IntProgress
# from IPython.display import display
# import csv


## useful function
def convert_seconds(elapsed):
    minutes = np.floor(elapsed / 60)
    secs = elapsed - minutes * 60
    secs = np.round(secs * 100) / 100

    hours = np.int_(np.floor(minutes / 60))
    minutes = np.int_(minutes - hours * 60)

    return f'{hours}h-{minutes}min-{secs}s'


class Simulation(mp.Simulation):

    def __init__(self, sim_name='simulation_2D', dimensions=2, symmetries=[]):
        self.name = sim_name
        self.extra_space_xy = .3
        self.PML_width = .6
        self._empty = True

        super().__init__(
            cell_size=mp.Vector3(1, 1, 0),
            geometry=[],
            sources=[],
            resolution=1,
            boundary_layers=[],
            dimensions=dimensions,
            symmetries=symmetries,
            filename_prefix=sim_name,
            force_complex_fields=False,
            eps_averaging=False)

    @property
    def empty(self):
        return self._empty

    @empty.setter
    def empty(self, value):
        self._empty = value
        self.reset_meep()
        self.geometry = []
        try:
            if self._empty:
                self.geometry.extend(self._empty_geometry)
            else:
                self.geometry.extend(self._empty_geometry)
                self.geometry.extend(self._geometry)
        except AttributeError:
            raise AttributeError("cannot assign 'empty' property before initializing the geometry")

    def init_geometric_objects(self, eff_index_info={}, resolution=1, pattern_type='positive',
                               cavity_parameters={}, outcoupler_parameters={}):
        self._geometry = []
        self._empty_geometry = []

        self.cavity_r_size = (cavity_parameters["D"] / 2 + cavity_parameters["period"] *
                              cavity_parameters["N_rings"]) * (cavity_parameters["N_rings"] > 0)
        self.outcou_r_size = (outcoupler_parameters["D"] / 2 + outcoupler_parameters["period"] *
                              outcoupler_parameters["N_rings"]) * (outcoupler_parameters["N_rings"] > 0)

        self.domain_x = self.domain_y = 2 * (self.cavity_r_size + self.outcou_r_size + self.extra_space_xy)

        if pattern_type == 'positive':
            grating_index = np.real(eff_index_info["n_eff_l"])
            background_index = np.real(eff_index_info["n_eff_h"])

            medium_back = mpo.anysotropic_material(background_index,
                                                   eff_index_info["anisotropy"],
                                                   rot_angle_3=eff_index_info["tilt_anisotropy"])
            medium_groove = mp.Medium(epsilon=grating_index**2)

        elif pattern_type == 'negative':
            grating_index = np.real(eff_index_info["n_eff_h"])
            background_index = np.real(eff_index_info["n_eff_l"])

            medium_groove = mpo.anysotropic_material(grating_index,
                                                     eff_index_info["anisotropy"],
                                                     rot_angle_3=eff_index_info["tilt_anisotropy"])
            medium_back = mp.Medium(epsilon=background_index**2)
        else:
            raise ValueError(f'pattern type "{pattern_type}" is unknown')

        self.default_material = medium_back
        if cavity_parameters["N_rings"] > 0:
            cavity = mpo.circular_DBR_cavity(
                medium_back, medium_groove,
                cavity_parameters["D"],
                cavity_parameters["period"],
                cavity_parameters["FF"],
                cavity_parameters["N_rings"],
                orientation=mp.Vector3(0, 0, 1),
                thickness=0)

            self._geometry.extend(cavity)

        elif outcoupler_parameters["N_rings"] > 0:
            outcoupler = mpo.circular_DBR_cavity(
                medium_back, medium_groove,
                self.cavity_r_size * 2 + outcoupler_parameters["D"],
                outcoupler_parameters["period"],
                outcoupler_parameters["FF"],
                outcoupler_parameters["N_rings"],
                orientation=mp.Vector3(0, 0, 1),
                thickness=0)

            self._geometry.extend(outcoupler)

        # this will add all geometric objects to the simulation
        self.empty = False

        # resolution is 10 points per wavelength in the highest index material times a scale factor
        self.resolution = resolution

        self.name = self.name + f'_res{self.resolution}'
        self.filename_prefix = self.name

        # round domain with an integer number of grid points
        self.grid_step = 1 / self.resolution

        self.cell_size = mp.Vector3(self.domain_x + 2 * self.PML_width,
                                    self.domain_y + 2 * self.PML_width)
        print(self.cell_size)

        # make domain an integer number of voxels
        Nx = int(self.cell_size.x / self.grid_step)
        Nx -= np.mod(Nx, 2)  # make even; + 1  # make odd
        self.cell_size.x = Nx * self.grid_step

        Ny = int(self.cell_size.y / self.grid_step)
        Ny -= np.mod(Ny, 2)  # make even; + 1
        self.cell_size.y = Ny * self.grid_step

        print(self.cell_size)
        print()
        print(f"Number of voxels is ({Nx}x{Ny}) = {Nx*Ny/1e6} Mln")
        print(f"Minimum expected memory is {96*Nx*Ny/2**30:.2f}GB")
        print()

        self.boundary_layers = [mp.PML(self.PML_width)]
        # print( [self.cell_size.x / self.

        with open(f'{self.name}.json', 'w') as fp:
            data2save = {"eff_index_info": eff_index_info,
                         "pattern_type": pattern_type,
                         "resolution": self.resolution}

            if cavity_parameters["N_rings"] > 0:
                data2save["cavity_parameters"] = cavity_parameters

            if outcoupler_parameters["N_rings"] > 0:
                data2save["outcoupler_parameters"] = outcoupler_parameters

            json.dump(data2save, fp, indent=4)

    def init_sources_and_monitors(self, f, df, source_pos, allow_profile=False):
        self.sources = [mp.Source(
            src=mp.ContinuousSource(f, fwidth=0.1) if df == 0 else mp.GaussianSource(f, fwidth=df),
            center=source_pos,
            size=mp.Vector3(),
            component=mp.Ey)]

        self.harminv_instance = None
        self.field_profile = None
        self.spectrum_monitors = []

        if allow_profile:
            self.field_profile = self.add_dft_fields(
                [mp.Ey], f, 0, 1,
                center=mp.Vector3(),
                size=mp.Vector3(self.domain_x - .5 * self.extra_space_xy, 0))  # , yee_grid=True)
        else:
            if self.cavity_r_size > 0:
                DL = self.cavity_r_size + 0.02

                nfreq = 1000
                fluxr = mp.FluxRegion(
                    center=mp.Vector3(DL, 0),
                    size=mp.Vector3(0, 0),
                    direction=mp.X)
                self.spectrum_monitors.append(self.add_flux(f, df, nfreq, fluxr))  # , yee_grid=True)

                # if not self.empty:
                #     self.harminv_instance = mp.Harminv(mp.Ey, mp.Vector3(), f, df)


#%% function for parallel computing
def run_parallel(wavelength, n_eff_h, n_eff_l, D, DBR_period, empty=False, source_pos=0,
                 anisotropy=0, tilt_anisotropy=0):
    # import meep as mp

    c0 = 1
    # wavelength = 0.590
    wwidth = 0.25
    f = c0 / wavelength

    sim_end = 500

    fmax = c0 / (wavelength - wwidth / 2)
    fmin = c0 / (wavelength + wwidth / 2)
    df = fmax - fmin

    pattern_type = 'positive'

    t0 = time.time()

    cavity_parameters = {
        "D": D,
        "FF": .5,
        "period": DBR_period,
        "N_rings": 30}

    outcoupler_parameters = {
        "type": 'spiral',
        "D": 1,
        "FF": .5,
        "period": DBR_period * 2,
        "N_rings": 0,
        "N_arms": 0}

    eff_index_info = {
        "n_eff_h": n_eff_h,
        "n_eff_l": n_eff_l,
        "anisotropy": anisotropy,
        "tilt_anisotropy": tilt_anisotropy}

    t0 = time.time()

    date = time.strftime('%y%m%d-%H%M%S')  # '211001-121139'
    if len(sys.argv) > 1:
        sim_prefix = f"{sys.argv[1]}"
    else:
        sim_prefix = f"{date}"

    sim_name = "2D_eff_index_"
    sim_name += "cavity_" if cavity_parameters["N_rings"] > 0 else ""
    sim_name += "and_outcoupler_" if outcoupler_parameters["N_rings"] > 0 else ""
    sim_name += f"{sim_prefix}_"
    sim_name += f"D{D*1e3:.0f}_src{source_pos*1e3:.0f}"

    sim = Simulation(sim_name, symmetries=[mp.Mirror(mp.X), mp.Mirror(mp.Y, phase=-1)])  # mp.Mirror(mp.Y,phase=-1)])
    sim.extra_space_xy += wavelength / n_eff_l
    sim.eps_averaging = False
    sim.init_geometric_objects(
        eff_index_info=eff_index_info,
        resolution=100,
        pattern_type=pattern_type,
        cavity_parameters=cavity_parameters,
        outcoupler_parameters=outcoupler_parameters)

    if empty:
        sim.empty = True
        sim.name += '_empty'
    else:
        sim.empty = False

    sim.init_sources_and_monitors(f, df, source_pos=mp.Vector3(x=source_pos, y=1e-3), allow_profile=False)
    sim.init_sim()

    # fig = plt.figure(dpi=150, figsize=(10,10))
    # plot = sim.plot2D(eps_parameters={"interpolation":'none'})
    # fig.colorbar(plot.images[0])
    # # plt.show()
    # fig.savefig(f'{sim.name}-xy.jpg')
    # plt.close()
    # raise Exception()

    # mp.verbosity(0)
    sim.run(until=sim_end)
    print(f'\n\nSimulation took {convert_seconds(time.time()-t0)} to run\n')

    t = np.round(sim.round_time(), 2)

    data2save = {}
    if sim.harminv_instance is not None:
        resonances_Q = []
        resonances_f = []
        for mode in sim.harminv_instance.modes:
            if np.abs(mode.Q) > 100:
                resonances_Q.append(np.abs(mode.Q))
                resonances_f.append(mode.freq)
        resonances_Q = np.array(resonances_Q)
        resonances_f = np.array(resonances_f)

        sorting = np.argsort(resonances_Q)
        resonances_Q = resonances_Q[sorting[::-1]]
        resonances_f = resonances_f[sorting[::-1]]

        N_resonances = len(resonances_f)
        resonance_table = []
        for l in range(N_resonances):
            resonance_table.append([np.round(1/resonances_f[l]*1e3, 1), int(resonances_Q[l])])
        if N_resonances == 0:
            resonance_table.append([0, 0])
        print()
        print(resonance_table)
        print()

        # with open(f'{sim.name}_output.json', 'a') as fp:
        #     data2save = {f"resonance_table_t{t}": resonance_table}
        #     json.dump(data2save, fp, indent=4)

        data2save = {f"resonance_table_t{t}": resonance_table}

    if sim.field_profile is not None:
        for j in range(sim.field_profile.nfreqs):
            data2save[f"field_profile_Ey_{j}"] = sim.get_dft_array(sim.field_profile, mp.Ey, j)
        data2save["field_profile_Eps"] = sim.get_array(mp.Dielectric,
                                                       center=sim.field_profile.regions[0].center,
                                                       size=sim.field_profile.regions[0].size)
        (x, _, _, _) = sim.get_array_metadata(center=sim.field_profile.regions[0].center,
                                              size=sim.field_profile.regions[0].size)
        data2save["field_profile_x"] = x

    spectra = []
    for monitor in sim.spectrum_monitors:
        spectrum_f = np.array(mp.get_flux_freqs(monitor))
        spectra.append(np.array(mp.get_fluxes(monitor)))

    if len(spectra) > 0:
        data2save["wavelength"] = 1/spectrum_f*1e3
        data2save["spectra"] = spectra

    if len(data2save) > 0:
        mpo.savemat(f'{sim.name}_spectra_t{t}.mat', data2save)

    return data2save, sim.name


#%% geometry and simulation parameters
if __name__ == "__main__":  # good practice in parallel computing

    anisotropy = 0

    wavelength = .600  # 0.5703#.6088#.5703#.5884#.5893#0.5947#0.5893#.5922

    n_eff_l = 1
    n_eff_hs = [1.1, 1.14, 1.17]  # np.linspace(1.01,1.2,100) # [1.1]#1.0543, 1.0985, 1.1405] # 50, 75 and 100 nm pmma thickness

    period = .280  # round(wavelength/(n_eff_l+n_eff_h),3 )

    Ds = period * np.array([0.45])  # np.linspace(0, 3, 100) #np.array([0, 0.45, 1, 1.5, 2.36])#0.45, 0.9, 2.36])#

    # create input vector for parallel pool. It has to be a list of tuples,
    # where each element of the list represents one iteration and thus the
    # elements of the tuple represent the inputs.
    empty = True
    tuple_list = [(wavelength,
                   n_eff_hs[0], n_eff_l,
                   Ds[-1], period,
                   empty, 0,
                   anisotropy, 0)]
    empty = False

    j = 1
    for source_pos in [0]:  # 0, period/4, period/2]:
        for n_eff_h in n_eff_hs:
            for D in Ds:
                tuple_list.append((wavelength,
                                   n_eff_h, n_eff_l,
                                   D, period,
                                   empty, source_pos,
                                   anisotropy, 0))
                j += 1

    mp.verbosity(1)
    # mp.quiet(True)
    output = []
    names = []
    t0 = time.time()

    try:
        from mpi4py import MPI
    except:
        non_parallel_conda = True
    else:
        non_parallel_conda = False

    if len(sys.argv) > 2:
        if sys.argv[2] == "parallel_grid":
            non_parallel_conda = True
        else:
            bash_parallel_run = (sys.argv[2] == "parallel_bash")

    if len(sys.argv) < 2 or non_parallel_conda:
        for i in range(j):
            t1 = time.time()
            # print(tuple_list[i])
            data, name = run_parallel(*tuple_list[i])
            output.append(data)
            names.append(name)
            print(f'It has run for {convert_seconds(time.time()-t1)}, {i+1}/{j}')
            print(f'It will take roughly {convert_seconds((time.time()-t0)/(i+1)*(j-i-1))} more')
            print()
            print()

    elif bash_parallel_run:
        N_jobs = int(sys.argv[-1])
        j = int(sys.argv[3])

        N_list = len(tuple_list)
        if N_list < N_jobs:
            raise ValueError(f"Number of jobs should be lower than number of loop iterations ({N_list})")
        reminder = np.mod(N_list, N_jobs)
        N_loops_per_job = int(N_list / N_jobs)
        if j < reminder:
            N_loops_per_job += 1

        data_list = []
        name_list = []
        for i in range(N_loops_per_job):
            t1 = time.time()
            if j < reminder:
                tuple_index = j * N_loops_per_job + i
            else:
                tuple_index = reminder * (N_loops_per_job + 1) + (j - reminder) * N_loops_per_job + i
            if tuple_index >= N_list:
                continue
            data, name = run_parallel(*tuple_list[tuple_index])
            # data_list.append(data)
            # name_list.append(name)
            print(f'It has run for {convert_seconds(time.time()-t1)}, {i+1}/{N_loops_per_job}')
            print(f'It will take roughly {convert_seconds((time.time()-t0)/(i+1)*(N_loops_per_job-i-1))} more')

    else:
        # mp.reset_meep()
        comm = MPI.COMM_WORLD
        N_jobs = int(sys.argv[-1])
        print(f'number of processor is {mp.count_processors()}')
        j = mp.divide_parallel_processes(N_jobs)

        N_list = len(tuple_list)
        if N_list < N_jobs:
            raise ValueError(f"Number of jobs should be lower than number of loop iterations ({N_list})")
        reminder = np.mod(N_list, N_jobs)
        N_loops_per_job = int(N_list / N_jobs)
        if j < reminder:
            N_loops_per_job += 1

        data_list = []
        name_list = []
        for i in range(N_loops_per_job):
            t1 = time.time()
            if j < reminder:
                tuple_index = j * N_loops_per_job + i
            else:
                tuple_index = reminder * (N_loops_per_job + 1) + (j - reminder) * N_loops_per_job + i
            if tuple_index >= N_list:
                continue
            data, name = run_parallel(*tuple_list[tuple_index])
            # data_list.append(data)
            # name_list.append(name)
            print(f'It has run for {convert_seconds(time.time()-t1)}, {i+1}/{N_loops_per_job}')
            print(f'It will take roughly {convert_seconds((time.time()-t0)/(i+1)*(N_loops_per_job-i-1))} more')

        # if mp.am_really_master():
        #     output.extend(data_list)
        #     names.extend(name_list)
        #     for src in range(1, N_jobs):
        #         output.extend(comm.recv(source=src, tag=11))
        #         names.extend(comm.recv(source=src, tag=12))
        # else:
        #     comm.send(data_list, dest=0, tag=11)
        #     comm.send(name_list, dest=0, tag=12)
        #     exit()

    print(f'Total took {convert_seconds(time.time()-t0)}')

    #%%
    # plt.figure()
    # wv = output[0]["wavelength"]
    # s0 = output[0]["spectra"][0]
    # s1 = output[1]["spectra"][0]/s0
    # s2 = output[2]["spectra"][0]/s0
    # s3 = output[3]["spectra"][0]/s0
    # plt.semilogy(wv, s1, wv, s2, wv, s3)
    # plt.grid(True)
    # plt.xlabel("wavelength")
python
from functools import partial

from ..experiment_base import ExperimentBase
from ...models.linear import Linear_S, Linear_M, Linear_L
from ..training_args import LMMixupArgs
from ...data_loaders.json_loader import JsonLoader
from ...utils.label_convertors import convert2vec


class ExperimentLinearGinFPNSNoPartial(ExperimentBase):
    def load_data(self):
        data_loader = JsonLoader(self.data_path, rand_seed=self.rand_seed)
        x_train, y_train, x_test, y_test = data_loader.load_data(
            ratio=0.7, shuffle=True
        )
        convert2vec_float = partial(convert2vec, dtype=float)
        x_train, y_train, x_test, y_test = list(
            map(convert2vec_float, [x_train, y_train, x_test, y_test])
        )
        if self.mixup is not None:
            x_train, y_train = self._mixup(x_train, y_train)
        x_unlabeled, _ = data_loader.load_unlabeled()
        return x_train, y_train, x_test, y_test, x_unlabeled

    def run_experiment(self):
        # load training and testing data
        x_train, y_train, x_test, y_test, x_unlabeled = self.load_data()
        # open log
        log_f, log_path = self.open_log_(self.log_path)
        # train the teacher model
        trained_model, histories = self.train_teacher(
            model=Linear_S,
            x_train=x_train,
            y_train=y_train,
            x_test=x_test,
            y_test=y_test,
            x_pred=x_unlabeled,
            batch_size=self.batch_size,
            epochs=self.epochs,
            log_f=log_f,
            log_path=log_path,
            n_repeat=self.n_repeat,
            activation="sigmoid",
            loss="binary_crossentropy",
            out_len=12,
        )
        # log results
        self.log_training(trained_model, histories, log_path)
        # train student models
        for student in [Linear_M, Linear_L]:
            trained_model, histories = self.train_student(
                student_model=student,
                teacher_model=trained_model,
                x_train=x_train,
                y_train=y_train,
                x_test=x_test,
                y_test=y_test,
                x_pred=x_unlabeled,
                batch_size=self.batch_size,
                epochs=self.epochs,
                log_f=log_f,
                log_path=log_path,
                n_repeat=self.n_repeat,
                activation="sigmoid",
                loss="binary_crossentropy",
                out_len=12,
            )
            # log results
            self.log_training(trained_model, histories, log_path)
        log_f.write("best losses:\n {}\n".format(str(self.best_loss)))
        log_f.write("best accuracies:\n {}\n".format(str(self.best_acc)))
        log_f.close()
        self.log_predictions(trained_model, x_test, y_test, log_path)


if __name__ == "__main__":
    parser = LMMixupArgs()
    args = parser.parse_args()
    experiment = ExperimentLinearGinFPNSNoPartial(
        data_path=args.data_path,
        log_path=args.log_path,
        es_patience=args.es_patience,
        batch_size=args.batch_size,
        epochs=args.epochs,
        n_repeat=args.repeat,
        mixup=args.mixup,
        mixup_repeat=args.mixup_repeat,
        learning_rate=args.learning_rate,
        rand_seed=args.rand_seed,
        drop_rate=args.drop_rate,
    )
    experiment.run_experiment()
python
import copy

import rdtest
import renderdoc as rd


class VK_Vertex_Attr_Zoo(rdtest.TestCase):
    demos_test_name = 'VK_Vertex_Attr_Zoo'

    def check_capture(self):
        draw = self.find_draw("Draw")

        self.check(draw is not None)

        self.controller.SetFrameEvent(draw.eventId, False)

        # Make an output so we can pick pixels out
        # (the original assigned to the class 'rd.ReplayOutput' instead of a local variable):
        out: rd.ReplayOutput = self.controller.CreateOutput(rd.CreateHeadlessWindowingData(100, 100),
                                                            rd.ReplayOutputType.Texture)

        self.check(out is not None)

        ref = {
            0: {
                'SNorm': [1.0, -1.0, 1.0, -1.0],
                'UNorm': [12345.0/65535.0, 6789.0/65535.0, 1234.0/65535.0, 567.0/65535.0],
                'UScaled': [12345.0, 6789.0, 1234.0, 567.0],
                'UInt': [12345, 6789, 1234, 567],
                'Double': [9.8765432109, -5.6789012345],
                'Array[0]': [1.0, 2.0],
                'Array[1]': [3.0, 4.0],
                'Matrix:row0': [7.0, 8.0],
                'Matrix:row1': [9.0, 10.0],
            },
            1: {
                'SNorm': [32766.0/32767.0, -32766.0/32767.0, 16000.0/32767.0, -16000.0/32767.0],
                'UNorm': [56.0/65535.0, 7890.0/65535.0, 123.0/65535.0, 4567.0/65535.0],
                'UScaled': [56.0, 7890.0, 123.0, 4567.0],
                'UInt': [56, 7890, 123, 4567],
                'Double': [-7.89012345678, 6.54321098765],
                'Array[0]': [11.0, 12.0],
                'Array[1]': [13.0, 14.0],
                'Matrix:row0': [17.0, 18.0],
                'Matrix:row1': [19.0, 20.0],
            },
            2: {
                'SNorm': [5.0/32767.0, -5.0/32767.0, 0.0, 0.0],
                'UNorm': [8765.0/65535.0, 43210.0/65535.0, 987.0/65535.0, 65432.0/65535.0],
                'UScaled': [8765.0, 43210.0, 987.0, 65432.0],
                'UInt': [8765, 43210, 987, 65432],
                'Double': [0.1234567890123, 4.5678901234],
                'Array[0]': [21.0, 22.0],
                'Array[1]': [23.0, 24.0],
                'Matrix:row0': [27.0, 28.0],
                'Matrix:row1': [29.0, 30.0],
            },
        }

        # Copy the ref values and prepend 'In'
        in_ref = {}
        for idx in ref:
            in_ref[idx] = {}
            for key in ref[idx]:
                in_ref[idx]['In' + key] = ref[idx][key]

        # Copy the ref values and prepend 'Out'
        out_ref = {}
        for idx in ref:
            out_ref[idx] = {}
            for key in ref[idx]:
                out_ref[idx]['Out' + key] = ref[idx][key]

        vsout_ref = copy.deepcopy(out_ref)
        gsout_ref = out_ref

        vsout_ref[0]['gl_PerVertex.gl_Position'] = [-0.5, 0.5, 0.0, 1.0]
        gsout_ref[0]['gl_PerVertex.gl_Position'] = [0.5, -0.5, 0.4, 1.2]

        vsout_ref[1]['gl_PerVertex.gl_Position'] = [0.0, -0.5, 0.0, 1.0]
        gsout_ref[1]['gl_PerVertex.gl_Position'] = [-0.5, 0.0, 0.4, 1.2]

        vsout_ref[2]['gl_PerVertex.gl_Position'] = [0.5, 0.5, 0.0, 1.0]
        gsout_ref[2]['gl_PerVertex.gl_Position'] = [0.5, 0.5, 0.4, 1.2]

        self.check_mesh_data(in_ref, self.get_vsin(draw))
        rdtest.log.success("Vertex input data is as expected")

        self.check_mesh_data(vsout_ref, self.get_postvs(rd.MeshDataStage.VSOut))
        rdtest.log.success("Vertex output data is as expected")

        # This is optional to account for drivers without XFB
        postgs_data = self.get_postvs(rd.MeshDataStage.GSOut)
        if len(postgs_data) > 0:
            self.check_mesh_data(gsout_ref, postgs_data)
            rdtest.log.success("Geometry output data is as expected")
        else:
            rdtest.log.print("Geometry output not tested")

        pipe: rd.PipeState = self.controller.GetPipelineState()

        tex = rd.TextureDisplay()
        tex.resourceId = pipe.GetOutputTargets()[0].resourceId
        out.SetTextureDisplay(tex)

        texdetails = self.get_texture(tex.resourceId)

        picked: rd.PixelValue = out.PickPixel(tex.resourceId, False,
                                              int(texdetails.width / 2), int(texdetails.height / 2), 0, 0, 0)

        if not rdtest.value_compare(picked.floatValue, [0.0, 1.0, 0.0, 1.0]):
            raise rdtest.TestFailureException(
                "Picked value {} doesn't match expectation".format(picked.floatValue))

        rdtest.log.success("Triangle picked value is as expected")

        # Step to the next draw with awkward struct/array outputs
        self.controller.SetFrameEvent(draw.next.eventId, False)

        ref = {
            0: {
                'outData.outStruct.a': [1.1],
                'outData.outStruct.b[0]': [2.2],
                'outData.outStruct.b[1]': [3.3],
                'outData.outStruct.c.foo[0]': [4.4],
                'outData.outStruct.c.foo[1]': [5.5],
                'outData.outStruct.d[0].foo': [6.6],
                'outData.outStruct.d[1].foo': [7.7],
            },
        }

        self.check_mesh_data(ref, self.get_postvs(rd.MeshDataStage.VSOut))
        rdtest.log.success("Nested vertex output data is as expected")

        # The array-of-structs data is broken in transform feedback
        del ref[0]['outData.outStruct.d[0].foo']
        del ref[0]['outData.outStruct.d[1].foo']

        self.check_mesh_data(ref, self.get_postvs(rd.MeshDataStage.GSOut))
        rdtest.log.success("Nested geometry output data is as expected")

        out.Shutdown()
python
#!/usr/bin/python ####################################################### # Copyright (c) 2019 Intel Corporation. All rights reserved. # # GNU General Public License v3.0+ # (see LICENSE.GPL or https://www.gnu.org/licenses/gpl-3.0.txt) # # Authors: # - Marco Chiappero - <[email protected]> ####################################################### from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = ''' --- module: rsd_compose short_description: Manages the life cycle of Rack Scale Design composed resources description: - Allocate/Assemble/Destroy Rack Scale Design Composed Nodes. - Non-absent nodes may be called "composed" within this source file, i.e., nodes that are either allocated or assembled but the distinction wouldn't matter under the context. version_added: "2.6" author: - Marco Chiappero options: id: description: - Specify the node to act on by specifying 'type' for type of identification key and 'value' for its value. required: false suboptions: type: type: str description: - Specify type of identification. For best performance it is suggested to use identity as type. required: false default: identity choices: [identity, uuid, name] value: type: str description: - Identification signature. required: true spec: description: - Enumerate the desired resources for allocating or assembling a node. It's incompatible with I(id) and I(specfile). Each sub-option can express PODM API compliant specifications in either JSON or YAML format. Refer to the PODM API specification for a complete list of available options. required: false suboptions: name: type: str description: - Name for the composed node description: type: str description: - Description of the composed node processors: type: json description: - List of processors and related requirements memory: type: json description: - List of memory modules and related requirements local_drives: type: json description: - A list of local drives and related requirements remote_drives: type: json description: - A list of remote drives and related requirements eth_ifaces: type: json description: - A list of ethernet interfaces and related requirements security: type: dict description: - Security specifications # oem: # type: dict # description: # - OEM specific features total_cores: type: int description: - Total core count for the whole composed node total_memory: type: int description: - Total memory amount for the whole composed node specfile: description: - Execute this task even if it requires deleting a Composed Node - This option is mutually exclusive with I(spec) and I(id) type: path aliases: - 'file' state: description: - Assert the desired state for the composed node, whether such node is described by I(spec), I(specfile) or I(id). I(state=allocated) will try to allocate a node as described in I(spec) or I(specfile). I(state=assembled) will try to first allocate and then assemble a node as described in I(spec) or I(specfile), or assemble a pre-allocated node referenced by I(id). An existing allocated or assembled node specified by I(id) can be decomposed and its resources released by requesting I(state=absent). 
        choices: [allocated, assembled, absent]
        default: assembled
        required: false

extends_documentation_fragment:
    - rsd

requirements:
    - enum34 or Python >= 3.4

notes:
    - Due to the nature of the PODM API, check mode cannot be supported
    - For the same reason the module is not idempotent at the moment, since
      any result depends on decisions actually made by PODM
    - While modules should not require that a user know all the underlying
      options of an API/tool to be used, PODM API contains multiple nested
      levels that would be difficult to capture anyway. Moreover the API is
      still under heavy development and the use of a 'catch-all'
      spec/specfile option promotes forward compatibility while delegating
      up-to-date value checking to rsd-lib.
'''

EXAMPLES = '''
---
- name: Allocate a node with the provided specs
  rsd_compose:
    spec:
      processors:
        - ProcessorType: CPU
          AchievableSpeedMHz: 3000
        - ProcessorType: FPGA
          Connectivity: RemotePCIe
      local_drives:
        - Type: SSD
      remote_drives:
        - CapacityGiB: 60
          Protocol: iScsi
        - CapacityGiB: 80
          Protocol: NVMeOverFabrics
    state: allocated
  register: result

- name: Assemble the allocated node
  rsd_compose:
    id:
      value: "{{ result.node.Id }}"
    state: assembled

- name: Delete the previously assembled node
  rsd_compose:
    id:
      value: "{{ result.node.Id }}"
    state: absent

- name: Assemble a node from spec file
  rsd_compose:
    specfile: /path/to/my_node_spec.json
    podm:
      host: 192.168.0.1
      port: 12345

- name: Allocate a node using JSON formatted specs
  rsd_compose:
    spec:
      processors: [{ ProcessorType: CPU, AchievableSpeedMHz: 3000 }]
      remote_drives: [{ CapacityGiB: 60, Protocol: iScsi }]
    state: allocated
'''

RETURN = '''
---
# request:
#     description: In the case of a composition request, provide the full
#                  spec as provided by the user via either I(spec) or
#                  I(specfile)
#     returned: On success
#     type: complex
node:
    description: Complete description of the node
    returned: On success
    type: complex
    contains:
        Id:
            description: Composed node ID
        Name:
            description: Name assigned to the composed node
Description: description: Description associated with the node UUID: description: The resource UUID assigned by PODM PowerState: description: Current power state ComposedNodeState: description: State of the composed node sample: "Id": "Node1" "Name": "Composed Node" "Description": "Node #1" "UUID": "00000000-0000-0000-0000-000000000000" "PowerState": "On" "ComposedNodeState": "Allocated" ''' from enum import Enum from enum import unique from time import sleep import os.path import json from ansible.module_utils.rsd_common import RSD try: import rsd_lib import sushy import jsonschema except ImportError: pass class RsdNodeCompose(RSD): @unique class STATE(Enum): ABSENT = 'absent' ALLOCATING = 'allocating' ALLOCATED = 'allocated' ASSEMBLING = 'assembling' ASSEMBLED = 'assembled' FAILED = 'failed' @classmethod def allowed_module_args(cls): return ( cls.ABSENT.value, cls.ALLOCATED.value, cls.ASSEMBLED.value ) @classmethod def allowed_for_deletion(cls): return ( cls.ALLOCATED, cls.ASSEMBLED, cls.FAILED ) @classmethod def transition_states(cls): return ( cls.ALLOCATING, cls.ASSEMBLING ) @staticmethod def of(node): return RsdNodeCompose.STATE(node.composed_node_state.lower()) def __init__(self): required_if = [ ['state', 'absent', ['id']], ['state', 'allocated', ['spec', 'specfile'], True], ['state', 'assembled', ['spec', 'specfile', 'id'], True], ] mutually_exclusive = [ ['id', 'spec', 'specfile'] ] required_one_of = [ ['id', 'spec', 'specfile'] ] argument_spec = dict( id=dict( type='dict', required=False, options=dict( type=dict( type='str', required=False, choices=['name', 'identity', 'uuid'], default='identity' ), value=dict( type='str', required=True ) ) ), spec=dict( type='dict', required=False, options=dict( name=dict(type='str', required=False), description=dict(type='str', required=False), processors=dict(type='json', required=False), memory=dict(type='json', required=False), local_drives=dict(type='json', required=False), remote_drives=dict(type='json', required=False), eth_ifaces=dict(type='json', required=False), security=dict(type='dict', required=False), # oem=dict(type='dict', required=False), total_cores=dict(type='int', required=False), total_mem=dict(type='int', required=False), # performance=() #SupportedPerformanceConfigurations ), ), specfile=dict( type='path', aliases=['file'], required=False ), state=dict( default=self.STATE.ASSEMBLED.value, choices=self.STATE.allowed_module_args(), required=False ), ) super(RsdNodeCompose, self).__init__( argument_spec, required_one_of=required_one_of, required_if=required_if, mutually_exclusive=mutually_exclusive, supports_check_mode=False) def _wait_for_state_transition(self, node, wait_time=0.5, retries=60): if not node: raise ValueError("Cannot wait on node transition without a node") while retries > 0: sleep(wait_time) node.refresh() state = self.STATE.of(node) if state in self.STATE.transition_states(): retries -= 1 else: break return state def _delete_node(self, node): if not node: # Nothing to delete, no changes self.module.exit_json(changed=False, msg="Node already absent") state = self.STATE.of(node) self.module.debug( "Trying to delete node '{0}' from state '{1}'".format( node.identity, state.value)) if state in self.STATE.transition_states(): state = self._wait_for_state_transition(node) if state in self.STATE.allowed_for_deletion(): node.delete_node() self.module.exit_json(changed=True, msg="Node deleted") else: self.module.fail_json( msg="Cannot delete node in '{0}' state".format(state.value)) def _parse_node_specfile(self): 
        podm_file_mappings = [
            ('Name', 'name', False),
            ('Description', 'description', False),
            ('Processors', 'processor_req', False),
            ('Memory', 'memory_req', False),
            ('RemoteDrives', 'remote_drive_req', False),
            ('LocalDrives', 'local_drive_req', False),
            ('EthernetInterfaces', 'ethernet_interface_req', False),
            ('Security', 'security_req', False),
            # ('OEM', '', False),
            ('TotalSystemCoreCount', 'total_system_core_req', False),
            ('TotalSystemMemoryMiB', 'total_system_memory_req', False),
            # ('SupportedPerformanceConfigurations', '', False)
        ]

        filename = self.module.params.get('specfile', None)
        if not filename:
            return
        if not filename.endswith(".json"):
            raise ValueError("File must end with .json extension")

        with open(filename, 'r') as f:
            spec = json.load(f)

        return self._translate_request(spec, podm_file_mappings)

    def _parse_node_spec(self):
        module_arg_mappings = [
            ('name', 'name', False),
            ('description', 'description', False),
            ('processors', 'processor_req', True),
            ('memory', 'memory_req', True),
            ('remote_drives', 'remote_drive_req', True),
            ('local_drives', 'local_drive_req', True),
            ('eth_ifaces', 'ethernet_interface_req', True),
            ('security', 'security_req', False),
            # ('oem', '', False),
            ('total_cores', 'total_system_core_req', False),
            ('total_mem', 'total_system_memory_req', False),
            # ('SupportedPerformanceConfigurations', '', True),
        ]

        spec = self.module.params.get('spec', None)
        return self._translate_request(spec, module_arg_mappings)

    def _translate_request(self, spec, mappings):
        if not spec:
            raise ValueError("Missing node spec to perform translation")
        if not isinstance(spec, dict):
            raise TypeError("Node specifications must be a dictionary")
        if not mappings:
            raise ValueError("Missing node mappings to perform translation")

        to_translate = spec.copy()  # no need for a deep copy
        translated = dict()
        for (podm_opt, lib_opt, decode) in mappings:
            value = to_translate.pop(podm_opt, None)
            if value:
                if decode:
                    translated[lib_opt] = json.loads(value)
                else:
                    translated[lib_opt] = value

        if to_translate:
            self.module.fail_json(msg="Invalid, unsupported or duplicated "
                                  "values in spec: {0}".format(to_translate))

        self.module.debug("rsd-lib node spec {0}".format(translated))
        return translated

    def _allocate_node(self):
        spec = self._parse_node_specfile()
        if not spec:
            spec = self._parse_node_spec()
        if not spec:
            self.module.fail_json(msg="Unable to parse node specs")

        return self._do_allocate_node(spec)

    def _do_allocate_node(self, spec):
        nodes = self.rsd.get_node_collection()
        try:
            node_uri = nodes.compose_node(**spec)
        except sushy.exceptions.HTTPError as e:
            self.module.fail_json(
                msg="Failed to allocate node: {0}".format(str(e)))
        except jsonschema.exceptions.ValidationError as e:
            self.module.fail_json(
                msg="Invalid spec formatting or value: {0}".format(str(e)))

        node_id = os.path.split(node_uri)[-1]
        node = self.rsd.get_node(node_uri)
        state = self._wait_for_state_transition(node)
        if state is not self.STATE.ALLOCATED:
            self.module.fail_json(
                msg="Failed to allocate node '{0}'".format(node_id))

        self.module.debug("Allocated new node with id '{0}'".format(node_id))
        return node

    def _assemble_node(self, node):
        if not node:
            raise ValueError("No node provided to assemble")

        state = self.STATE.of(node)
        self.module.debug(
            "Trying to assemble node '{0}' from state {1}".format(
                node.identity, state.value))

        if state in self.STATE.transition_states():
            state = self._wait_for_state_transition(node)

        if state is self.STATE.ALLOCATED:
            self._do_assemble_node(node)
            self._return_ok_node_response(node, True)
        elif state is self.STATE.ASSEMBLED:
            # Already in the desired state, nothing to do
            self._return_ok_node_response(node, False)
        elif state is self.STATE.FAILED:
            self.module.fail_json(
                msg="Cannot assemble node in 'Failed' state")
        else:
            self.module.fail_json(
                msg="Cannot assemble node '{0}' from state '{1}'".format(
                    node.identity, state.value))

    def _do_assemble_node(self, node):
        node.assemble_node()
        state = self._wait_for_state_transition(node)

        if state is self.STATE.ASSEMBLED:
            self.module.debug(
                "Node '{0}' now in Assembled state".format(node.identity))
        elif state is self.STATE.FAILED:
            self.module.fail_json(
                msg="Failed to assemble node '{0}'".format(node.identity))
        else:
            self.module.fail_json(
                msg="Node '{0}' is in state '{1}', cannot assemble".format(
                    node.identity, state))

    def _get_node_links_info(self, node):
        info = dict()

        system = self.rsd.get_system(node.links.computer_system)
        info["System"] = {
            "Name": system.name,
            "Description": system.description,
            "Id": system.identity,
            "ProcessorSummary": {
                "Count": system.processor_summary.count,
                "Model": system.processor_summary.model
            },
            "TotalSystemMemoryGiB":
                system.memory_summary.total_system_memory_gib
        }

        # Waiting for bug fix in rsd-lib
        # ifaces = []
        # info["Interfaces"] = ifaces
        # iface_ids = node.links.ethernet_interfaces  # (tuple of URIs/IDs)
        # for iface_id in iface_ids:
        #     iface = system.ethernet_interfaces.get_member(iface_id)
        #     ifaces.append({
        #         "Name": iface.name,
        #         "Description": iface.description,
        #         "Id": iface.identity,
        #         "MACAddress": iface.mac_address,
        #         "IPv4Addresses": [a.address for a in iface.ipv4_addresses],
        #         "IPv6Addresses": [a.address for a in iface.ipv6_addresses],
        #     })

        info["Drives"] = dict()

        local_drives = []
        info["Drives"]["Local"] = local_drives
        local_drive_ids = node.links.local_drives
        for drive_id in local_drive_ids or []:
            pass
            # drive = system.storage().get_member(drive_id)
            # local_drives.append({
            #
            # })

        remote_drives = []
        info["Drives"]["Remote"] = remote_drives
        remote_drive_ids = node.links.remote_drives
        for drive_id in remote_drive_ids or []:
            pass
            # drive = rsd.get_storage_service()

        return info

    def _return_ok_node_response(self, node, changed):
        if not node:
            raise ValueError("No node provided to return")
        if not node.uuid:
            self.module.fail_json(msg="Node has no UUID assigned")

        node_desc = dict()
        node_desc["Id"] = node.identity
        node_desc["Name"] = node.name
        node_desc["Description"] = node.description
        node_desc["UUID"] = node.uuid
        node_desc["ComposedNodeState"] = node.composed_node_state
        node_desc["PowerState"] = node.power_state
        node_desc["Status"] = {
            'State': node.status.state,
            'Health': node.status.health,
            # 'HealthRollup': node.status.health_rollup
        }
        # node_desc["Boot"] = {
        #     'BootSourceOverrideEnabled': node.boot.enabled,
        #     'BootSourceOverrideMode': node.boot.mode,
        #     'BootSourceOverrideTarget': node.boot.target
        # }
        node_desc['Details'] = self._get_node_links_info(node)

        self.module.exit_json(changed=changed, node=node_desc)

###############################################################################

    def _delete_existing_node(self):
        self.module.debug("Request to delete an existing node")
        node = self._get_node()
        self._delete_node(node)

    def _assemble_allocated_node(self):
        self.module.debug("Request to assemble an existing node")
        node = self._get_node()
        self._assemble_node(node)

    def _allocate_new_node(self):
        self.module.debug("Request to allocate a new node")
        node = self._allocate_node()
        self._return_ok_node_response(node, True)

    def _allocate_and_assemble_new_node(self):
        self.module.debug("Request to assemble a new node")
        node = self._allocate_node()
        self._assemble_node(node)

    def run(self):
        id = self.module.params.get('id', None)
        requested_state = self.STATE(self.module.params['state'])

        if id and requested_state is self.STATE.ABSENT:
            self._delete_existing_node()
        elif id and requested_state is self.STATE.ASSEMBLED:
            self._assemble_allocated_node()
        elif requested_state is self.STATE.ALLOCATED:
            self._allocate_new_node()
        elif requested_state is self.STATE.ASSEMBLED:
            self._allocate_and_assemble_new_node()
        else:
            self.module.fail_json(msg="Invalid options for the module")


def main():
    compose = RsdNodeCompose()
    compose.run()


if __name__ == '__main__':
    main()
python
# Read a radius and print the area of a circle with four decimal places,
# using pi = 3.14159 as the problem statement prescribes.
r = float(input())
print("A=%.4f" % (3.14159 * (r ** 2)))
python
# Copyright 2020 Unibg Seclab (https://seclab.unibg.it)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools


def get_validation_function(K, L):
    if K and L:
        return functools.partial(is_k_l_valid, K=K, L=L)
    elif K:
        return functools.partial(is_k_anonymous, K=K)
    elif L:
        return functools.partial(is_l_diverse, L=L)
    else:
        raise AttributeError("Neither K nor L was given, or both are zero.")


# Functions to evaluate if a partition is valid

def is_k_anonymous(df, partition, sensitive_columns, K):
    """Check if the partition contains at least K rows."""
    return len(partition) >= K


def is_l_diverse(df, partition, sensitive_columns, L):
    """Check if a partition is l-diverse, i.e., every sensitive column
    takes at least L distinct values within the partition."""
    # Low performance solution
    # nunique = df.loc[partition, sensitive_columns].nunique()
    # return (nunique >= L).all()
    for column in sensitive_columns:
        if df[column][partition].nunique() < L:
            return False
    return True


def is_k_l_valid(df, partition, sensitive_columns, K, L):
    """Check if a partition is both k-anonymous and l-diverse."""
    return is_k_anonymous(df, partition, sensitive_columns, K) \
        and is_l_diverse(df, partition, sensitive_columns, L)
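# A minimal usage sketch (the DataFrame, partition and column names below
# are made up for illustration; the functions above only assume pandas-like
# data):
if __name__ == "__main__":
    import pandas as pd

    df = pd.DataFrame({
        "age": [34, 35, 36, 37],
        "disease": ["flu", "flu", "cold", "cold"],
    })
    partition = df.index  # treat the whole table as a single partition
    is_valid = get_validation_function(K=2, L=2)
    # True: the partition has >= 2 rows and the sensitive column takes
    # >= 2 distinct values within it.
    print(is_valid(df, partition, ["disease"]))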
python
import play
import tactics.line_up
import behavior_sequence
import tools.sleep
import robocup
import constants
import time
import enum


class Brain(play.Play):
    # initialize constants, etc.
    def __init__(self):
        # not sure if we need this
        super().__init__(continuous=True)

    class State(enum.Enum):
        waiting = 0
        # distinct value: enum members with equal values become aliases,
        # so `dummy = 0` would silently alias `waiting`
        dummy = 1
python
from .na_syndra_top import * from .na_syndra_jng import * from .na_syndra_mid import * from .na_syndra_bot import * from .na_syndra_sup import *
python
import os

__version__ = 'v0.0.8'  # update also in setup.py

root_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))

info = {
    "name": "NiLabels",
    "version": __version__,
    "description": "",
    "repository": {
        "type": "git",
        "url": ""
    },
    "author": "Sebastiano Ferraris",
    "dependencies": {
        # requirements.txt automatically generated using pipreqs
        "python requirements": "{0}/requirements.txt".format(root_dir)
    }
}

definition_template = """
A template is the average, computed with a chosen protocol, of a series of
image acquisitions of the same anatomy, or in general of different objects
that share common features.
"""

definition_atlas = """
An atlas is the segmentation of the template, obtained by averaging, with a
chosen protocol, the series of segmentations corresponding to the series of
image acquisitions that generates the template.
"""

definition_label = """
A segmentation assigns each region a label, and labels are represented as
subsets of voxels with the same positive integer value.
"""

nomenclature_conventions = """
pfi_xxx = path to file xxx, \npfo_xxx = path to folder xxx, \nin_xxx = input data structure xxx, \nout_xxx = output data structure xxx, \nz_ : prefix to temporary files and folders, \nfin_ : file name.
"""
python
from random import randint
from time import sleep

itens = ('pedra', 'papel', 'tesoura')
computador = randint(0, 2)
print('''Suas opções:
[0] Pedra
[1] Papel
[2] Tesoura''')
jogador = int(input('Qual é a sua jogada? '))
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!!!')
print('-=' * 12)
print('O computador jogou {} '.format(itens[computador]))
print('O jogador jogou {} '.format(itens[jogador]))
print('-=' * 12)
if computador == 0:  # computer played rock
    if jogador == 0:
        print("EMPATE")
    elif jogador == 1:
        print('JOGADOR VENCEU! ')
    elif jogador == 2:
        print('COMPUTADOR VENCEU!')
    else:
        print('JOGADA INVALIDA !')
elif computador == 1:  # computer played paper
    if jogador == 0:
        print('COMPUTADOR VENCEU!')
    elif jogador == 1:
        print('EMPATE!')
    elif jogador == 2:
        print('JOGADOR VENCEU!')
    else:
        print('JOGADA INVALIDA ! ')
elif computador == 2:  # computer played scissors
    if jogador == 0:
        print('JOGADOR VENCEU!')
    elif jogador == 1:
        print('COMPUTADOR VENCEU!')
    elif jogador == 2:
        print('EMPATE!')
    else:
        print('JOGADA INVALIDA ! ')
python
"""683. Word Break III """ class Solution: """ @param s: A string @param dict: A set of word @return: the number of possible sentences. """ def wordBreak3(self, s, dict): # Write your code here ## Practice: lower_dict = set() for word in dict: lower_dict.add(word.lower()) memo = {} s = s.lower() return self.dfs(0, s, lower_dict, memo) def dfs(self, idx, s, dict, memo): if idx == len(s): return 1 res = 0 for i in range(idx, len(s)): prefix = s[idx: i + 1] if prefix not in dict: continue res += self.dfs(i + 1, s, dict, memo) memo[s[idx:]] = res return res ##### lower_dict = set() for word in dict: lower_dict.add(word.lower()) s = s.lower() return self.dfs(s, 0, lower_dict, {}) def dfs(self, s, index, dict, memo): if index == len(s): return 1 if s[index:] in memo: return memo[s[index:]] res = 0 for i in range(index, len(s)): prefix = s[index :i + 1] if prefix not in dict: continue res += self.dfs(s, i + 1, dict, memo) memo[s[index:]] = res return res
python
#!/bin/python3

import math
import os
import random
import re
import sys
from collections import deque, defaultdict

# Complete the findShortest function below.
#
# For the weighted graph, <name>:
#
# 1. The number of nodes is <name>_nodes.
# 2. The number of edges is <name>_edges.
# 3. An edge exists between <name>_from[i] to <name>_to[i].
#
#
def findShortest(graph_nodes, graph_from, graph_to, ids, val):
    # Build an undirected adjacency list and record each node's colour.
    maps = defaultdict(list)
    colour = defaultdict(int)
    for i in range(len(graph_from)):
        maps[graph_from[i]].append(graph_to[i])
        maps[graph_to[i]].append(graph_from[i])
        if graph_from[i] not in colour:
            colour[graph_from[i]] = ids[graph_from[i]-1]
        if graph_to[i] not in colour:
            colour[graph_to[i]] = ids[graph_to[i]-1]

    # BFS outwards from val; the first node found with the same colour is
    # the closest one, so its depth is the answer.
    queue = deque()
    queue.append((val, 0))
    start_colour = colour[val]
    visited = set()
    while queue:
        current, count = queue.popleft()
        visited.add(current)
        for i in maps[current]:
            if i not in visited:
                if colour[i] == start_colour:
                    return count+1
                visited.add(i)
                queue.append((i, count+1))
    return -1

if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    graph_nodes, graph_edges = map(int, input().split())

    graph_from = [0] * graph_edges
    graph_to = [0] * graph_edges

    for i in range(graph_edges):
        graph_from[i], graph_to[i] = map(int, input().split())

    ids = list(map(int, input().rstrip().split()))

    val = int(input())

    ans = findShortest(graph_nodes, graph_from, graph_to, ids, val)

    fptr.write(str(ans) + '\n')

    fptr.close()
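# Worked example (hypothetical input): 4 nodes with edges 1-2, 1-3, 2-4 and
# colours ids = [1, 2, 1, 1]. Starting from val = 1 (colour 1), BFS reaches
# node 3 (also colour 1) after one edge, so:
#
#   findShortest(4, [1, 1, 2], [2, 3, 4], [1, 2, 1, 1], 1)  # -> 1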
python
""" Django settings for monitoramento project. Generated by 'django-admin startproject' using Django 3.1.7. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ from pathlib import Path, os from datetime import timedelta import django_on_heroku from decouple import config # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = config('SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = config('DEBUG',cast=bool,default=False) ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'rest_framework', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'noticias', 'rest_framework_simplejwt', 'django_filters', 'api', 'users', 'rest_framework_simplejwt.token_blacklist', 'drf_yasg', 'django.contrib.postgres', 'crispy_forms', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'monitoramento.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'noticias/templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'monitoramento.wsgi.application' # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'newsdb', 'USER':'stefano', 'PASSWORD': '389171', 'HOST':'localhost', 'PORT':'5432', } } # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = 'pt-BR' TIME_ZONE = 'America/Sao_Paulo' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.1/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'noticias/static') MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' # REST_FRAMEWORK = { # 'DEFAULT_AUTHENTICATION_CLASSES': [ # 'rest_framework_simplejwt.authentication.JWTAuthentication', # ], # 
'DEFAULT_PERMISSION_CLASSES': [ # 'rest_framework.permissions.IsAuthenticated', # ], # } REST_FRAMEWORK={ 'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend', 'rest_framework.filters.SearchFilter' ], 'DATE_INPUT_FORMATS': ["%d-%m-%Y"], 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework_simplejwt.authentication.JWTAuthentication', ], 'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.IsAuthenticatedOrReadOnly', ], # 'DEFAULT_SCHEMA_CLASS':'rest_framework.schemas.coreapi.AutoSchema' } AUTH_USER_MODEL='users.NewUser' SIMPLE_JWT = { 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=1), 'REFRESH_TOKEN_LIFETIME': timedelta(days=10), 'ROTATE_REFRESH_TOKENS': False, 'BLACKLIST_AFTER_ROTATION': True, 'ALGORITHM': 'HS256', 'SIGNING_KEY': SECRET_KEY, 'VERIFYING_KEY': None, 'AUTH_HEADER_TYPES': ('Bearer',), 'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION', 'USER_ID_FIELD': 'id', 'USER_ID_CLAIM': 'user_id', 'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',), 'TOKEN_TYPE_CLAIM': 'token_type', } django_on_heroku.settings(locals()) CRISPY_TEMPLATE_PACK='bootstrap4'
python
#!/usr/bin/env python

from subprocess import call
import sys
import subprocess
import dbus
import string
import os
import fcntl
import time
import pexpect
import glib
import gobject
import dbus.service
import dbus.mainloop.glib

DBUS_NAME = 'org.openbmc.UserManager'
INTF_NAME = 'org.openbmc.Enrol'
OBJ_NAME_GROUPS = '/org/openbmc/UserManager/Groups'
OBJ_NAME_GROUP = '/org/openbmc/UserManager/Group'
OBJ_NAME_USERS = '/org/openbmc/UserManager/Users'
OBJ_NAME_USER = '/org/openbmc/UserManager/User'

'''
    Object Path > /org/openbmc/UserManager/Groups
        Interface:Method > org.openbmc.Enrol.GroupAddSys string:"groupname"
        Interface:Method > org.openbmc.Enrol.GroupAddUsr string:"groupname"
        Interface:Method > org.openbmc.Enrol.GroupListUsr
        Interface:Method > org.openbmc.Enrol.GroupListSys
    Object Path > /org/openbmc/UserManager/Group
        Interface:Method > org.openbmc.Enrol.GroupDel string:"groupname"
    Object Path > /org/openbmc/UserManager/Users
        Interface:Method > org.openbmc.Enrol.UserAdd string:"comment" string:"username" string:"groupname" string:"passwd"
        Interface:Method > org.openbmc.Enrol.UserList
    Object Path > /org/openbmc/UserManager/User
        Interface:Method > org.openbmc.Enrol.UserDel string:"username"
        Interface:Method > org.openbmc.Enrol.Passwd string:"username" string:"passwd"
'''

userman_providers = {
    'pam': {
        'adduser': 'user add',
    },
    'ldap': {
        'adduser': 'ldap command to add user',
    },
}

class UserManGroups(dbus.service.Object):
    def __init__(self, bus, name):
        self.bus = bus
        self.name = name
        dbus.service.Object.__init__(self, bus, name)

    def setUsermanProvider(self, provider):
        self.provider = provider

    @dbus.service.method(INTF_NAME, "", "")
    def test(self):
        print("TEST")

    @dbus.service.method(INTF_NAME, "s", "x")
    def GroupAddUsr(self, groupname):
        if not groupname:
            raise ValueError("Invalid Groupname")

        groups = self.GroupListAll()
        if groupname in groups:
            raise ValueError("Group ", groupname, " Exists")

        r = call(["addgroup", groupname])
        return r

    @dbus.service.method(INTF_NAME, "s", "x")
    def GroupAddSys(self, groupname):
        if not groupname:
            raise ValueError("Invalid Groupname")

        groups = self.GroupListAll()
        if groupname in groups:
            raise ValueError("Group ", groupname, " Exists")

        r = call(["addgroup", "-S", groupname])
        return r

    @dbus.service.method(INTF_NAME, "", "as")
    def GroupListUsr(self):
        groupList = []
        with open("/etc/group", "r") as f:
            for grent in f:
                groupParams = grent.split(":")
                if (int(groupParams[2]) >= 1000
                        and int(groupParams[2]) != 65534):
                    groupList.append(groupParams[0])
        return groupList

    @dbus.service.method(INTF_NAME, "", "as")
    def GroupListSys(self):
        groupList = []
        with open("/etc/group", "r") as f:
            for grent in f:
                groupParams = grent.split(":")
                if (int(groupParams[2]) > 100
                        and int(groupParams[2]) < 1000):
                    groupList.append(groupParams[0])
        return groupList

    def GroupListAll(self):
        groupList = []
        with open("/etc/group", "r") as f:
            for grent in f:
                groupParams = grent.split(":")
                groupList.append(groupParams[0])
        return groupList

class UserManGroup(dbus.service.Object):
    def __init__(self, bus, name):
        self.bus = bus
        self.name = name
        dbus.service.Object.__init__(self, bus, name)

    def setUsermanProvider(self, provider):
        self.provider = provider

    @dbus.service.method(INTF_NAME, "", "")
    def test(self):
        print("TEST")

    # "s" in-signature: GroupDel takes the group name as argument
    @dbus.service.method(INTF_NAME, "s", "x")
    def GroupDel(self, groupname):
        if not groupname:
            raise ValueError("Invalid Groupname")

        groups = Groupsobj.GroupListAll()
        if groupname not in groups:
            raise ValueError("No such Group: ", groupname)

        r = call(["delgroup", groupname])
return r class UserManUsers (dbus.service.Object): def __init__(self, bus, name): self.bus = bus self.name = name dbus.service.Object.__init__(self,bus,name) def setUsermanProvider(self, provider): self.provider = provider @dbus.service.method(INTF_NAME, "", "") def test(self): print("TEST") @dbus.service.method(INTF_NAME, "ssss", "x") def UserAdd (self, gecos, username, groupname, passwd): if not username : raise ValueError("Invalid Username") users = self.UserListAll () if username in users : raise ValueError("User ", username, " Exists") if groupname: groups = Groupsobj.GroupListAll () if groupname not in groups: raise ValueError("No such Group: ", groupname) opts = "" if gecos: opts = " -g " + '"' + gecos + '"' if groupname: cmd = "adduser " + opts + " " + " -G " + groupname + " " + "-s /bin/sh" + " " + username else: cmd = "adduser " + opts + " " + "-s /bin/sh" + " " + username prompts = ['New password: ', 'Retype password: ', 'Re-enter new password: '] proc = pexpect.spawn (cmd) proc.expect (prompts) proc.sendline (passwd) proc.expect (prompts) proc.sendline (passwd) if proc.expect(prompts + [pexpect.EOF]) != len(prompts): proc.sendline (passwd) r = proc.wait() return r if r else 0 @dbus.service.method(INTF_NAME, "", "as") def UserList (self): userList = [] with open("/etc/passwd", "r") as f: for usent in f: userParams = usent.split (":") if (int(userParams[2]) >= 1000 and int(userParams[2]) != 65534): userList.append(userParams[0]) return userList def UserListAll (self): userList = [] with open("/etc/passwd", "r") as f: for usent in f: userParams = usent.split (":") userList.append(userParams[0]) return userList class UserManUser (dbus.service.Object): def __init__(self, bus, name): self.bus = bus self.name = name dbus.service.Object.__init__(self,bus,name) @dbus.service.method(INTF_NAME, "", "") def test(self): print("TEST") def setUsermanProvider(self, provider): self.provider = provider @dbus.service.method(INTF_NAME, "s", "x") def UserDel (self, username): if not username : raise ValueError("Invalid Username") users = Usersobj.UserList () if username not in users : raise ValueError("No such User: ", username) r = call (["deluser", username]) return r @dbus.service.method(INTF_NAME, "ss", "x") def Passwd (self, username, passwd): if not username : raise ValueError("Invalid Username") users = Usersobj.UserList () if username not in users : raise ValueError("No such User: ", username) cmd = "passwd" + " " + username prompts = ['New password: ', 'Retype password: ', 'Re-enter new password: '] proc = pexpect.spawn (cmd) proc.expect (prompts) proc.sendline (passwd) proc.expect (prompts) proc.sendline (passwd) if proc.expect(prompts + [pexpect.EOF]) != len(prompts): proc.sendline (passwd) r = proc.wait() return r if r else 0 def main(): dbus.mainloop.glib.DBusGMainLoop(set_as_default=True) bus = dbus.SystemBus() name = dbus.service.BusName(DBUS_NAME, bus) global Groupsobj global Groupobj global Usersobj global Userobj Groupsobj = UserManGroups (bus, OBJ_NAME_GROUPS) Groupobj = UserManGroup (bus, OBJ_NAME_GROUP) Usersobj = UserManUsers (bus, OBJ_NAME_USERS) Userobj = UserManUser (bus, OBJ_NAME_USER) Groupsobj.setUsermanProvider ("pam") Groupobj.setUsermanProvider ("pam") Usersobj.setUsermanProvider ("pam") Userobj.setUsermanProvider ("pam") mainloop = gobject.MainLoop() print("Started") mainloop.run() if __name__ == '__main__': sys.exit(main())
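# Client-side usage sketch (assumption: the service above is running and
# registered on the system bus under the names declared at the top of this
# file):
#
#   import dbus
#   bus = dbus.SystemBus()
#   users = bus.get_object(DBUS_NAME, OBJ_NAME_USERS)
#   print(users.UserList(dbus_interface=INTF_NAME))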
python
import bpy from bpy import data as D from bpy import context as C from mathutils import * from math import * # bpy.ops.mesh.primitive_grid_add( # x_subdivisions=10, y_subdivisions=10, # radius=1, view_align=False, enter_editmode=False, # location=(0, 0, 0), rotation=(0, 0, 0)) def new_grid(name='Grid', x_subdivisions=10, y_subdivisions=10, radius=1, location=(0, 0, 0), rotation=(0, 0, 0), scale=(1,1,1)): bpy.ops.object.select_all(action='DESELECT') bpy.ops.mesh.primitive_grid_add( x_subdivisions=x_subdivisions, y_subdivisions=y_subdivisions, radius=radius, location=location, rotation=rotation) bpy.context.object.scale = scale bpy.context.object.name = name return bpy.context.object x_scale = 1 x_subdivisions = 10 * x_scale y_scale = 20 y_subdivisions = 10 * y_scale g = new_grid(x_subdivisions=x_subdivisions, y_subdivisions=y_subdivisions, scale=(x_scale, y_scale, 1)) # g.data.vertices.foreach_set(attr, seq) import random for v in g.data.vertices: if abs(v.co.x) != 1 and abs(v.co.y) != 1: v.co += Vector((0, 0, random.uniform(0, 0.24))) else: v.co += Vector((0, 0, random.uniform(0.08, 0.16)))
python
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-03-29 19:55 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('core', '0009_auto_20170329_1938'), ] operations = [ migrations.RemoveField( model_name='event', name='event_description', ), migrations.AddField( model_name='event', name='description', field=models.TextField(null=True, verbose_name='Description'), ), migrations.AddField( model_name='event', name='subject', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Subject'), ), migrations.AddField( model_name='event', name='teacher', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Teacher'), ), migrations.AlterField( model_name='event', name='assigned_to', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Class', verbose_name='Assigned to class'), ), migrations.AlterField( model_name='event', name='user', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Student', verbose_name='Assigned to student'), ), ]
python
import logging
import re

from django.conf import settings
from django import forms
from django.db import models
from django.contrib.auth.models import User, AnonymousUser
from django.forms import FileField, CharField, Textarea, ValidationError
from django.core.validators import validate_email

try:
    from tower import ugettext_lazy as _
except ImportError:
    from django.utils.translation import ugettext_lazy as _

from badger.models import Award, Badge, Nomination

try:
    from taggit.managers import TaggableManager
except ImportError:
    TaggableManager = None

EMAIL_SEPARATOR_RE = re.compile(r'[,;\s]+')


class MyModelForm(forms.ModelForm):

    required_css_class = "required"
    error_css_class = "error"

    def as_ul(self):
        """Returns this form rendered as HTML <li>s -- excluding the
        <ul></ul>."""
        # TODO: l10n: This doesn't work for rtl languages
        return self._html_output(
            normal_row=(u'<li%(html_class_attr)s>%(label)s %(field)s'
                        '%(help_text)s%(errors)s</li>'),
            error_row=u'<li>%s</li>',
            row_ender='</li>',
            help_text_html=u' <p class="help">%s</p>',
            errors_on_separate_row=False)


class MyForm(forms.Form):

    required_css_class = "required"
    error_css_class = "error"

    def as_ul(self):
        """Returns this form rendered as HTML <li>s -- excluding the
        <ul></ul>."""
        # TODO: l10n: This doesn't work for rtl languages
        return self._html_output(
            normal_row=(u'<li%(html_class_attr)s>%(label)s %(field)s'
                        '%(help_text)s%(errors)s</li>'),
            error_row=u'<li>%s</li>',
            row_ender='</li>',
            help_text_html=u' <p class="help">%s</p>',
            errors_on_separate_row=False)


class MultipleItemsField(forms.Field):
    """Form field which accepts multiple text items"""
    # Based on https://docs.djangoproject.com/en/dev/ref/forms/validation/
    # #form-field-default-cleaning
    widget = Textarea

    def __init__(self, **kwargs):
        # Pop custom options off kwargs before handing the rest to the base
        # class; honour a caller-supplied separator_re instead of ignoring it.
        self.max_items = kwargs.pop('max_items', 10)
        self.separator_re = kwargs.pop('separator_re', EMAIL_SEPARATOR_RE)
        super(MultipleItemsField, self).__init__(**kwargs)

    def to_python(self, value):
        """Normalize data to a list of strings."""
        if not value:
            return []
        items = self.separator_re.split(value)
        return [i.strip() for i in items if i.strip()]

    def validate_item(self, item):
        return True

    def validate(self, value):
        """Check if value consists only of valid items."""
        super(MultipleItemsField, self).validate(value)

        # Enforce max number of items
        if len(value) > self.max_items:
            raise ValidationError(
                _(u'{num} items entered, only {maxnum} allowed').format(
                    num=len(value), maxnum=self.max_items))

        # Validate each of the items
        invalid_items = []
        for item in value:
            try:
                self.validate_item(item)
            except ValidationError:
                invalid_items.append(item)

        if len(invalid_items) > 0:
            # TODO: l10n: Not all languages separate with commas
            raise ValidationError(
                _(u'These items were invalid: {itemlist}').format(
                    itemlist=u', '.join(invalid_items)))


class MultiEmailField(MultipleItemsField):
    """Form field which accepts multiple email addresses"""
    def validate_item(self, item):
        validate_email(item)


class BadgeAwardForm(MyForm):
    """Form to create either a real or deferred badge award"""
    # TODO: Needs a captcha?

    emails = MultiEmailField(
        max_items=10,
        help_text=_(u'Enter up to 10 email addresses for badge award '
                    'recipients'))

    description = CharField(
        label='Explanation',
        widget=Textarea,
        required=False,
        help_text=_(u'Explain why this badge should be awarded'))


class DeferredAwardGrantForm(MyForm):
    """Form to grant a deferred badge award"""
    # TODO: Needs a captcha?
email = forms.EmailField() class MultipleClaimCodesField(MultipleItemsField): """Form field which accepts multiple DeferredAward claim codes""" def validate_item(self, item): from badger.models import DeferredAward try: DeferredAward.objects.get(claim_code=item) return True except DeferredAward.DoesNotExist: raise ValidationError(_(u'No such claim code, {claimcode}').format( claimcode=item)) class DeferredAwardMultipleGrantForm(MyForm): email = forms.EmailField( help_text=_(u'Email address to which claims should be granted')) claim_codes = MultipleClaimCodesField( help_text=_(u'Comma- or space-separated list of badge claim codes')) class BadgeEditForm(MyModelForm): class Meta: model = Badge fields = ('title', 'image', 'description',) # try: # # HACK: Add "tags" as a field only if the taggit app is available. # import taggit # fields += ('tags',) # except ImportError: # pass fields += ('unique',) required_css_class = "required" error_css_class = "error" def __init__(self, *args, **kwargs): super(BadgeEditForm, self).__init__(*args, **kwargs) # TODO: l10n: Pretty sure this doesn't work for rtl languages. # HACK: inject new templates into the image field, monkeypatched # without creating a subclass self.fields['image'].widget.template_with_clear = u''' <p class="clear">%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label></p> ''' # TODO: l10n: Pretty sure this doesn't work for rtl languages. self.fields['image'].widget.template_with_initial = u''' <div class="clearablefileinput"> <p>%(initial_text)s: %(initial)s</p> %(clear_template)s <p>%(input_text)s: %(input)s</p> </div> ''' class BadgeNewForm(BadgeEditForm): class Meta(BadgeEditForm.Meta): pass def __init__(self, *args, **kwargs): super(BadgeNewForm, self).__init__(*args, **kwargs) class BadgeSubmitNominationForm(MyForm): """Form to submit badge nominations""" emails = MultiEmailField(max_items=10, help_text=_( u'Enter up to 10 email addresses for badge award nominees'))
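# Usage sketch (hypothetical addresses; assumes Django settings are already
# configured, since form fields need them to run validation):
#
#   field = MultiEmailField(max_items=3)
#   field.clean(u'[email protected], [email protected]')
#   # -> [u'[email protected]', u'[email protected]']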
python
# Copyright 2021 VMware, Inc. # SPDX-License-Identifier: Apache-2.0 import json import os from unittest.mock import patch from click.testing import CliRunner from py._path.local import LocalPath from pytest_httpserver.pytest_plugin import PluginHTTPServer from taurus_datajob_api import DataJobDeployment from taurus_datajob_api import DataJobExecution from vdk.internal import test_utils from vdk.internal.control.command_groups.job.execute import execute from werkzeug import Response test_utils.disable_vdk_authentication() def test_execute(httpserver: PluginHTTPServer, tmpdir: LocalPath): rest_api_url = httpserver.url_for("") team_name = "test-team" job_name = "test-job" httpserver.expect_request( uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/deployments/production/executions", method="POST", ).respond_with_response( Response( status=200, headers=dict( Location=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/executions/foo" ), ) ) runner = CliRunner() result = runner.invoke( execute, ["-n", job_name, "-t", team_name, "--start", "-u", rest_api_url] ) assert result.exit_code == 0, ( f"result exit code is not 0, result output: {result.output}, " f"result.exception: {result.exception}" ) def test_cancel(httpserver: PluginHTTPServer, tmpdir: LocalPath): rest_api_url = httpserver.url_for("") team_name = "test-team" job_name = "test-job" execution_id = "test-execution" httpserver.expect_request( uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/executions/{execution_id}", method="DELETE", ).respond_with_response(Response(status=200, headers={})) runner = CliRunner() result = runner.invoke( execute, [ "-n", job_name, "-t", team_name, "-i", execution_id, "--cancel", "-u", rest_api_url, ], ) assert result.exit_code == 0, ( f"result exit code is not 0, result output: {result.output}, " f"result.exception: {result.exception}" ) def test_execute_without_url(httpserver: PluginHTTPServer, tmpdir: LocalPath): runner = CliRunner() result = runner.invoke(execute, ["-n", "job_name", "-t", "team_name", "-u", ""]) assert ( result.exit_code == 2 ), f"result exit code is not 2, result output: {result.output}, exc: {result.exc_info}" assert "what" in result.output and "why" in result.output def test_execute_with_empty_url(httpserver: PluginHTTPServer, tmpdir: LocalPath): runner = CliRunner() result = runner.invoke(execute, ["-n", "job_name", "-t", "team_name", "-u", ""]) assert ( result.exit_code == 2 ), f"result exit code is not 2, result output: {result.output}, exc: {result.exc_info}" assert "what" in result.output and "why" in result.output def test_execute_start_output_text(httpserver: PluginHTTPServer, tmpdir: LocalPath): rest_api_url = httpserver.url_for("") team_name = "test-team" job_name = "test-job" httpserver.expect_request( uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/deployments/production/executions", method="POST", ).respond_with_response( Response( status=200, headers=dict( Location=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/executions/foo" ), ) ) runner = CliRunner() result = runner.invoke( execute, ["-n", job_name, "-t", team_name, "--start", "-u", rest_api_url] ) assert f"-n {job_name}" in result.output assert f"-t {team_name}" in result.output def test_execute_start_output_json(httpserver: PluginHTTPServer, tmpdir: LocalPath): rest_api_url = httpserver.url_for("") team_name = "test-team" job_name = "test-job" httpserver.expect_request( uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/deployments/production/executions", method="POST", ).respond_with_response( 
Response( status=200, headers=dict( Location=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/executions/foo" ), ) ) runner = CliRunner() result = runner.invoke( execute, ["-n", job_name, "-t", team_name, "--start", "-u", rest_api_url, "-o", "json"], ) json_output = json.loads(result.output) assert job_name == json_output.get("job_name") assert team_name == json_output.get("team") def test_execute_with_exception(httpserver: PluginHTTPServer, tmpdir: LocalPath): runner = CliRunner() result = runner.invoke( execute, ["--start", "-n", "job_name", "-t", "team_name", "-u", "localhost"] ) assert ( result.exit_code == 2 ), f"result exit code is not 2, result output: {result.output}, exc: {result.exc_info}" assert "what" in result.output and "why" in result.output def test_execute_no_execution_id(httpserver: PluginHTTPServer, tmpdir: LocalPath): rest_api_url = httpserver.url_for("") team_name = "test-team" job_name = "test-job" execution: DataJobExecution = DataJobExecution( id="1", job_name=job_name, logs_url="", deployment=DataJobDeployment(), start_time="2021-09-24T14:14:03.922Z", ) older_execution = DataJobExecution( id="2", job_name=job_name, logs_url="", deployment=DataJobDeployment(), start_time="2020-09-24T14:14:03.922Z", ) httpserver.expect_request( uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/executions", method="GET", ).respond_with_json( [older_execution.to_dict(), execution.to_dict(), older_execution.to_dict()] ) httpserver.expect_request( uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/executions/1/logs", method="GET", ).respond_with_json({"logs": "We are the logs! We are awesome!"}) runner = CliRunner() result = runner.invoke( execute, ["-n", job_name, "-t", team_name, "--logs", "-u", rest_api_url], ) test_utils.assert_click_status(result, 0) assert result.output.strip() == "We are the logs! We are awesome!".strip() def test_execute_logs_using_api(httpserver: PluginHTTPServer, tmpdir: LocalPath): rest_api_url = httpserver.url_for("") team_name = "test-team" job_name = "test-job" id = "1" execution: DataJobExecution = DataJobExecution( id=id, job_name=job_name, logs_url="", deployment=DataJobDeployment() ) httpserver.expect_request( uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/executions/1", method="GET", ).respond_with_json(execution.to_dict()) httpserver.expect_request( uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/executions/1/logs", method="GET", ).respond_with_json({"logs": "We are the logs! We are awesome!"}) runner = CliRunner() result = runner.invoke( execute, ["-n", job_name, "-t", team_name, "-i", id, "--logs", "-u", rest_api_url], ) test_utils.assert_click_status(result, 0) assert result.output.strip() == "We are the logs! 
We are awesome!".strip()


def test_execute_logs_with_external_log_url(
    httpserver: PluginHTTPServer, tmpdir: LocalPath
):
    rest_api_url = httpserver.url_for("")
    team_name = "test-team"
    job_name = "test-job"
    id = "1"

    execution: DataJobExecution = DataJobExecution(
        id=id,
        job_name=job_name,
        logs_url="http://external-service-job-logs",
        deployment=DataJobDeployment(),
    )
    httpserver.expect_request(
        uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/executions/1",
        method="GET",
    ).respond_with_json(execution.to_dict())

    with patch("webbrowser.open") as mock_browser_open:
        mock_browser_open.return_value = False

        runner = CliRunner()
        result = runner.invoke(
            execute,
            ["-n", job_name, "-t", team_name, "-i", id, "--logs", "-u", rest_api_url],
        )
        test_utils.assert_click_status(result, 0)
        mock_browser_open.assert_called_once_with("http://external-service-job-logs")


def test_execute_start_extra_arguments_invalid_json(
    httpserver: PluginHTTPServer, tmpdir: LocalPath
):
    rest_api_url = httpserver.url_for("")
    team_name = "test-team"
    job_name = "test-job"

    httpserver.expect_request(
        uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/deployments/production/executions",
        method="POST",
    )

    runner = CliRunner()
    result = runner.invoke(
        execute,
        [
            "-n",
            job_name,
            "-t",
            team_name,
            "--start",
            "-u",
            rest_api_url,
            "--arguments",
            '{key1": "value1", "key2": "value2"}',  # deliberately invalid JSON
        ],
    )

    assert (
        result.exit_code == 2
    ), f"Result exit code not 2. result output {result.output}, exc: {result.exc_info}"
    assert "Failed to validate job arguments" in result.output
    assert "what" in result.output and "why" in result.output
    assert "Make sure provided --arguments is a valid JSON string." in result.output


def test_execute_start_extra_arguments(httpserver: PluginHTTPServer, tmpdir: LocalPath):
    rest_api_url = httpserver.url_for("")
    team_name = "test-team"
    job_name = "test-job"
    arguments = '{"key1": "value1", "key2": "value2"}'

    httpserver.expect_request(
        uri=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/deployments/production/executions",
        method="POST",
        json=json.loads(
            '{"args": {"key1": "value1", "key2": "value2"}, "started_by": "vdk-control-cli"}'
        ),
    ).respond_with_response(
        Response(
            status=200,
            headers=dict(
                Location=f"/data-jobs/for-team/{team_name}/jobs/{job_name}/executions/foo"
            ),
        )
    )

    runner = CliRunner()
    result = runner.invoke(
        execute,
        [
            "-n",
            job_name,
            "-t",
            team_name,
            "--start",
            "-u",
            rest_api_url,
            "--arguments",
            arguments,
        ],
    )
    assert (
        result.exit_code == 0
    ), f"Result exit code not 0. result output {result.output}, exc: {result.exc_info}"
python
import cv2 as cv
import numpy as np

pathj = 'D:\\MyProjects\\WearGlasses\\I.jpg'
pathg = 'D:\\MyProjects\\WearGlasses\\glasses.png'
pathf = 'D:\\MyProjects\\WearGlasses\\haarcascade_frontalface_default.xml'
pathe = 'D:\\MyProjects\\WearGlasses\\haarcascade_eye.xml'

def wear():
    glasses = cv.imread(pathg)
    face_cascade = cv.CascadeClassifier(pathf)
    eye_cascade = cv.CascadeClassifier(pathe)
    # Open the camera once, outside the loop, instead of per frame
    cap = cv.VideoCapture(0)
    while True:
        centers = []
        ret, img = cap.read()
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 3)
        for (x, y, w, h) in faces:
            # Crop the face region with its own width, not its height
            face_re = img[y:y+h, x:x+w]
            face_re_g = gray[y:y+h, x:x+w]
            eyes = eye_cascade.detectMultiScale(face_re_g)
            for (ex, ey, ew, eh) in eyes:
                cv.rectangle(face_re, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
                centers.append((x+int(ex+0.5*ew), y+int(ey+0.5*eh),
                                x + int(0.6*ex), y+ey))
        # Two eye centers are needed to size and place the glasses;
        # indexing centers[1] with fewer would raise an IndexError
        if len(centers) >= 2:
            eye_w = 2.0*abs(centers[1][0]-centers[0][0])
            overlay_img = np.zeros(img.shape, np.uint8)
            gls_h, gls_w = glasses.shape[:2]
            k = eye_w/gls_w
            overlay_glasses = cv.resize(glasses, None, fx=k, fy=k,
                                        interpolation=cv.INTER_AREA)
            x = centers[0][0] if centers[0][0] < centers[1][0] else centers[1][0]
            y = centers[0][1] if centers[0][1] < centers[1][1] else centers[1][1]
            startx = centers[0][2] if centers[0][2] < centers[1][2] else centers[1][2]
            starty = centers[0][3]
            h, w = overlay_glasses.shape[:2]
            overlay_img[starty:starty+h, startx:startx+w] = overlay_glasses
            # Threshold the overlay: pixels of the resized glasses brighter
            # than 110 are masked out of the camera frame, drawing the
            # glasses as a dark silhouette
            gray_glasses = cv.cvtColor(overlay_img, cv.COLOR_BGR2GRAY)
            ret, mask = cv.threshold(gray_glasses, 110, 255, cv.THRESH_BINARY)
            mask_inv = cv.bitwise_not(mask)
            finalImg = cv.bitwise_and(img, img, mask=mask_inv)
        else:
            finalImg = img
        cv.imshow("Wear =|=", finalImg)
        if cv.waitKey(10) == 27:
            break
    cap.release()
    cv.destroyAllWindows()

if __name__ == '__main__':
    wear()
python
print("Phuong Hoang is here") # move the code in to githep # git add . # git commit -m "remove ld" # git push #git pull origin master
python
""" Zemberek: Histogram Example Original Java Example: https://bit.ly/2PmUyIV """ from os.path import join from jpype import ( JClass, JInt, JString, getDefaultJVMPath, java, shutdownJVM, startJVM) if __name__ == '__main__': ZEMBEREK_PATH: str = join('..', '..', 'bin', 'zemberek-full.jar') startJVM( getDefaultJVMPath(), '-ea', f'-Djava.class.path={ZEMBEREK_PATH}', convertStrings=False ) Histogram: JClass = JClass('zemberek.core.collections.Histogram') histogram_1: Histogram = Histogram() histogram_1.add( ['apple', 'pear', 'grape', 'apple', 'apple', 'appricot', 'grape'] ) histogram_2: Histogram = Histogram() histogram_2.add(['apple', 'apple', 'banana']) print('Histogram 1:', histogram_1) print('Histogram 2:', histogram_2) print('\nHistogram 1, Keys:', histogram_1.getKeySet()) print('Histogram 2, Keys:', histogram_2.getKeySet()) print('\nHistogram 1, Sorted Keys:', histogram_1.getSortedList()) print('Histogram 2, Sorted Keys:', histogram_2.getSortedList()) print('\nHistogram 1, Entries:', histogram_1.getEntryList()) print('Histogram 2, Entries:', histogram_2.getEntryList()) print('\nHistogram 1, Sorted Entries:', histogram_1.getSortedEntryList()) print('Histogram 2, Sorted Entries:', histogram_2.getSortedEntryList()) print('\nHistogram 1, Total Count:', histogram_1.totalCount()) print('Histogram 2, Total Count:', histogram_2.totalCount()) print( '\nIntersection of Histogram 1 and 2:', histogram_1.getIntersectionOfKeys(histogram_2) ) print('\nHistogram 1, Size:', histogram_1.size()) print('Histogram 2, Size:', histogram_2.size()) print( '\nHistogram 1, \'apple\' Count:', histogram_1.getCount(JString('apple')) ) print( 'Histogram 2, \'apple\' Count:', histogram_2.getCount(JString('apple')) ) print( '\nHistogram 1, Contains \'grape\':', histogram_1.contains(JString('grape')) ) print( 'Histogram 2, Contains \'grape\':', histogram_2.contains(JString('grape')) ) print('\nHistogram 1, Top 3:', histogram_1.getTop(JInt(3))) print('Histogram 2, Top 3:', histogram_2.getTop(JInt(3))) print('\nHistogram 1, Less Than 2:', histogram_1.sizeSmaller(JInt(2))) print('Histogram 2, Less Than 2:', histogram_2.sizeSmaller(JInt(2))) print('\nHistogram 1, More Than 2:', histogram_1.sizeLarger(JInt(2))) print('Histogram 2, More Than 2:', histogram_2.sizeLarger(JInt(2))) print( '\nHistogram 1, Between 1 and 3:', histogram_1.totalCount(JInt(1), JInt(3)) ) print( 'Histogram 2, Between 1 and 3:', histogram_2.totalCount(JInt(1), JInt(3)) ) print('\nHistogram 1, Max Count:', histogram_1.maxValue()) print('Histogram 2, Max Count:', histogram_2.maxValue()) print('\nHistogram 1, Min Count:', histogram_1.minValue()) print('Histogram 2, Min Count:', histogram_2.minValue()) print( '\nHistogram 1, Equals to 2:', histogram_1.getItemsWithCount(JInt(2)) ) print( 'Histogram 2, Equals to 2:', histogram_2.getItemsWithCount(JInt(2)) ) print( '\nHistogram 1, >= 2 AND <= 3:', histogram_1.getItemsWithCount(JInt(2)), JInt(3) ) print( 'Histogram 2, >= 2 AND <= 3:', histogram_2.getItemsWithCount(JInt(2), JInt(3)) ) print( '\nHistogram 1, % of >= 2 AND <= 3:', histogram_1.countPercent(JInt(2), JInt(3)) ) print( 'Histogram 2, % of >= 2 AND <= 3:', histogram_2.countPercent(JInt(2), JInt(3)) ) print('\nHistogram 1, Sorted:', histogram_1.getSortedList()) print('Histogram 2, Sorted:', histogram_2.getSortedList()) print('\nHistogram 1, More Than 2:', histogram_1.sizeLarger(2)) print('Histogram 2, More Than 2:', histogram_2.sizeLarger(2)) print( '\nHistogram 1, Contains Apple:', histogram_1.contains(JString('apple'))) print( 'Histogram 2, Contains 
Apple:', histogram_2.contains(JString('apple')) ) histogram_1.set(JString('apple'), 5) histogram_2.set(JString('apple'), 5) print('\nHistogram 1, Set Apple Count to 5:', histogram_1.getEntryList()) print('Histogram 2, Set Apple Count to 5:', histogram_2.getEntryList()) histogram_1.remove(JString('apple')) histogram_2.remove(JString('apple')) print('\nHistogram 1, Remove Apple:', histogram_1.getEntryList()) print('Histogram 2, Remove Apple:', histogram_2.getEntryList()) histogram_1.decrementIfPositive(JString('appricot')) histogram_2.decrementIfPositive(JString('appricot')) print( '\nHistogram 1, Decrease Appricot If Positive:', histogram_1.getEntryList() ) print( 'Histogram 2, Decrease Appricot If Positive:', histogram_2.getEntryList() ) remove: java.util.ArrayList = java.util.ArrayList() remove.add(JString('grape')) remove.add(JString('banana')) histogram_1.removeAll(remove) histogram_2.removeAll(remove) print( '\nHistogram 1, Remove All Grape and Banana:', histogram_1.getEntryList() ) print( 'Histogram 2, Remove All Grape and Banana:', histogram_2.getEntryList() ) shutdownJVM()
python
"""newskylabs/tools/bookblock/scripts/bookblock.py: Main of bookblock tool. Description bookblock - A tool to cut out pages from a scanned book. bookblock is a tool to cut out pages from a scanned book. When scanning a book each scan contains two book pages. The book cover on the other side in often consists out of two scans of half the size showing only the front or back cover. Further in most cases some pages might be blanc or not interesting and should be ignored. bookblock allowes to specify the size and offset of a page bounding box and a specification of the pages which should be extracted. The pages then can be previewed and finally cut out of the scan and saved to disk. """ __author__ = "Dietrich Bollmann" __email__ = "[email protected]" __copyright__ = "Copyright 2019 Dietrich Bollmann" __license__ = "Apache License 2.0, http://www.apache.org/licenses/LICENSE-2.0" __date__ = "2019/10/17" import sys, os, click from newskylabs.tools.bookblock.utils.settings import Settings from newskylabs.tools.bookblock.utils.generic import get_version_long # -i, --source-dir option_source_dir_help = "Directory where the scans are stored." option_source_dir_default = "/tmp" # -o, --target-dir option_target_dir_help = "Directory where the pages should be stored." option_target_dir_default = "/tmp" # -i, --source-file_format option_source_file_format_help = "File name format of the scans." option_source_file_format_default = 'scan%03d.png' # -o, --target-file_format option_target_file_format_help = "File name format for the pages." option_target_file_format_default = 'page%03d.png' # -p, --pages option_pages_help = "Specification of the pages to be cut out." option_pages_default = '1r,2-9lr,10l' # -g, --geometry option_geometry_help = "Geometry of the pages." option_geometry_default = '600x800+10+20' # -c, --image-mode option_image_mode_help = "Should I generate color or grayscale images?" option_image_mode_choice = ['color', 'grayscale'] option_image_mode_default = 'color' # -v, --view-mode option_view_mode_help = "View mode: " + \ "either show the scan with a bounding box marking the page - " + \ "or the resulting page." option_view_mode_choice = ['scan', 'page'] option_view_mode_default = 'page' # -e, --examples option_examples_help = "Show some usage examples." option_examples_default = False # -d, --debug option_debug_help = "Set the log level." 
option_debug_choice = ['trace', 'debug', 'info', 'warning', 'error', 'critical']
option_debug_default = 'warning'

command_context_settings = {'help_option_names': ['-h', '--help']}

@click.command(context_settings=command_context_settings)
@click.option('-i', '--source-dir', type=click.Path(exists=True),
              default=option_source_dir_default, help=option_source_dir_help)
@click.option('-o', '--target-dir', type=click.Path(exists=True),
              default=option_target_dir_default, help=option_target_dir_help)
@click.option('-s', '--source-file-format',
              default=option_source_file_format_default,
              help=option_source_file_format_help)
@click.option('-t', '--target-file-format',
              default=option_target_file_format_default,
              help=option_target_file_format_help)
@click.option('-p', '--pages',
              default=option_pages_default, help=option_pages_help)
@click.option('-g', '--geometry',
              default=option_geometry_default, help=option_geometry_help)
@click.option('-c', '--image-mode', type=click.Choice(option_image_mode_choice),
              default=option_image_mode_default, help=option_image_mode_help)
@click.option('-v', '--view-mode', type=click.Choice(option_view_mode_choice),
              default=option_view_mode_default, help=option_view_mode_help)
@click.option('-e', '--examples', is_flag=True,
              default=option_examples_default, help=option_examples_help)
@click.option('-d', '--debug', type=click.Choice(option_debug_choice),
              default=option_debug_default, help=option_debug_help)
@click.version_option(get_version_long(), '-V', '--version')
def bookblock(source_dir, target_dir,
              source_file_format, target_file_format,
              pages, geometry, image_mode, view_mode, examples, debug):
    """Cut out pages from book scans."""

    # Resetting `sys.argv':
    #
    # The bookblock command line options disturb Kivy:
    # See file site-packages/kivy/__init__.py :
    #
    #   try:
    #       opts, args = getopt(sys_argv[1:], 'hp:fkawFem:sr:dc:', [
    #           'help', 'fullscreen', 'windowed', 'fps', 'event',
    #           'module=', 'save', 'fake-fullscreen', 'auto-fullscreen',
    #           'multiprocessing-fork', 'display=', 'size=', 'rotate=',
    #           'config=', 'debug', 'dpi='])
    #
    #   except GetoptError as err:
    #       Logger.error('Core: %s' % str(err))
    #       kivy_usage()
    #
    # Example: the option `--source-dir <dir>' causes the following error:
    #
    #   Core: option --source-dir not recognized
    #
    # Therefore only the program name should remain in sys.argv
    # when starting to deal with Kivy code:
    sys.argv = sys.argv[:1]

    if debug in ['trace', 'debug', 'info']:
        print("DEBUG bookblock:")
        print("")
        print("  - source_dir:         {}".format(source_dir))
        print("  - target_dir:         {}".format(target_dir))
        print("  - source_file_format: {}".format(source_file_format))
        print("  - target_file_format: {}".format(target_file_format))
        print("  - pages:              {}".format(pages))
        print("  - geometry:           {}".format(geometry))
        print("  - image_mode:         {}".format(image_mode))
        print("  - view_mode:          {}".format(view_mode))
        print("  - examples:           {}".format(examples))
        print("  - debug:              {}".format(debug))

    # Show examples?
    if examples:
        print_examples()
        exit()

    # Settings
    settings = Settings() \
        .set_debug_level(debug) \
        .set_image_mode(image_mode) \
        .set_view_mode(view_mode) \
        .set_source_dir(source_dir) \
        .set_target_dir(target_dir) \
        .set_source_file_format(source_file_format) \
        .set_target_file_format(target_file_format) \
        .set_geometry(geometry) \
        .set_pages(pages)

    # Print settings
    settings.print_settings()

    # Hack to silently import Kivy's noisy logger:
    # The logger prints all kinds of messages before the log level can be set
    # and seems to ignore its config file log level settings as well
    # (Kivy's config is at ~/.kivy/config.ini)
    if debug not in ['trace', 'debug', 'info']:

        # Silence stderr
        orig_stderr = sys.stderr
        sys.stderr = open(os.devnull, "w")

        # Import Kivy's logger
        from kivy.logger import Logger, LOG_LEVELS

        # Set the log level
        Logger.setLevel(level=LOG_LEVELS.get(debug))

        # Restore stderr
        sys.stderr = orig_stderr

    # Start the GUI
    # For some reason BookBlockApp cannot be imported before
    # as it seems to interfere with click
    from newskylabs.tools.bookblock.gui.main import BookBlockApp
    app = BookBlockApp(settings)
    app.run()

    # done :)
    print("")
    print("Bye :)")
    print("")
    exit()

## =========================================================
## Main
## ---------------------------------------------------------

def print_examples():
    """Print examples."""

    print("""
Examples:

Generate color pages from the left and right sides of scans 0 to 99:

  bookblock \\
    --debug trace \\
    --source-dir ~/home/tmp/the-secret-garden/png \\
    --target-dir ~/home/tmp/pages \\
    --source-file-format the-secret-garden.%02d.png \\
    --target-file-format page%02d.png \\
    --geometry 1000x1600+22+41 \\
    --pages 0-99lr \\
    --image-mode color \\
    --view-mode scan

Generate color pages from the left sides of scans 0 and 1
and both sides of scans 2 to 56:

  bookblock \\
    --debug info \\
    --source-dir ~/home/tmp/the-secret-garden/png \\
    --target-dir ~/home/tmp/pages \\
    --source-file-format the-secret-garden.%02d.png \\
    --target-file-format page%02d.png \\
    --geometry 1000x1600+22+41 \\
    --pages 0-1l,2-56lr \\
    --image-mode color \\
    --view-mode scan

Generate color pages from the left sides of scans 0 and 1,
the right sides of scans 2, 6 and 7, both sides of scans 8 to 9
and both sides of scans 45 to 46:

  bookblock \\
    --debug warning \\
    --source-dir ~/home/tmp/the-secret-garden/png \\
    --target-dir ~/home/tmp/pages \\
    --source-file-format the-secret-garden.%02d.png \\
    --target-file-format page%02d.png \\
    --geometry 1000x1600+22+41 \\
    --pages 0-1l,2r,6r,7r,8-9lr,45-46lr \\
    --image-mode color \\
    --view-mode scan

Generate grayscale pages from the left sides of scans 0 and 1,
the right sides of scans 2, 6 and 7, both sides of scans 8 to 46:

  bookblock \\
    --source-dir ~/home/tmp/the-secret-garden/png \\
    --target-dir ~/home/tmp/pages \\
    --source-file-format the-secret-garden.%02d.png \\
    --target-file-format page%02d.png \\
    --geometry 1000x1600+22+41 \\
    --pages 0-1l,2r,6r,7r,8-46lr \\
    --image-mode grayscale \\
    --view-mode scan
""")

## =========================================================
## =========================================================
## fin.
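# A minimal sketch of how a pages spec such as the '--pages' values above
# (e.g. '0-1l,2r,8-9lr') could be expanded into (scan, side) pairs. The
# helper name `parse_pages_spec` is hypothetical and not part of bookblock;
# the tool's real parser may differ.
def parse_pages_spec(spec):
    pages = []
    for part in spec.split(','):
        scans_part = part.rstrip('lr')
        sides = part[len(scans_part):] or 'lr'  # default assumed: both sides
        if '-' in scans_part:
            first, last = map(int, scans_part.split('-'))
            scans = range(first, last + 1)
        else:
            scans = [int(scans_part)]
        for scan in scans:
            for side in sides:
                pages.append((scan, side))
    return pages

# e.g. parse_pages_spec('0-1l,2r') == [(0, 'l'), (1, 'l'), (2, 'r')]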
python
#!/usr/bin/env vpython # Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. #pylint: disable=protected-access import json import os import tempfile import unittest import mock from pyfakefs import fake_filesystem_unittest from skia_gold_common import skia_gold_properties from skia_gold_common import skia_gold_session from skia_gold_common import unittest_utils createSkiaGoldArgs = unittest_utils.createSkiaGoldArgs def assertArgWith(test, arg_list, arg, value): i = arg_list.index(arg) test.assertEqual(arg_list[i + 1], value) class SkiaGoldSessionRunComparisonTest(fake_filesystem_unittest.TestCase): """Tests the functionality of SkiaGoldSession.RunComparison.""" def setUp(self): self.setUpPyfakefs() self._working_dir = tempfile.mkdtemp() @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Diff') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Compare') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Initialize') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Authenticate') def test_comparisonSuccess(self, auth_mock, init_mock, compare_mock, diff_mock): auth_mock.return_value = (0, None) init_mock.return_value = (0, None) compare_mock.return_value = (0, None) keys_file = os.path.join(self._working_dir, 'keys.json') with open(os.path.join(self._working_dir, 'keys.json'), 'w') as f: json.dump({}, f) session = skia_gold_session.SkiaGoldSession(self._working_dir, None, keys_file, None, None) status, _ = session.RunComparison(None, None, None) self.assertEqual(status, skia_gold_session.SkiaGoldSession.StatusCodes.SUCCESS) self.assertEqual(auth_mock.call_count, 1) self.assertEqual(init_mock.call_count, 1) self.assertEqual(compare_mock.call_count, 1) self.assertEqual(diff_mock.call_count, 0) @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Diff') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Compare') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Initialize') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Authenticate') def test_authFailure(self, auth_mock, init_mock, compare_mock, diff_mock): auth_mock.return_value = (1, 'Auth failed') session = skia_gold_session.SkiaGoldSession(self._working_dir, None, None, None, None) status, error = session.RunComparison(None, None, None) self.assertEqual(status, skia_gold_session.SkiaGoldSession.StatusCodes.AUTH_FAILURE) self.assertEqual(error, 'Auth failed') self.assertEqual(auth_mock.call_count, 1) self.assertEqual(init_mock.call_count, 0) self.assertEqual(compare_mock.call_count, 0) self.assertEqual(diff_mock.call_count, 0) @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Diff') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Compare') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Initialize') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Authenticate') def test_initFailure(self, auth_mock, init_mock, compare_mock, diff_mock): auth_mock.return_value = (0, None) init_mock.return_value = (1, 'Init failed') session = skia_gold_session.SkiaGoldSession(self._working_dir, None, None, None, None) status, error = session.RunComparison(None, None, None) self.assertEqual(status, skia_gold_session.SkiaGoldSession.StatusCodes.INIT_FAILURE) self.assertEqual(error, 'Init failed') self.assertEqual(auth_mock.call_count, 1) self.assertEqual(init_mock.call_count, 1) self.assertEqual(compare_mock.call_count, 0) self.assertEqual(diff_mock.call_count, 0) 
@mock.patch.object(skia_gold_session.SkiaGoldSession, 'Diff') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Compare') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Initialize') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Authenticate') def test_compareFailureRemote(self, auth_mock, init_mock, compare_mock, diff_mock): auth_mock.return_value = (0, None) init_mock.return_value = (0, None) compare_mock.return_value = (1, 'Compare failed') args = createSkiaGoldArgs(local_pixel_tests=False) sgp = skia_gold_properties.SkiaGoldProperties(args) keys_file = os.path.join(self._working_dir, 'keys.json') with open(os.path.join(self._working_dir, 'keys.json'), 'w') as f: json.dump({}, f) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, keys_file, None, None) status, error = session.RunComparison(None, None, None) self.assertEqual( status, skia_gold_session.SkiaGoldSession.StatusCodes.COMPARISON_FAILURE_REMOTE) self.assertEqual(error, 'Compare failed') self.assertEqual(auth_mock.call_count, 1) self.assertEqual(init_mock.call_count, 1) self.assertEqual(compare_mock.call_count, 1) self.assertEqual(diff_mock.call_count, 0) @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Diff') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Compare') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Initialize') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Authenticate') def test_compareFailureLocal(self, auth_mock, init_mock, compare_mock, diff_mock): auth_mock.return_value = (0, None) init_mock.return_value = (0, None) compare_mock.return_value = (1, 'Compare failed') diff_mock.return_value = (0, None) args = createSkiaGoldArgs(local_pixel_tests=True) sgp = skia_gold_properties.SkiaGoldProperties(args) keys_file = os.path.join(self._working_dir, 'keys.json') with open(os.path.join(self._working_dir, 'keys.json'), 'w') as f: json.dump({}, f) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, keys_file, None, None) status, error = session.RunComparison(None, None, 'Definitely an output manager') self.assertEqual( status, skia_gold_session.SkiaGoldSession.StatusCodes.COMPARISON_FAILURE_LOCAL) self.assertEqual(error, 'Compare failed') self.assertEqual(auth_mock.call_count, 1) self.assertEqual(init_mock.call_count, 1) self.assertEqual(compare_mock.call_count, 1) self.assertEqual(diff_mock.call_count, 1) @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Diff') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Compare') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Initialize') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Authenticate') def test_diffFailure(self, auth_mock, init_mock, compare_mock, diff_mock): auth_mock.return_value = (0, None) init_mock.return_value = (0, None) compare_mock.return_value = (1, 'Compare failed') diff_mock.return_value = (1, 'Diff failed') args = createSkiaGoldArgs(local_pixel_tests=True) sgp = skia_gold_properties.SkiaGoldProperties(args) keys_file = os.path.join(self._working_dir, 'keys.json') with open(os.path.join(self._working_dir, 'keys.json'), 'w') as f: json.dump({}, f) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, keys_file, None, None) status, error = session.RunComparison(None, None, 'Definitely an output manager') self.assertEqual( status, skia_gold_session.SkiaGoldSession.StatusCodes.LOCAL_DIFF_FAILURE) self.assertEqual(error, 'Diff failed') self.assertEqual(auth_mock.call_count, 1) self.assertEqual(init_mock.call_count, 1) 
self.assertEqual(compare_mock.call_count, 1) self.assertEqual(diff_mock.call_count, 1) @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Diff') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Compare') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Initialize') @mock.patch.object(skia_gold_session.SkiaGoldSession, 'Authenticate') def test_noOutputManagerLocal(self, auth_mock, init_mock, compare_mock, diff_mock): auth_mock.return_value = (0, None) init_mock.return_value = (0, None) compare_mock.return_value = (1, 'Compare failed') diff_mock.return_value = (0, None) args = createSkiaGoldArgs(local_pixel_tests=True) sgp = skia_gold_properties.SkiaGoldProperties(args) keys_file = os.path.join(self._working_dir, 'keys.json') with open(os.path.join(self._working_dir, 'keys.json'), 'w') as f: json.dump({}, f) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, keys_file, None, None) status, error = session.RunComparison(None, None, None) self.assertEqual( status, skia_gold_session.SkiaGoldSession.StatusCodes.NO_OUTPUT_MANAGER) self.assertEqual(error, 'No output manager for local diff images') self.assertEqual(auth_mock.call_count, 1) self.assertEqual(compare_mock.call_count, 1) self.assertEqual(diff_mock.call_count, 0) class SkiaGoldSessionAuthenticateTest(fake_filesystem_unittest.TestCase): """Tests the functionality of SkiaGoldSession.Authenticate.""" def setUp(self): self.setUpPyfakefs() self._working_dir = tempfile.mkdtemp() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandOutputReturned(self, cmd_mock): cmd_mock.return_value = (1, 'Something bad :(') args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) rc, stdout = session.Authenticate() self.assertEqual(cmd_mock.call_count, 1) self.assertEqual(rc, 1) self.assertEqual(stdout, 'Something bad :(') @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_bypassSkiaGoldFunctionality(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a', bypass_skia_gold_functionality=True) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) rc, _ = session.Authenticate() self.assertEqual(rc, 0) cmd_mock.assert_not_called() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_shortCircuitAlreadyAuthenticated(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) session._authenticated = True rc, _ = session.Authenticate() self.assertEqual(rc, 0) cmd_mock.assert_not_called() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_successSetsShortCircuit(self, cmd_mock): cmd_mock.return_value = (0, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) self.assertFalse(session._authenticated) rc, _ = session.Authenticate() self.assertEqual(rc, 0) self.assertTrue(session._authenticated) cmd_mock.assert_called_once() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def 
test_failureDoesNotSetShortCircuit(self, cmd_mock): cmd_mock.return_value = (1, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) self.assertFalse(session._authenticated) rc, _ = session.Authenticate() self.assertEqual(rc, 1) self.assertFalse(session._authenticated) cmd_mock.assert_called_once() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandWithUseLuciTrue(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) session.Authenticate(use_luci=True) self.assertIn('--luci', cmd_mock.call_args[0][0]) @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandWithUseLuciFalse(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a', local_pixel_tests=True) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) session.Authenticate(use_luci=False) self.assertNotIn('--luci', cmd_mock.call_args[0][0]) @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandWithUseLuciFalseNotLocal(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a', local_pixel_tests=False) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) with self.assertRaises(RuntimeError): session.Authenticate(use_luci=False) @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandCommonArgs(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) session.Authenticate() call_args = cmd_mock.call_args[0][0] self.assertIn('auth', call_args) assertArgWith(self, call_args, '--work-dir', self._working_dir) class SkiaGoldSessionInitializeTest(fake_filesystem_unittest.TestCase): """Tests the functionality of SkiaGoldSession.Initialize.""" def setUp(self): self.setUpPyfakefs() self._working_dir = tempfile.mkdtemp() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_bypassSkiaGoldFunctionality(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a', bypass_skia_gold_functionality=True) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) rc, _ = session.Initialize() self.assertEqual(rc, 0) cmd_mock.assert_not_called() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_shortCircuitAlreadyInitialized(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) session._initialized = True rc, _ = session.Initialize() self.assertEqual(rc, 0) cmd_mock.assert_not_called() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_successSetsShortCircuit(self, 
cmd_mock): cmd_mock.return_value = (0, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) self.assertFalse(session._initialized) rc, _ = session.Initialize() self.assertEqual(rc, 0) self.assertTrue(session._initialized) cmd_mock.assert_called_once() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_failureDoesNotSetShortCircuit(self, cmd_mock): cmd_mock.return_value = (1, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) self.assertFalse(session._initialized) rc, _ = session.Initialize() self.assertEqual(rc, 1) self.assertFalse(session._initialized) cmd_mock.assert_called_once() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandCommonArgs(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, 'keys_file', 'corpus', instance='instance') session.Initialize() call_args = cmd_mock.call_args[0][0] self.assertIn('imgtest', call_args) self.assertIn('init', call_args) self.assertIn('--passfail', call_args) assertArgWith(self, call_args, '--instance', 'instance') assertArgWith(self, call_args, '--corpus', 'corpus') assertArgWith(self, call_args, '--keys-file', 'keys_file') assertArgWith(self, call_args, '--work-dir', self._working_dir) assertArgWith(self, call_args, '--failure-file', session._triage_link_file) assertArgWith(self, call_args, '--commit', 'a') @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandTryjobArgs(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a', gerrit_issue=1, gerrit_patchset=2, buildbucket_id=3) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) session.Initialize() call_args = cmd_mock.call_args[0][0] assertArgWith(self, call_args, '--issue', '1') assertArgWith(self, call_args, '--patchset', '2') assertArgWith(self, call_args, '--jobid', '3') assertArgWith(self, call_args, '--crs', 'gerrit') assertArgWith(self, call_args, '--cis', 'buildbucket') @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandTryjobArgsMissing(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) session.Initialize() call_args = cmd_mock.call_args[0][0] self.assertNotIn('--issue', call_args) self.assertNotIn('--patchset', call_args) self.assertNotIn('--jobid', call_args) self.assertNotIn('--crs', call_args) self.assertNotIn('--cis', call_args) class SkiaGoldSessionCompareTest(fake_filesystem_unittest.TestCase): """Tests the functionality of SkiaGoldSession.Compare.""" def setUp(self): self.setUpPyfakefs() self._working_dir = tempfile.mkdtemp() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandOutputReturned(self, cmd_mock): cmd_mock.return_value = (1, 'Something bad :(') args = createSkiaGoldArgs(git_revision='a') sgp = 
skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) rc, stdout = session.Compare(None, None) self.assertEqual(cmd_mock.call_count, 1) self.assertEqual(rc, 1) self.assertEqual(stdout, 'Something bad :(') @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_bypassSkiaGoldFunctionality(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a', bypass_skia_gold_functionality=True) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) rc, _ = session.Compare(None, None) self.assertEqual(rc, 0) cmd_mock.assert_not_called() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandWithLocalPixelTestsTrue(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a', local_pixel_tests=True) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) session.Compare(None, None) self.assertIn('--dryrun', cmd_mock.call_args[0][0]) @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandWithLocalPixelTestsFalse(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a', local_pixel_tests=False) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) session.Compare(None, None) self.assertNotIn('--dryrun', cmd_mock.call_args[0][0]) @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandCommonArgs(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, 'keys_file', 'corpus', instance='instance') session.Compare('name', 'png_file') call_args = cmd_mock.call_args[0][0] self.assertIn('imgtest', call_args) self.assertIn('add', call_args) assertArgWith(self, call_args, '--test-name', 'name') assertArgWith(self, call_args, '--png-file', 'png_file') assertArgWith(self, call_args, '--work-dir', self._working_dir) @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_noLinkOnSuccess(self, cmd_mock): cmd_mock.return_value = (0, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, 'keys_file', None, None) rc, _ = session.Compare('name', 'png_file') self.assertEqual(rc, 0) self.assertEqual(session._comparison_results['name'].triage_link, None) self.assertNotEqual( session._comparison_results['name'].triage_link_omission_reason, None) @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_clLinkOnTrybot(self, cmd_mock): cmd_mock.return_value = (1, None) args = createSkiaGoldArgs(git_revision='a', gerrit_issue=1, gerrit_patchset=2, buildbucket_id=3) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, 'keys_file', None, instance='instance') rc, _ = session.Compare('name', 'png_file') self.assertEqual(rc, 1) self.assertNotEqual(session._comparison_results['name'].triage_link, None) 
self.assertEqual(session._comparison_results['name'].triage_link, 'https://instance-gold.skia.org/cl/gerrit/1') self.assertEqual( session._comparison_results['name'].triage_link_omission_reason, None) @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_individualLinkOnCi(self, cmd_mock): args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, 'keys_file', None, None) def WriteTriageLinkFile(_): with open(session._triage_link_file, 'w') as f: f.write('foobar') return (1, None) cmd_mock.side_effect = WriteTriageLinkFile rc, _ = session.Compare('name', 'png_file') self.assertEqual(rc, 1) self.assertNotEqual(session._comparison_results['name'].triage_link, None) self.assertEqual(session._comparison_results['name'].triage_link, 'foobar') self.assertEqual( session._comparison_results['name'].triage_link_omission_reason, None) @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_validOmissionOnIoError(self, cmd_mock): cmd_mock.return_value = (1, None) args = createSkiaGoldArgs(git_revision='a') sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, 'keys_file', None, None) def DeleteTriageLinkFile(_): os.remove(session._triage_link_file) return (1, None) cmd_mock.side_effect = DeleteTriageLinkFile rc, _ = session.Compare('name', 'png_file') self.assertEqual(rc, 1) self.assertEqual(session._comparison_results['name'].triage_link, None) self.assertNotEqual( session._comparison_results['name'].triage_link_omission_reason, None) self.assertIn( 'Failed to read', session._comparison_results['name'].triage_link_omission_reason) class SkiaGoldSessionDiffTest(fake_filesystem_unittest.TestCase): """Tests the functionality of SkiaGoldSession.Diff.""" def setUp(self): self.setUpPyfakefs() self._working_dir = tempfile.mkdtemp() @mock.patch.object(skia_gold_session.SkiaGoldSession, '_StoreDiffLinks') @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_commandOutputReturned(self, cmd_mock, _): cmd_mock.return_value = (1, 'Something bad :(') args = createSkiaGoldArgs(git_revision='a', local_pixel_tests=False) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) rc, stdout = session.Diff(None, None, None) self.assertEqual(cmd_mock.call_count, 1) self.assertEqual(rc, 1) self.assertEqual(stdout, 'Something bad :(') @mock.patch.object(skia_gold_session.SkiaGoldSession, '_RunCmdForRcAndOutput') def test_bypassSkiaGoldFunctionality(self, cmd_mock): cmd_mock.return_value = (None, None) args = createSkiaGoldArgs(git_revision='a', bypass_skia_gold_functionality=True) sgp = skia_gold_properties.SkiaGoldProperties(args) session = skia_gold_session.SkiaGoldSession(self._working_dir, sgp, None, None, None) with self.assertRaises(RuntimeError): session.Diff(None, None, None) class SkiaGoldSessionTriageLinkOmissionTest(fake_filesystem_unittest.TestCase): """Tests the functionality of SkiaGoldSession.GetTriageLinkOmissionReason.""" def setUp(self): self.setUpPyfakefs() self._working_dir = tempfile.mkdtemp() def _CreateSession(self): session = skia_gold_session.SkiaGoldSession(self._working_dir, None, None, None, None) session._comparison_results = { 'foo': skia_gold_session.SkiaGoldSession.ComparisonResults(), } return session def test_noComparison(self): session = 
self._CreateSession() session._comparison_results = {} reason = session.GetTriageLinkOmissionReason('foo') self.assertEqual(reason, 'No image comparison performed for foo') def test_validReason(self): session = self._CreateSession() session._comparison_results['foo'].triage_link_omission_reason = 'bar' reason = session.GetTriageLinkOmissionReason('foo') self.assertEqual(reason, 'bar') def test_onlyLocal(self): session = self._CreateSession() session._comparison_results['foo'].local_diff_given_image = 'bar' reason = session.GetTriageLinkOmissionReason('foo') self.assertEqual(reason, 'Gold only used to do a local image diff') def test_onlyWithoutTriageLink(self): session = self._CreateSession() session._comparison_results['foo'].triage_link = 'bar' with self.assertRaises(AssertionError): session.GetTriageLinkOmissionReason('foo') def test_resultsShouldNotExist(self): session = self._CreateSession() with self.assertRaises(RuntimeError): session.GetTriageLinkOmissionReason('foo') if __name__ == '__main__': unittest.main(verbosity=2)
python
from conans import ConanFile, CMake, tools import os import shutil class PhysfsConan(ConanFile): name = "physfs" version = "3.0.1" description = "Provides abstract access to various archives" topics = ("conan", "physfs", "physicsfs", "archive") url = "https://github.com/bincrafters/conan-physfs" homepage = "https://icculus.org/physfs/" license = "ZLIB" exports = "LICENSE.md" exports_sources = "CMakeLists.txt" generators = "cmake" settings = "os", "compiler", "build_type", "arch" options = {"shared": [True, False], "fPIC": [True, False]} default_options = {"shared": False, "fPIC": True} _source_subfolder = "source_subfolder" _build_subfolder = "build_subfolder" def config_options(self): del self.settings.compiler.libcxx del self.settings.compiler.cppstd if self.settings.os == 'Windows': del self.options.fPIC def source(self): folder = "{}-{}".format(self.name, self.version) tools.get("https://icculus.org/physfs/downloads/{}.tar.bz2".format(folder)) os.rename(folder, self._source_subfolder) def build(self): cmake = CMake(self) cmake.definitions["PHYSFS_BUILD_TEST"] = False cmake.configure(build_folder=self._build_subfolder) cmake.build() def package(self): self.copy("LICENSE.txt", dst="licenses", src=self._source_subfolder) self.copy("physfs.h", dst="include", src=os.path.join(self._source_subfolder, "src")) if self.options.shared: self.copy("*.dll", dst="bin", keep_path=False) self.copy("*.lib", dst="lib", keep_path=False, excludes="*-static.lib") self.copy("*.so*", dst="lib", keep_path=False, symlinks=True) self.copy("*.dylib", dst="lib", keep_path=False, symlinks=True) if self.settings.os == "Windows" and self.settings.compiler == "gcc": self.copy("*.a", dst="lib", keep_path=False, symlinks=True) else: self.copy("*-static.lib", dst="lib", keep_path=False) self.copy("*.a", dst="lib", keep_path=False) self.copy("*.pdb", dst="lib", keep_path=False) if self.settings.os == "Windows" and self.settings.compiler == "gcc": with tools.chdir(os.path.join(self.package_folder, "lib")): if os.path.isfile("objects.a"): shutil.move("objects.a", "libobjects.a") def package_info(self): self.cpp_info.libs = tools.collect_libs(self) if self.settings.os == "Macos": self.cpp_info.exelinkflags.extend(["-framework IOKit", "-framework Foundation"]) self.cpp_info.sharedlinkflags = self.cpp_info.exelinkflags
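# A minimal sketch (not part of this recipe) of a test_package/conanfile.py
# that a consumer could use to smoke-test the built package. The CMake
# project it builds (a test_package/CMakeLists.txt plus a small executable
# linking physfs) is assumed to exist and is hypothetical here.
#
# import os
# from conans import ConanFile, CMake
#
# class PhysfsTestConan(ConanFile):
#     settings = "os", "compiler", "build_type", "arch"
#     generators = "cmake"
#
#     def build(self):
#         cmake = CMake(self)
#         cmake.configure()
#         cmake.build()
#
#     def test(self):
#         # run_environment=True puts shared libraries on the path when needed
#         self.run(os.path.join("bin", "test_package"), run_environment=True)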
python
import os.path
import random


def gerar_id():
    return "AZ" + str(random.randrange(1, 1000))


def designar_arq():
    return gerar_id() + ".txt"


def formulario(fich):
    fich.write("Id: " + gerar_id() + "\n")
    fich.write("Nome: " + input("Nome: ").capitalize() + "\n")
    fich.write("Perfil: Docente\n")
    fich.write(input("Nome usuario: ") + "\n")
    fich.write(input("Palavra-passe: ") + "\n")
    fich.close()


path = "professor.txt"


def fill_up_list():
    users = []
    with open(path) as arquivo:
        for user in arquivo:
            users.append(user.strip())
    return users


def registar_prof(registar):
    if registar:
        arq = open(path, "w")
        formulario(arq)
    else:
        # fill_up_list() returns a list, so test its truthiness
        # (it is never None): write a fresh file when empty, append otherwise
        if not fill_up_list():
            fich = open(path, "w")
        else:
            fich = open(path, "a")
        formulario(fich)


def login(username, password):
    # both the username and the password must appear in the users file
    users = fill_up_list()
    return username in users and password in users


def registar_est():
    codigo_est = int(input("Codigo do estudante: "))
    # codigo_est is an int, so convert it before building the file name
    est = open(str(codigo_est) + ".txt", "w")
    est.write("Codigo: " + str(codigo_est) + "\n")
    est.write("Nome do estudante: " + input("Nome do estudante: ") + "\n")
    est.write("Curso: " + input("Curso: ") + "\n")
    est.close()


def pesquisar_est(codigo_est):
    est = []
    if os.path.exists(str(codigo_est) + ".txt"):
        with open(str(codigo_est) + ".txt") as arquivo:
            for estudante in arquivo:
                est.append(estudante.strip())
    return est


def actualizar_est(codigo_est):
    registos = pesquisar_est(codigo_est)
    if ("Codigo: " + str(codigo_est)) in registos:
        print(registos[0])
        print()


def main():
    if not os.path.exists(path):
        registar_prof(registar=True)
    elif login(input("Nome de usuario: "), input("Palavra-passe: ")):
        print("CONSEGUIU")
    else:
        print("Nome de usuario ou palavra-passe incorrecto")


main()
python
#!/usr/bin/env python3 """ ############################################################################# common resources for multiple scripts ############################################################################# Sylvain @ GIS / Biopolis / Singapore Sylvain RIONDET <[email protected]> PLoT-ME: Pre-classification of Long-reads for Memory Efficient Taxonomic assignment https://github.com/sylvain-ri/PLoT-ME ############################################################################# """ import argparse from datetime import datetime import logging from multiprocessing import cpu_count # from multiprocessing.pool import Pool import numpy as np import os import os.path as osp import pandas as pd from pathlib import Path import shutil import subprocess from tqdm import tqdm from plot_me import LOGS # ############################################################################# # https://docs.python.org/3/howto/logging-cookbook.html def init_logger(logger_name='reads_binning', verbose=True): # create formatter for the handlers formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # create file handler which logs even debug messages fh = logging.FileHandler(LOGS) fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) # create console handler with a higher log level ch = logging.StreamHandler() ch.setLevel(logging.INFO if verbose else logging.DEBUG) ch.setFormatter(formatter) # create logger with parse_DB.py and add the handlers to the logger new_logger = logging.getLogger(logger_name) new_logger.setLevel(logging.DEBUG) new_logger.addHandler(fh) new_logger.addHandler(ch) return new_logger logger = init_logger('tools') # ############################################################################# # File directory checking def is_valid_directory(x): if osp.isdir(x): return x else: reply = input(f'Folder not found, would like to create it ? y/[n] \n{x}') if 'y' in reply.lower(): os.makedirs(x) else: logger.error('directory does not exist and has not been created ' + x) raise NotADirectoryError(f'The path is not a folder : {x}') return x def is_valid_file(x): if osp.isfile(x): return x else: logger.error('file does not exist ' + x) raise FileNotFoundError(f'The path is not a file : {x}') def create_path(path): """ Create the intermediate folders if not existing. """ # consider that it's a file if the string after the "." is shorter than 4 character folder = osp.dirname(path) if "." in osp.basename(path) and len(osp.splitext(osp.basename(path))[1]) <= 4 else path if not osp.isdir(folder): logger.log(5, f"created folder {folder}") os.makedirs(folder, exist_ok=True) def delete_folder_if_exists(path_dir): if osp.isdir(path_dir): logger.warning(f"Folder exists, DELETE IT ? 
(need to delete to redo a clean install): {path_dir}")
        user_in = input("y/[n]").lower()
        logger.debug(f"user entered: {user_in}")
        if 'y' in user_in:
            shutil.rmtree(path_dir)


def folder_today(path):
    s_today = f"{datetime.today()}"
    final_path = osp.join(path, s_today)
    if not osp.isdir(final_path):
        os.makedirs(final_path)
    return final_path


def f_size(path_or_size):
    """ If supplied a string, try to get the file size (otherwise the size can be fed directly),
        then format the file size with MB/GB/TB and return it as a string """
    if isinstance(path_or_size, str):
        assert osp.isfile(path_or_size), FileNotFoundError(
            f"checking for file size, but file not found: {path_or_size}")
        size = osp.getsize(path_or_size)
    elif isinstance(path_or_size, (int, float)):
        assert path_or_size >= 0, ValueError(
            f"this function doesn't work with negative values: {path_or_size}. supposed to be a file size")
        size = path_or_size
    else:
        raise NotImplementedError(
            f"Received neither a path (string) nor a number: {path_or_size}, can't return a file size")

    if size == 0:
        return "0 B"
    for threshold, unit in f_size.splits.items():
        # splits is ordered from TB down to B, so the first match wins
        if size >= threshold:
            return f"{size/threshold:.2f} {unit}"
    # a bare `raise` here would fail (no active exception); raise explicitly
    raise ValueError(f"unable to format the file size: {size}")


f_size.splits = {
    10**12: "TB",
    10**9:  "GB",
    10**6:  "MB",
    10**3:  "kB",
    1:      "B",
}


def bash_process(cmd, msg=""):
    """ execute a bash command (string or list of strings), redirect streams into the logger
        encoding=utf-8 to have a text stream (somehow text=True not accepted by PyCharm),
        all streams redirected to the Pipe, shell on for commands with bash syntax like wild cards """
    # https://docs.python.org/3/library/subprocess.html#subprocess.Popen
    if isinstance(cmd, str):
        shell = True
    else:
        shell = False
        assert isinstance(cmd, (list, tuple)), \
            TypeError(f"the input should be a list or tuple, but got type:{type(cmd)}, {cmd}")

    logger.info((msg if msg != "" else "launching bash command") + ": "
                + (cmd.split()[0] if shell else cmd[0]))
    logger.debug(cmd if shell else " ".join(cmd))
    # Combine stdout and stderr into the same stream, both as text (non binary)
    proc = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            encoding="utf-8")
    for line in iter(proc.stdout.readline, ''):
        logger.debug(line.replace("\n", ""))

    # Check that the process ended successfully
    proc.wait(60*60*24)  # wait 24 hours max
    if proc.returncode == 123:
        logger.warning(f"Process {proc.pid} exited with exit status {proc.returncode}")
    elif proc.returncode != 0:
        logger.warning(f"Process {proc.pid} exited with exit status {proc.returncode}")
        # parenthesize the ternary, otherwise the string prefix is lost for list commands
        raise ChildProcessError("see log file, bash command raised errors: " +
                                (cmd if isinstance(cmd, str) else " ".join(cmd)))


def div_z(n, d):
    return n / d if d else 0


def time_to_hms(start, end, fstring=True, short=False):
    assert start <= end, ArithmeticError(f"The start time is later than the end time: {start} > {end}")
    delay = int(end - start)
    m, s = divmod(delay, 60)
    h, m = divmod(m, 60)
    if short:
        return f"{h:d}:{m:02d}:{s:02d}"
    elif fstring:
        return f"{h:d} hours, {m:02d} minutes, {s:02d} seconds"
    else:
        return h, m, s


class ArgumentParserWithDefaults(argparse.ArgumentParser):
    """ Customized Argparser to get both formatted docstring and default arguments
        https://stackoverflow.com/a/52025430/4767645 """
    def add_argument(self, *args, help=None, default=None, choices=None, **kwargs):
        if help is not None:
            kwargs['help'] = help
        if default not in (None, '') and args[0] != '-h':
            kwargs['default'] = default
            if help is not None:
                if isinstance(default, (list, tuple)):
                    formatted = " ".join(str(item) for item in default)
                    kwargs['help'] += f' ({type(default).__name__} - default: "{formatted}")'
                else:
                    kwargs['help'] += f' ({type(default).__name__} - default: {default})'
        if choices not in (None, [], ()) and args[0] != '-h':
            # re-add the swallowed keyword, and document the valid choices
            kwargs['choices'] = choices
            if help is not None:
                kwargs['help'] += " (choices: " + ", ".join(choices) + ")"
        super().add_argument(*args, **kwargs)


def pll_scaling(serie):
    serie = pd.to_numeric(serie, downcast='float')
    serie *= pll_scaling.ratio
    return serie


pll_scaling.ratio = 0


def scale_df_by_length(data, kmer_cols, k, w, single_row=False, cores=cpu_count()):
    """ Divide the kmer counts by the length of the segments, and multiply by the number of kmer choices """
    divider = w - k + 1
    ratio = 4**k / divider if divider > 1 else 4**k  # avoid divide by 0
    ratio = np.float32(ratio)
    if single_row:
        return data * ratio
    else:
        logger.info(f"Scaling the dataframe {data.shape}, converting to float32")
        logger.debug(f"{data}")
        pll_scaling.ratio = ratio
        # Mono thread version (extremely slow for some reason)
        for col in tqdm(kmer_cols):
            data[col] *= ratio
        # with Pool(cores) as pool:
        #     results = list(tqdm(pool.imap(pll_scaling, (data.loc[:, col] for col in kmer_cols)),
        #                         total=len(kmer_cols), desc="scaling each Series"))
        #     # much faster, but let's see if there is an even faster assignment
        #     # todo: build a new DataFrame from scratch ?
        #     for i, col in tqdm(enumerate(kmer_cols), total=len(kmer_cols), desc="Assigning results back to DataFrame"):
        #         data[col] = results[i]
        logger.debug(f"{data}")


class ScanFolder:
    """ Set class attributes, root & target folder, extensions to find and create
        tqdm scan the folder and create abs, rel, target paths """
    obj_id = 0
    folder_root = ""
    folder_target = ""
    count_files = None
    ext_find = ()
    ext_check = ""
    ext_create = ""
    skip_folders = ()

    def __init__(self, path):
        ScanFolder.obj_id += 1
        self.logger = logging.getLogger('tools.ScanFolder')

        self.path_abs = os.path.abspath(path)
        self.path_rel = osp.relpath(self.path_abs, self.folder_root)
        self.base = osp.splitext(osp.split(self.path_abs)[1])[0]

    @property
    def path_check(self):
        """ Path of the related file in the same folder, but with the check extension """
        assert self.ext_check != "", logger.error(f"No file extension provided to check files "
                                                  f"(define with ScanFolder.ext_check)")
        return osp.splitext(self.path_abs)[0] + self.ext_check

    @property
    def path_target(self):
        if ScanFolder.folder_root == "":
            self.logger.warning("no root folder, set it with ScanFolder.folder_root = <path>")
            return ""
        elif ScanFolder.ext_create == "":
            self.logger.warning("no extension specified for the target file name")
            return ""
        else:
            path_to_target = osp.join(ScanFolder.folder_target, self.path_rel)
            res = osp.splitext(path_to_target)[0] + ScanFolder.ext_create
            create_path(res)
            return res

    def file_matches_ext(self):
        """ does the file have one of the extensions we are looking for """
        return self.path_rel.lower().endswith(self.ext_find)

    def file_complies(self, log=True):
        """ Find files with the extension to find, and check that the related file (ext_check) exists """
        if not self.file_matches_ext():
            return False
        if self.ext_check != "" and not osp.isfile(self.path_check):
            self.logger.warning(f"Related file with extension {self.ext_check} not found in root directory for {self}")
            return False
        if log:
            self.logger.log(5, f"file complies {self}")
        return True

    @classmethod
    def set_folder_scan_options(cls,
scanning="", target="", ext_find=(), ext_check="", ext_create="", skip_folders=()): """ Set the options to scan a folder, filter files to find, files to check, and create the target path """ assert osp.isdir(scanning), logger.error(f"the provided path to scan is not a directory {scanning}") assert target == "" or osp.isdir(target), logger.error(f"the provided path as target is not a directory {target}") cls.folder_root = scanning cls.folder_target = target cls.ext_find = ext_find cls.ext_check = ext_check cls.ext_create = ext_create cls.skip_folders = skip_folders @classmethod def tqdm_scan(cls, folder="", with_tqdm=True): """ replicated os.walk, with total file count, for a folder (default root folder) yields a ScanFolder object """ if folder != "": cls.folder_root = folder assert osp.isdir(cls.folder_root), logger.error(f"the provided path to scan is not a directory {cls.folder_root}") n = 0 if with_tqdm: if cls.count_files is None: cls.count_root_files() logger.info(f"Yielding the {cls.count_files} files found in folder {cls.folder_root}") for obj in tqdm(cls.walk_dir(log=False), total=cls.count_files): n += 1 yield obj else: for obj in cls.walk_dir(log=False): n += 1 yield obj logger.debug(f"{n} have been processed") @classmethod def walk_dir(cls, log=True): """ Walk through every files in a directory (default root folder) and yield FileInDir """ for dir_path, dirs, files in os.walk(cls.folder_root): # Skip folders rel_path = osp.relpath(dir_path, cls.folder_root) if any((name_to_skip in rel_path for name_to_skip in cls.skip_folders)): logger.debug(f"omitting folder {rel_path}") continue for filename in files: file = ScanFolder(os.path.join(dir_path, filename)) if file.file_complies(log): yield file @classmethod def count_root_files(cls): logger.debug(f"counting matching files in {cls.folder_root}") file_count = 0 for _ in tqdm(cls.walk_dir()): file_count += 1 cls.count_files = file_count return file_count def __repr__(self): return self.path_abs # ############################################################################# # Save for programming # logging.debug('This is a debug message') # logging.info('This is an info message') # logging.warning('This is a warning message') # logging.error('This is an error message') # logging.critical('This is a critical message')
python
from typing import Generic, Iterator, Optional, Type, TypeVar

from fastapi.encoders import jsonable_encoder
from mongoengine import DoesNotExist
from pydantic import BaseModel

from app.models.base import BaseModel as BaseDBModel

ModelType = TypeVar("ModelType", bound=BaseDBModel)
CreateSchemaType = TypeVar("CreateSchemaType", bound=BaseModel)
UpdateSchemaType = TypeVar("UpdateSchemaType", bound=BaseModel)


class CRUDBase(Generic[ModelType, CreateSchemaType, UpdateSchemaType]):
    def __init__(self, model: Type[ModelType]):
        """
        CRUD object with default methods to Create, Read, Update, Delete (CRUD).

        **Parameters**

        * `model`: A MongoDB Document model class
        """
        self.model = model

    def get(self, id: str) -> Optional[ModelType]:
        try:
            return self.model.objects(id=id).get()
        except DoesNotExist:
            return None

    def get_multi(self, *, skip=0, limit=100) -> Iterator[ModelType]:
        # slice as [skip:skip + limit] so `limit` is a page size, not an end index
        return iter(self.model.objects[skip:skip + limit])

    def create(self, *, obj_in: CreateSchemaType) -> ModelType:
        obj_in_data = jsonable_encoder(obj_in)
        db_obj = self.model(**obj_in_data).save()
        return db_obj

    def update(self, *, db_obj: ModelType, obj_in: UpdateSchemaType) -> ModelType:
        obj_data = db_obj.to_mongo()
        update_data = obj_in.dict(exclude_unset=True)
        for field in obj_data:
            if field in update_data:
                setattr(db_obj, field, update_data[field])
        db_obj.save()
        return db_obj

    def remove(self, *, id_: str) -> Optional[ModelType]:
        obj = self.get(id_)
        if obj:
            obj.delete()
        return obj
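# A minimal sketch of wiring CRUDBase to a concrete document; the Item model
# and its Pydantic schemas below are hypothetical stand-ins, not part of
# this module.
#
# from app.models.item import Item              # a mongoengine Document
# from app.schemas.item import ItemCreate, ItemUpdate
#
# class CRUDItem(CRUDBase[Item, ItemCreate, ItemUpdate]):
#     pass
#
# item_crud = CRUDItem(Item)
# created = item_crud.create(obj_in=ItemCreate(name="example"))
# fetched = item_crud.get(id=str(created.id))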
python
import datetime import os import uuid from django.core.urlresolvers import reverse from django.db import models from django.db.models import Q from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.models import User import reversion from markitup.fields import MarkupField from model_utils.managers import InheritanceManager from symposion.conference.models import Section class ProposalSection(models.Model): """ configuration of proposal submissions for a specific Section. a section is available for proposals iff: * it is after start (if there is one) and * it is before end (if there is one) and * closed is NULL or False """ section = models.OneToOneField(Section) start = models.DateTimeField(null=True, blank=True) end = models.DateTimeField(null=True, blank=True) closed = models.NullBooleanField() published = models.NullBooleanField() @classmethod def available(cls): now = datetime.datetime.now() return cls._default_manager.filter( Q(start__lt=now) | Q(start=None), Q(end__gt=now) | Q(end=None), Q(closed=False) | Q(closed=None), ) def is_available(self): if self.closed: return False now = datetime.datetime.now() if self.start and self.start > now: return False if self.end and self.end < now: return False return True def __unicode__(self): return self.section.name class ProposalKind(models.Model): """ e.g. talk vs panel vs tutorial vs poster Note that if you have different deadlines, reviewers, etc. you'll want to distinguish the section as well as the kind. """ section = models.ForeignKey(Section, related_name="proposal_kinds") name = models.CharField(_("Name"), max_length=100) slug = models.SlugField() def __unicode__(self): return self.name class ProposalBase(models.Model): objects = InheritanceManager() kind = models.ForeignKey(ProposalKind) title = models.CharField(max_length=100) description = models.TextField( _("Brief Outline"), max_length=400, # @@@ need to enforce 400 in UI help_text="If your talk is accepted this will be made public and printed in the program. Should be one paragraph, maximum 400 characters." ) abstract = MarkupField( _("Detailed Abstract"), help_text=_("Detailed description and outline. Will be made public if your talk is accepted. Edit using <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.") ) additional_notes = MarkupField( blank=True, help_text=_("Anything else you'd like the program committee to know when making their selection: your past speaking experience, open source community experience, etc. 
Edit using <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.") ) submitted = models.DateTimeField( default=datetime.datetime.now, editable=False, ) speaker = models.ForeignKey("speakers.Speaker", related_name="proposals") additional_speakers = models.ManyToManyField("speakers.Speaker", through="AdditionalSpeaker", blank=True) cancelled = models.BooleanField(default=False) def can_edit(self): return True @property def section(self): return self.kind.section @property def speaker_email(self): return self.speaker.email @property def number(self): return str(self.pk).zfill(3) def speakers(self): yield self.speaker for speaker in self.additional_speakers.exclude(additionalspeaker__status=AdditionalSpeaker.SPEAKING_STATUS_DECLINED): yield speaker def notification_email_context(self): return { "title": self.title, "speaker": self.speaker.name, "kind": self.kind.name, } reversion.register(ProposalBase) class AdditionalSpeaker(models.Model): SPEAKING_STATUS_PENDING = 1 SPEAKING_STATUS_ACCEPTED = 2 SPEAKING_STATUS_DECLINED = 3 SPEAKING_STATUS = [ (SPEAKING_STATUS_PENDING, _("Pending")), (SPEAKING_STATUS_ACCEPTED, _("Accepted")), (SPEAKING_STATUS_DECLINED, _("Declined")), ] speaker = models.ForeignKey("speakers.Speaker") proposalbase = models.ForeignKey(ProposalBase) status = models.IntegerField(choices=SPEAKING_STATUS, default=SPEAKING_STATUS_PENDING) class Meta: db_table = "proposals_proposalbase_additional_speakers" unique_together = ("speaker", "proposalbase") def uuid_filename(instance, filename): ext = filename.split(".")[-1] filename = "%s.%s" % (uuid.uuid4(), ext) return os.path.join("document", filename) class SupportingDocument(models.Model): proposal = models.ForeignKey(ProposalBase, related_name="supporting_documents") uploaded_by = models.ForeignKey(User) created_at = models.DateTimeField(default=datetime.datetime.now) file = models.FileField(upload_to=uuid_filename) description = models.CharField(max_length=140) def download_url(self): return reverse("proposal_document_download", args=[self.pk, os.path.basename(self.file.name).lower()])
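# A minimal sketch (hypothetical, typically living in a project-level app)
# of a concrete proposal model built on ProposalBase; the InheritanceManager
# on ProposalBase is what lets ProposalBase.objects.select_subclasses()
# return instances of subclasses like this one.
#
# class TalkProposal(ProposalBase):
#     AUDIENCE_LEVELS = [
#         (1, _("Novice")),
#         (2, _("Intermediate")),
#         (3, _("Expert")),
#     ]
#     audience_level = models.IntegerField(choices=AUDIENCE_LEVELS)
#
#     class Meta:
#         verbose_name = "talk proposal"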
python
#!/usr/bin/env python # Bird Feeder - Feed Birds & Capture Images! # Copyright (C) 2020 redlogo # # This program is under MIT license import socket from imutils.video import VideoStream class RPiCamera: """ This is a class to get video stream from RPi. """ __slots__ = 'width', 'height', 'name', 'camera' def __init__(self, width, height): # image info self.width = width self.height = height # RPi's name self.name = socket.gethostname() # RPi's video stream class self.camera = VideoStream(usePiCamera=True, resolution=(width, height)) def start(self): """ Start streaming. :return: nothing """ self.camera.start() def get_image(self): """ Get individual image (frame) from streaming source. :return: An individual image """ return self.camera.read()
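# A minimal usage sketch (the resolution, warm-up delay and frame count are
# arbitrary illustration values): start the stream, give the Pi camera a
# moment to warm up, then read a few frames.
if __name__ == '__main__':
    import time

    cam = RPiCamera(320, 240)
    cam.start()
    time.sleep(2.0)  # let the camera warm up before the first read
    for _ in range(10):
        frame = cam.get_image()  # numpy array of shape (height, width, 3) once ready
        if frame is not None:
            print(cam.name, frame.shape)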
python
class Vehiculo(object): def __init__(self, name, date_of_release, passengers, number_of_wheels, terrain, type_of_vehicle): self.name = name self.date_of_release = date_of_release self.passengers = passengers self.number_of_wheels = number_of_wheels self.terrain = terrain self.type_of_vehicle = type_of_vehicle def specs(self): print(f"{self.name} salió en la fecha: {self.date_of_release}, tiene el número de llantas: {self.number_of_wheels} y es para el terreno tipo: {self.terrain}") print('Numero de pasajeros = ' + str(self.passengers)) def move_vehicle(self, km): print(f"{self.name} se ha movido {km} unidades") def stop_vehicle(self): print(self.name +' se ha detenido') class Camion(Vehiculo): numero_actual_de_pasajeros = 0 cuenta = 0.000 cobro = 0.000 def __init__(self, name, date_of_release, passengers, number_of_wheels, terrain, type_of_vehicle,): super().__init__(name, date_of_release, passengers, number_of_wheels, terrain, type_of_vehicle) def sube_pasajero (self, numero_de_pasajeros_que_subieron): numero_de_pasajeros_que_subieron = int(numero_de_pasajeros_que_subieron) caja = float(numero_de_pasajeros_que_subieron) * self.cobro self.cuenta = self.cuenta + caja self.numero_actual_de_pasajeros = self.numero_actual_de_pasajeros + numero_de_pasajeros_que_subieron print(f"Se han subido {numero_de_pasajeros_que_subieron}, se ha ingresado: {caja}") def baja_pasajero(self, numero_de_pasajeros_que_bajaron): self.numero_actual_de_pasajeros = self.numero_actual_de_pasajeros - numero_de_pasajeros_que_bajaron print(f"Se han bajado {numero_de_pasajeros_que_bajaron}") def camion_status(self): print(f"Hay {self.numero_actual_de_pasajeros} actualmente en el camión y hay {self.cuenta} en la cuenta") def set_cuota(self,cta): cta = float(cta) self.cobro = cta class Jet(Vehiculo): def __init__(self, name, date_of_release, passengers, number_of_wheels, terrain, type_of_vehicle, mg_ammo, missile_ammo): self.mg_ammo = mg_ammo self.missile_ammo = missile_ammo super().__init__(name, date_of_release, passengers, number_of_wheels, terrain, type_of_vehicle) def shoot_mg(self,shoots): if shoots > self.mg_ammo: data1 = shoots - self.mg_ammo print(f"No se pueden hacer la cantidad de disparos solicitados, faltan {data1} unidades de munición") else: self.mg_ammo = self.mg_ammo - shoots print(f"Se ha disparado satisfactoriamente, se usaron {shoots} unidades de munición") def shoot_missiles(self,shoots): if shoots > self.missile_ammo: data1 = shoots - self.missile_ammo print(f"No se pueden hacer la cantidad de disparos solicitados, faltan {data1} misiles") else: self.missile_ammo = self.missile_ammo - shoots print(f"Se ha disparado satisfactoriamente, se usaron {shoots} misiles") def jet_status(self): print(f"Munición de MG = {self.mg_ammo}") print(f"Munición de Misiles = {self.missile_ammo}") def send_message_to_base(self, mess): print(f"Tu mensaje :\n{mess}\nha sido recibido por nuestra base") camion_uno = Camion('Mercedes-Benz O371', 'Decada de 1980', 28, 4, 'Urbano', 'Transporte publico') jet_uno = Jet('F-22 Raptor', 'Año 2003', 1, 3, 'Aereo', 'Combate', 5000, 15) camion_uno.specs() camion_uno.move_vehicle(12) camion_uno.stop_vehicle() camion_uno.set_cuota(11.5) camion_uno.sube_pasajero(5) camion_uno.baja_pasajero(2) camion_uno.camion_status() print('\n') jet_uno.specs() jet_uno.move_vehicle(1000) jet_uno.stop_vehicle() jet_uno.shoot_mg(1000) jet_uno.shoot_missiles(7) jet_uno.jet_status() jet_uno.send_message_to_base('Acabo de cometer un crimen de guerra')
python
"""exceptions.py: Custom exceptions used by Miscreant""" class CryptoError(Exception): """Parent of all cryptography-related errors""" class IntegrityError(CryptoError): """Ciphertext failed to verify as authentic""" class OverflowError(Exception): """Integer value overflowed""" class FinishedError(Exception): """STREAM is already finished"""
python
import turtle

turtle.setup(500, 600)
turtle.penup()
turtle.hideturtle()

# CREATE NAMED CONSTANTS FOR THE STARS
LEFT_SHOULDER_X = -70
LEFT_SHOULDER_Y = 200
RIGHT_SHOULDER_X = 80
RIGHT_SHOULDER_Y = 180
LEFT_BELTSTAR_X = -40
LEFT_BELTSTAR_Y = -20
MIDDLE_BELTSTAR_X = 0
MIDDLE_BELTSTAR_Y = 0
RIGHT_BELTSTAR_X = 40
RIGHT_BELTSTAR_Y = 20
LEFT_KNEE_X = -90
LEFT_KNEE_Y = -180
RIGHT_KNEE_X = 120
RIGHT_KNEE_Y = -140

# DRAW THE STARS
turtle.goto(LEFT_SHOULDER_X, LEFT_SHOULDER_Y)
turtle.dot()
turtle.goto(RIGHT_SHOULDER_X, RIGHT_SHOULDER_Y)
turtle.dot()
turtle.goto(LEFT_BELTSTAR_X, LEFT_BELTSTAR_Y)
turtle.dot()
turtle.goto(MIDDLE_BELTSTAR_X, MIDDLE_BELTSTAR_Y)
turtle.dot()
turtle.goto(RIGHT_BELTSTAR_X, RIGHT_BELTSTAR_Y)
turtle.dot()
turtle.goto(LEFT_KNEE_X, LEFT_KNEE_Y)
turtle.dot()
turtle.goto(RIGHT_KNEE_X, RIGHT_KNEE_Y)
turtle.dot()

# DISPLAY THE STAR NAMES
turtle.goto(LEFT_SHOULDER_X, LEFT_SHOULDER_Y)
turtle.write('Betelgeuse')
turtle.goto(RIGHT_SHOULDER_X, RIGHT_SHOULDER_Y)
turtle.write('Meissa')
turtle.goto(LEFT_BELTSTAR_X, LEFT_BELTSTAR_Y)
turtle.write('Alnitak')
turtle.goto(MIDDLE_BELTSTAR_X, MIDDLE_BELTSTAR_Y)
turtle.write('Alnilam')
turtle.goto(RIGHT_BELTSTAR_X, RIGHT_BELTSTAR_Y)
turtle.write('Mintaka')
turtle.goto(LEFT_KNEE_X, LEFT_KNEE_Y)
turtle.write('Saiph')
turtle.goto(RIGHT_KNEE_X, RIGHT_KNEE_Y)
turtle.write('Rigel')

# CONNECT THE STARS
turtle.goto(LEFT_SHOULDER_X, LEFT_SHOULDER_Y)
turtle.pendown()
turtle.goto(LEFT_BELTSTAR_X, LEFT_BELTSTAR_Y)
turtle.goto(LEFT_KNEE_X, LEFT_KNEE_Y)
turtle.penup()
turtle.goto(LEFT_BELTSTAR_X, LEFT_BELTSTAR_Y)
turtle.pendown()
turtle.goto(MIDDLE_BELTSTAR_X, MIDDLE_BELTSTAR_Y)
turtle.goto(RIGHT_BELTSTAR_X, RIGHT_BELTSTAR_Y)
turtle.goto(RIGHT_SHOULDER_X, RIGHT_SHOULDER_Y)
turtle.penup()
turtle.goto(RIGHT_BELTSTAR_X, RIGHT_BELTSTAR_Y)
turtle.pendown()
turtle.goto(RIGHT_KNEE_X, RIGHT_KNEE_Y)

turtle.done()
python
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

from tf_shuffle import shuffle


def check_shuffle(deck):
    count = 0
    for i in range(len(deck) - 2):
        diff = deck[i + 1] - deck[i]
        if (abs(deck[i + 2] - deck[i + 1]) == diff) and (abs(deck[i + 1] - deck[i]) == diff):
            count += 1
    return count


def recurse(deck):
    count = 0
    for i in range(len(deck) - 1):
        if deck[i] == deck[i + 1]:
            count += 1
    return count


D0 = np.array(range(0, 0))
S0 = shuffle(D0)

DT26 = list(range(0, 26))
DT52 = list(range(0, 52))
DT104 = list(range(0, 104))

# Plain list rather than np.array: the three decks have different lengths,
# so a 2D array would be ragged.
deck_list = [DT26, DT52, DT104]
n = len(deck_list)
num_shuffles = 10

shuffle_deck_2 = np.zeros((num_shuffles + 1, len(DT26)))
shuffle_deck_3 = np.zeros((num_shuffles + 1, len(DT52)))
shuffle_deck_4 = np.zeros((num_shuffles + 1, len(DT104)))

shuffle_deck_2[0] = DT26
shuffle_deck_3[0] = DT52
shuffle_deck_4[0] = DT104

print("Let's consider where the original top and bottom cards of the unshuffled deck end up after %s shuffles." % (num_shuffles))
print()

top_card_num_arr = np.zeros(n)
bottom_card_num_arr = np.zeros(n)
init_top_card_index = np.zeros(n)
init_bottom_card_index = np.zeros(n)
new_top_card_index = np.zeros(n)
new_bottom_card_index = np.zeros(n)

S2 = DT26
S3 = DT52
S4 = DT104
for i in range(1, num_shuffles):
    S2 = shuffle(S2).tolist()
    S3 = shuffle(S3).tolist()
    S4 = shuffle(S4).tolist()
    shuffle_deck_2[i] = S2
    shuffle_deck_3[i] = S3
    shuffle_deck_4[i] = S4

shuffled_deck_list = [S2, S3, S4]

for i in range(n):
    top_card_num_arr[i] = deck_list[i][0]
    bottom_card_num_arr[i] = deck_list[i][-1]
    init_bottom_card_index[i] = len(deck_list[i]) - 1
    new_top_card_index[i] = shuffled_deck_list[i].index(top_card_num_arr[i])
    new_bottom_card_index[i] = shuffled_deck_list[i].index(bottom_card_num_arr[i])
    print("The shuffled deck %s is: \n %s \n" % (i + 1, shuffled_deck_list[i]))

for i in range(len(deck_list)):
    print("%s cards: \n%s" % (len(deck_list[i]), shuffled_deck_list[i]))
    print()
    print("%s cards, initial index %s (top card) --> index %s" % (len(deck_list[i]), init_top_card_index[i], new_top_card_index[i]))
    print("Top card moved %s positions" % (new_top_card_index[i] - init_top_card_index[i]))
    print("%s cards, initial index %s (bottom card) --> index %s" % (len(deck_list[i]), init_bottom_card_index[i], new_bottom_card_index[i]))
    print("Bottom card moved %s positions" % (init_bottom_card_index[i] - new_bottom_card_index[i]))
    print()

### Test Cases
print("Let's look at whether there are still groups of consecutive cards.")
print("We'll consider a consecutive group to be 3 ordered cards in a row.")
print()

print("Let's compare consecutive shuffles of 26 cards:")
print()
grps = np.zeros(num_shuffles)
for row in range(num_shuffles):
    print("Shuffle %s: %s\n" % (row, shuffle_deck_2[row]))
    grps[row] = check_shuffle(shuffle_deck_2[row])
print("List of number of ordered sequences at each iteration: ", grps)
plt.plot(grps)
plt.show()

print("Let's compare consecutive shuffles of 52 cards:")
print()
grps = np.zeros(num_shuffles)
for row in range(num_shuffles):
    print("Shuffle %s: %s\n" % (row, shuffle_deck_3[row]))
    grps[row] = check_shuffle(shuffle_deck_3[row])
print("List of number of ordered sequences at each iteration: ", grps)
print("And we can see that at around 7 shuffles, we stop seeing two number groupings as much as well.")
plt.plot(grps)
plt.show()

print("Let's compare consecutive shuffles of 104 cards:")
print()
grps = np.zeros(num_shuffles)
for row in range(num_shuffles):
    ## print("Shuffle %s: %s\n" % (row, shuffle_deck_4[row]))
    grps[row] = check_shuffle(shuffle_deck_4[row])
print("List of number of ordered sequences at each iteration: ", grps)
plt.plot(grps)
plt.show()
python
import argparse

import simplePicStegoEmbed
import simplePicStegoError
import simplePicStegoReveal


class UnknownFunctionError(simplePicStegoError.Error):
    """
    Raise error when unknown commands are given
    """
    def __init__(self, message):
        self.message = message


version = "1.0"


def init_program():
    parser = argparse.ArgumentParser(description="An app that embeds strings into images")
    # parser.add_argument("--version", action="version", version="%(prog)s %s" % version)
    parser.add_argument("-e", action="store", dest="encode_file", help="The file name to store the string", default=False)
    parser.add_argument("-m", action="store", dest="message", help="The message to store. Combine with -e", default=None)
    parser.add_argument("-d", action="store", dest="decode_file", help="The file to extract the message")
    results = parser.parse_args()
    if (results.encode_file and results.decode_file) or (not results.encode_file and not results.decode_file):
        raise UnknownFunctionError("Must either encode or decode a file")
    elif results.encode_file:  # create object to encode message into file and perform operation
        if results.encode_file.split(".")[1] != "png":
            raise simplePicStegoError.Error("Can only support png file right now")
        simplePicStegoEmbed.PicEmbed(results.encode_file, results.message).embed_message()
    elif results.decode_file:  # create object to attempt to find a message within an image file
        if results.decode_file.split(".")[1] != "png":
            raise simplePicStegoError.Error("Can only support png file right now")
        message = simplePicStegoReveal.SimplePicStegoReveal(results.decode_file).reveal()
        print(message)


def main():
    init_program()


if __name__ == '__main__':
    main()
python
import ast
import csv
from typing import Iterable, Union, Dict

from fastNLP import DataSet, Instance, Vocabulary
from fastNLP.core.vocabulary import VocabularyOption
from fastNLP.io import JsonLoader
from fastNLP.io.base_loader import DataBundle, DataSetLoader
from fastNLP.io.embed_loader import EmbeddingOption
from fastNLP.io.file_reader import _read_json

from reproduction.utils import check_dataloader_paths, get_tokenizer


def clean_str(sentence, tokenizer, char_lower=False):
    """
    heavily borrowed from github
    https://github.com/LukeZhuang/Hierarchical-Attention-Network/blob/master/yelp-preprocess.ipynb
    :param sentence: is a str
    :return:
    """
    if char_lower:
        sentence = sentence.lower()
    import re
    nonalpnum = re.compile('[^0-9a-zA-Z?!\']+')
    words = tokenizer(sentence)
    words_collection = []
    for word in words:
        if word in ['-lrb-', '-rrb-', '<sssss>', '-r', '-l', 'b-']:
            continue
        tt = nonalpnum.split(word)
        t = ''.join(tt)
        if t != '':
            words_collection.append(t)
    return words_collection


class yelpLoader(DataSetLoader):
    """
    Loads the Yelp_full/Yelp_polarity dataset. The DataSet contains the fields:

        words: list(str), the text to classify
        target: str, the label of the text
        chars: list(str), the un-indexed character list

    Datasets: yelp_full/yelp_polarity

    :param fine_grained: whether to use the SST-5 standard; if ``False``, SST-2 is used. Default: ``False``
    """

    def __init__(self, fine_grained=False, lower=False):
        super(yelpLoader, self).__init__()
        tag_v = {'1.0': 'very negative', '2.0': 'negative', '3.0': 'neutral',
                 '4.0': 'positive', '5.0': 'very positive'}
        if not fine_grained:
            tag_v['1.0'] = tag_v['2.0']
            tag_v['5.0'] = tag_v['4.0']
        self.fine_grained = fine_grained
        self.tag_v = tag_v
        self.lower = lower
        self.tokenizer = get_tokenizer()

    '''
    Loads the raw Yelp dataset. The DataSet contains the fields:

        review_id: str, 22 character unique review id
        user_id: str, 22 character unique user id
        business_id: str, 22 character business id
        useful: int, number of useful votes received
        funny: int, number of funny votes received
        cool: int, number of cool votes received
        date: str, date formatted YYYY-MM-DD
        words: list(str), the text to classify
        target: str, the label of the text

    Data source: https://www.yelp.com/dataset/download

    def _load_json(self, path):
        ds = DataSet()
        for idx, d in _read_json(path, fields=self.fields_list, dropna=self.dropna):
            d = ast.literal_eval(d)
            d["words"] = d.pop("text").split()
            d["target"] = self.tag_v[str(d.pop("stars"))]
            ds.append(Instance(**d))
        return ds

    def _load_yelp2015_broken(self, path):
        ds = DataSet()
        with open(path, encoding='ISO 8859-1') as f:
            row = f.readline()
            all_count = 0
            exp_count = 0
            while row:
                row = row.split("\t\t")
                all_count += 1
                if len(row) >= 3:
                    words = row[-1].split()
                    try:
                        target = self.tag_v[str(row[-2]) + ".0"]
                        ds.append(Instance(words=words, target=target))
                    except KeyError:
                        exp_count += 1
                else:
                    exp_count += 1
                row = f.readline()
            print("error sample count:", exp_count)
            print("all count:", all_count)
            return ds
    '''

    def _load(self, path):
        ds = DataSet()
        csv_reader = csv.reader(open(path, encoding='utf-8'))
        all_count = 0
        real_count = 0
        for row in csv_reader:
            all_count += 1
            if len(row) == 2:
                target = self.tag_v[row[0] + ".0"]
                words = clean_str(row[1], self.tokenizer, self.lower)
                if len(words) != 0:
                    ds.append(Instance(words=words, target=target))
                    real_count += 1
        print("all count:", all_count)
        print("real count:", real_count)
        return ds

    def process(self, paths: Union[str, Dict[str, str]],
                train_ds: Iterable[str] = None,
                src_vocab_op: VocabularyOption = None,
                tgt_vocab_op: VocabularyOption = None,
                embed_opt: EmbeddingOption = None,
                char_level_op=False,
                split_dev_op=True):
        paths = check_dataloader_paths(paths)
        datasets = {}
        info = DataBundle(datasets=self.load(paths))
        src_vocab = Vocabulary() if src_vocab_op is None else Vocabulary(**src_vocab_op)
        tgt_vocab = Vocabulary(unknown=None, padding=None) \
            if tgt_vocab_op is None else Vocabulary(**tgt_vocab_op)
        _train_ds = [info.datasets[name] for name in train_ds] if train_ds else info.datasets.values()

        def wordtochar(words):
            chars = []
            for word in words:
                word = word.lower()
                for char in word:
                    chars.append(char)
                chars.append('')
            chars.pop()
            return chars

        input_name, target_name = 'words', 'target'
        info.vocabs = {}
        # split the words into char form
        if char_level_op:
            for dataset in info.datasets.values():
                dataset.apply_field(wordtochar, field_name="words", new_field_name='chars')
        # if embed_opt is not None:
        #     embed = EmbedLoader.load_with_vocab(**embed_opt, vocab=vocab)
        #     info.embeddings['words'] = embed
        else:
            src_vocab.from_dataset(*_train_ds, field_name=input_name)
            src_vocab.index_dataset(*info.datasets.values(), field_name=input_name, new_field_name=input_name)
            info.vocabs[input_name] = src_vocab

        tgt_vocab.from_dataset(*_train_ds, field_name=target_name)
        tgt_vocab.index_dataset(
            *info.datasets.values(),
            field_name=target_name, new_field_name=target_name)
        info.vocabs[target_name] = tgt_vocab

        if split_dev_op:
            info.datasets['train'], info.datasets['dev'] = info.datasets['train'].split(0.1, shuffle=False)

        for name, dataset in info.datasets.items():
            dataset.set_input("words")
            dataset.set_target("target")

        return info


if __name__ == "__main__":
    testloader = yelpLoader()
    # datapath = {"train": "/remote-home/ygwang/yelp_full/train.csv",
    #             "test": "/remote-home/ygwang/yelp_full/test.csv"}
    # datapath = {"train": "/remote-home/ygwang/yelp_full/test.csv"}
    datapath = {"train": "/remote-home/ygwang/yelp_polarity/train.csv",
                "test": "/remote-home/ygwang/yelp_polarity/test.csv"}
    datainfo = testloader.process(datapath, char_level_op=True)

    len_count = 0
    for instance in datainfo.datasets["train"]:
        len_count += len(instance["chars"])

    ave_len = len_count / len(datainfo.datasets["train"])
    print(ave_len)
python
import warnings

from ploceidae import core

warnings.filterwarnings("ignore", category=DeprecationWarning)

__all__ = ["core"]
python
# -*- coding: UTF-8 -*-
# Read data from point-cloud .bin files
import os
import struct


def read_data(file):
    file_path = file_dir + "/" + file
    final_text = open('final.txt', 'a')
    data_bin = open(file_path, 'rb')
    data_size = os.path.getsize(file_path)
    # each point record is 4 floats = 16 bytes
    for i in range(data_size // 16):
        for index in range(4):
            data_i = data_bin.read(4)  # read 4 bytes at a time
            if len(data_i) == 4:
                num = struct.unpack('f', data_i)
                max_list[index].append(num[0])  # record maximum candidates
                min_list[index].append(num[0])  # record minimum candidates
    write = file + '\t'
    for index in range(4):
        max_list[index] = [max(max_list[index])]  # keep only the maximum in the max list
        min_list[index] = [min(min_list[index])]  # keep only the minimum in the min list
        write += str(max_list[index][0]) + '\t' + str(min_list[index][0]) + '\t'  # output the current max/min values
    print(write)
    final_text.write(write + '\n')  # save
    data_bin.close()
    final_text.close()


file_dir = '/root/pvrcnn/POINTCLOUD'  # directory containing the .bin files
files = os.listdir(file_dir)  # get all file names in the folder

max_list = [[620.970458984375], [278.865478515625], [1.0], [1.0]]
min_list = [[2.3114852905273438], [-534.9176635742188], [-101.55160522460938], [1.0]]  # 004231.bin

for file in files:  # iterate over the folder
    read_data(file)
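# A minimal sketch of the unpacking step above on synthetic data (the values
# are made up for illustration):
if __name__ == '__main__':
    demo = struct.pack('ffff', 1.5, -2.0, 0.25, 1.0)  # one 16-byte point record
    point = [struct.unpack('f', demo[i * 4:(i + 1) * 4])[0] for i in range(4)]
    print(point)  # -> [1.5, -2.0, 0.25, 1.0]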
python
class Node:
    def __init__(self, data, next_node=None, previous=None):
        self.data = data
        self.next_node = next_node
        self.previous = previous


class DoublyLinkedList:
    def __init__(self):
        self.head = None

    def insert_at_beginning(self, data):
        if self.head is None:
            self.head = Node(data)
            return
        # the new node becomes the head: it points forward to the old head,
        # and the old head points back to it
        new_node = Node(data, self.head, None)
        self.head.previous = new_node
        self.head = new_node

    def append_element(self, data):
        if self.head is None:
            # the new node with value data will be the new head,
            # the next and previous nodes are null, since the list contains only one element.
            self.head = Node(data)
            return
        pointer = self.head
        while pointer.next_node:
            pointer = pointer.next_node
        pointer.next_node = Node(data, previous=pointer, next_node=None)

    def get_list_length(self):
        counter = 0
        pointer = self.head
        while pointer:
            counter += 1
            pointer = pointer.next_node
        return counter

    def get_tail(self):
        pointer = self.head
        while pointer.next_node:
            pointer = pointer.next_node
        tail = pointer
        return tail

    def remove_element_at_beginning(self):
        if self.head is None:
            print("The list is already empty")
        elif self.head.next_node is None:
            self.head = None
        else:
            self.head = self.head.next_node
            self.head.previous = None

    def remove_last_element(self):
        pointer = self.head
        if self.head is None:
            print("The list is already empty")
        elif self.head.next_node is None:
            self.head = None
        else:
            while pointer.next_node is not None:
                pointer = pointer.next_node
            pointer.previous.next_node = None

    def print_forwards(self):
        if self.head is None:
            print("List is empty")
            return
        doubly_linked_list = ""
        pointer = self.head
        while pointer is not None:
            doubly_linked_list += f"{pointer.data}"
            if pointer.next_node is not None:
                doubly_linked_list += " --> "
            pointer = pointer.next_node
        print(doubly_linked_list)

    def reverse_linked_list(self):
        if self.head is None:
            print("The list has no element to delete")
            return
        pointer1 = self.head
        pointer2 = pointer1.next_node
        pointer1.next_node = None
        pointer1.previous = pointer2
        while pointer2 is not None:
            pointer2.previous = pointer2.next_node
            pointer2.next_node = pointer1
            pointer1 = pointer2
            pointer2 = pointer2.previous
        self.head = pointer1

    def print_reversed_linked_list(self):
        reversed_dls = ""
        pointer = None
        while self.get_tail() != self.head:
            pointer = self.head.next_node
            pointer.previous = self.head
            self.head = pointer
        while pointer is not None:
            reversed_dls += f"{pointer.data}"
            if pointer.previous is not None:
                reversed_dls += " --> "
            pointer = pointer.previous
        print(reversed_dls)

    def check_valid_index(self, index):
        if self.get_list_length() < index or index < 0:
            raise IndexError("Index out of bounds")
        else:
            return True


if __name__ == "__main__":
    dls = DoublyLinkedList()
    dls.insert_at_beginning(10)
    dls.insert_at_beginning(100)
    dls.insert_at_beginning(1000)
    dls.print_forwards()
    dls.append_element("k")
    print(dls.get_list_length())
    dls.print_forwards()
    print(dls.get_list_length())
    dls.print_forwards()
    print("remove last element:")
    dls.remove_last_element()
    dls.print_forwards()
    dls.remove_element_at_beginning()
    print("remove first element:")
    dls.print_forwards()
    print("Reverse list")
    dls.reverse_linked_list()
    dls.print_forwards()
python
# bilibili
# @Bio-Hazard, @xue_tao_lu, @Medit_4
from math import log
from math import e

# Vertical acceleration (gravity) and drag for each entity type, in blocks/tick^2
DataTable = {
    1: {'g': -0.08, 'f': 0.02},
    2: {'g': -0.04, 'f': 0.02},
    3: {'g': -0.04, 'f': 0.05},
    4: {'g': -0.03, 'f': 0.01},
    5: {'g': -0.05, 'f': 0.01},
}

# Type id corresponding to each kind of entity
EntityType = {
    'player': 1,
    'living': 1,
    'item': 2,
    'fallingBlock': 2,
    'tnt': 2,
    'boat': 3,
    'minecart': 3,
    'egg': 4,
    'snowball': 4,
    'potion': 4,
    'enderPearl': 4,
    'arrow': 5
}


def ln(x):
    return log(x, e)


def getGFById(_id):
    data = DataTable[_id]
    return data['g'], data['f']


def getGFByType(_type):
    return getGFById(EntityType[_type])


# General formulas
def getVtByV0(g, f, v0, t):
    '''Compute vt from v0; for the horizontal direction use g=0'''
    return (v0 + g - g / f) * (1 - f) ** (t - 1) + g / f


def getStByV0(g, f, v0, t):
    '''Compute St from v0; for the horizontal direction use g=0'''
    return (v0 + g - g / f) * (1 - (1 - f) ** t) / f + g / f * t


def getV0BySt(g, f, St, t):
    '''Compute v0 from St; for the horizontal direction use g=0'''
    return (f * St - g * t) / (1 - (1 - f) ** t) + g / f - g


def getTopT(g, f, vy):
    '''Time t of the highest point'''
    return (ln(-g) - ln(-ln(1 - f)) - ln(vy + g - g / f)) / ln(1 - f)


def getTopY(g, f, vy):
    '''Maximum height'''
    if vy <= 0:
        print(f'[Warning]: In api_motion getTopY, vy({vy}) <= 0')
        return getVtByV0(g, f, vy, 0)
    else:
        t = getTopT(g, f, vy)
        return getStByV0(g, f, vy, t)


def getTopTY(g, f, vy):
    t = getTopT(g, f, vy)
    return t, getStByV0(g, f, vy, t)


def getSyBySx(g, f, vx, vy, Sx):
    '''Given Vx0 and Vy0, compute Sy from Sx'''
    return (vy + g - g / f) * Sx / vx + (g / f * (1 - ln(f * Sx) / vx) / ln(1 - f))


def getTByStWithTop(g, f, St, Top, limit=0.5):
    '''Given St, solve for the time that reaches a fixed height; Top = maxY - S0'''
    t0, t1 = 0, 600
    while (t1 - t0) > limit:
        t = 0.5 * (t1 + t0)
        v0 = getV0BySt(g, f, St, t)
        _t, _top = getTopTY(g, f, v0)
        if _top < Top:
            t0 = t
        else:
            t1 = t
    return 0.5 * (t1 + t0)


def getDownTBySt(g, f, St, limit=0.5):
    '''Time needed to free-fall a distance St, St > 0'''
    v0 = 0.0
    t0, t1 = 0.0, 600.0
    while (t1 - t0) > limit:
        t = 0.5 * (t0 + t1)
        S = -getStByV0(g, f, v0, t)
        if S > St:
            t1 = t
        else:
            t0 = t
    return 0.5 * (t1 + t0)


def getUpTBySt(g, f, St, limit=0.5):
    '''Time needed to rise a distance St with vy = 0 exactly at the top, St > 0'''
    v0 = 0.0
    t0, t1 = -600.0, 0.0
    while (t1 - t0) > limit:
        t = 0.5 * (t0 + t1)
        S = -getStByV0(g, f, v0, t)
        if S > St:
            t0 = t
        else:
            t1 = t
    return 0.5 * (t1 + t0)


if __name__ == '__main__':
    # print(getGFById(1))
    # print(getGFByType('fallingBlock'))
    g, f = getGFByType('fallingBlock')
    print(f'g:{g}, f:{f}')
    # for tick in range(50):
    #     print(tick, getStByV0(g, f, 1.0, tick))
    # print(getTopTY(g, f, 1.0))
    # print(getTopTY(g, f, -1.0))
    #
    height = 20
    t0 = getUpTBySt(g, f, height)
    t1 = getDownTBySt(g, f, height)
    print(f'h:{height}, t0:{t0}, t1:{t1}')
    pass
python
#!/usr/bin/env python
#
# Copyright 2013-2014 Mike Stirling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of the Tiny Home Area Network stack.
#
# http://www.tinyhan.co.uk/
#
# tinymon.py
#
# GnuRadio based monitoring tool. DSP core.
#

from gnuradio import gr
import gnuradio.filter as gr_filter
import gnuradio.analog as gr_analog
import gnuradio.digital as gr_digital
import gnuradio.blocks as gr_blocks
import gnuradio.gr.gr_threading as _threading
import osmosdr
from math import pi
from binascii import hexlify
from PyQt4 import Qt
from gnuradio import qtgui
import sys, sip
from datetime import datetime
from tinyhan_mac import *

TIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'


class queue_thread(_threading.Thread):
    def __init__(self, queue, callback):
        _threading.Thread.__init__(self)
        self.setDaemon(1)
        self.payload_queue = queue
        self.keep_running = True
        self.start()

    def run(self):
        sr = 0
        synced = False
        bitcount = 0
        bytecount = 0
        packet = ''
        while self.keep_running:
            msg = self.payload_queue.delete_head()
            if msg is None:
                break
            for b in msg.to_string():
                b = ord(b)
                sr = ((sr << 1) | (b & 1)) & 0xff
                if b & 2:
                    bitcount = 0
                    bytecount = 0
                    length = 0
                    synced = True
                    packet = ''
                if synced:
                    bitcount = bitcount + 1
                    if bitcount == 8:
                        packet = packet + chr(sr)
                        bitcount = 0
                        bytecount = bytecount + 1
                        if bytecount == 1:
                            length = sr + 2 + 1  # allow for CRC and length byte
                        if length > 0 and bytecount == length:
                            bytecount = 0
                            synced = False
                            # Decode and display
                            try:
                                msg = parse_mac(packet)
                            except Exception as a:
                                msg = str(a)
                            # Print with timestamp
                            print datetime.now().strftime(TIME_FORMAT) + ': ' + msg


class tinymon(gr.top_block):
    qtwidgets = []

    def __init__(self):
        gr.top_block.__init__(self)

        sdr_device = ''

        # Front end
        error_ppm = 40
        freq_c0 = 869000000

        # Modulation parameters
        sample_rate = 1200000
        bit_rate = 50000
        deviation = 25000
        max_freq_error = 50000
        decim = 2
        squelch_threshold = -20
        sync_word = "01010010110111010010"  # preamble + 2dd2

        # Source
        self.src = osmosdr.source(sdr_device)
        self.src.set_sample_rate(sample_rate)
        self.src.set_center_freq(freq_c0)
        self.src.set_freq_corr(error_ppm)
        self.src.set_dc_offset_mode(0, 0)
        self.src.set_iq_balance_mode(0, 0)
        self.src.set_gain_mode(False, 0)
        self.src.set_gain(20, 0)
        self.src.set_if_gain(20, 0)
        self.src.set_bb_gain(20, 0)

        # Channel filter (bandwidth is relative to centre of channel, so /2)
        bandwidth = 2. * (deviation + bit_rate / 2)
        filter_taps = gr_filter.firdes.low_pass(1, sample_rate, max_freq_error + bandwidth / 2., bit_rate / 2., gr_filter.firdes.WIN_BLACKMAN, 6.76)
        self.filt = gr_filter.freq_xlating_fir_filter_ccc(decim, filter_taps, 0.0, sample_rate)

        # FSK demod
        m = 2. * deviation / bit_rate  # Modulation index
        demod_gain = float(sample_rate) / decim / bit_rate / (pi * m)
        squelch = gr_analog.simple_squelch_cc(squelch_threshold, 1.)
        demod = gr_analog.quadrature_demod_cf(demod_gain)

        # AM demod (RSSI)
        ctof = gr_blocks.complex_to_mag()

        # Clock recovery and slicer
        gain_mu = 0.175
        gain_omega = 0.25 * gain_mu * gain_mu
        omega_rel_limit = 0.005
        clock = gr_digital.clock_recovery_mm_ff(sample_rate / decim / bit_rate, gain_omega, 0.5, gain_mu, omega_rel_limit)
        slicer = gr_digital.binary_slicer_fb()
        sync = gr_digital.correlate_access_code_bb(sync_word, 0)

        # Sink to queue
        self.queue = gr.msg_queue()
        self.watcher = queue_thread(self.queue, None)
        sink = gr_blocks.message_sink(gr.sizeof_char, self.queue, False)

        # GUI elements
        fft = qtgui.freq_sink_c(512, gr_filter.firdes.WIN_BLACKMAN, freq_c0, sample_rate / decim, "Spectrum", 1)
        fft.enable_grid(True)
        fft.set_line_label(0, 'Signal')
        qtfft = sip.wrapinstance(fft.pyqwidget(), Qt.QWidget)
        self.qtwidgets.append(qtfft)

        plot = qtgui.time_sink_f(int(0.1 * sample_rate / decim), sample_rate / decim, "Scope", 2)
        plot.enable_grid(True)
        plot.set_update_time(0.1)
        plot.set_y_axis(-2, 2)
        plot.set_line_label(0, 'RSSI')
        plot.set_line_label(1, 'FSK')
        plot.set_trigger_mode(qtgui.TRIG_MODE_AUTO, qtgui.TRIG_SLOPE_POS, 0.1, 0, 0, '')
        qtplot = sip.wrapinstance(plot.pyqwidget(), Qt.QWidget)
        self.qtwidgets.append(qtplot)

        plot2 = qtgui.time_sink_f(int(0.005 * sample_rate / decim), sample_rate / decim, "Packet View", 1)
        plot2.enable_grid(True)
        plot2.set_update_time(0.1)
        plot2.set_y_axis(-2, 2)
        plot2.set_line_label(0, 'FSK')
        plot2.set_trigger_mode(qtgui.TRIG_MODE_AUTO, qtgui.TRIG_SLOPE_POS, 0.1, 0, 0, '')
        qtplot2 = sip.wrapinstance(plot2.pyqwidget(), Qt.QWidget)
        self.qtwidgets.append(qtplot2)

        # Flowgraph
        self.connect(self.src, self.filt, squelch, demod, clock, slicer, sync, sink)
        self.connect(self.src, fft)
        self.connect(demod, (plot, 0))
        self.connect(self.filt, ctof, (plot, 1))
        self.connect(demod, (plot2, 0))

    def tune_offset(self, freq):
        self.filt.set_center_freq(freq)

    def get_qtwidgets(self):
        return self.qtwidgets


if __name__ == '__main__':
    a = tinymon()
    a.run()
python
""" Ethereum Spurious Dragon Hardfork ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The fifth Ethereum hardfork. """ MAINNET_FORK_BLOCK = 2675000 CHAIN_ID = 1
python
import libkol

from ..Error import InvalidActionError, UnknownError
from ..Trophy import Trophy
from .request import Request


class trophy_buy(Request[bool]):
    def __init__(self, session: "libkol.Session", trophy: Trophy) -> None:
        super().__init__(session)
        data = {"action": "buytrophy", "whichtrophy": trophy.id}
        self.request = session.request("trophy.php", data=data)

    @staticmethod
    async def parser(content: str, **kwargs) -> bool:
        if "<td>You don't meet the requirements for that trophy.</td>" in content:
            raise InvalidActionError("Cannot get that trophy")

        if "<td>Your trophy has been installed at your campsite.</td>" not in content:
            raise UnknownError("Unknown error buying trophy")

        return True
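# Minimal usage sketch (commented out; the login call and the way results are
# awaited are hypothetical assumptions, not defined in this file):
#
#   session = await libkol.Session.login("username", "password")
#   bought = await trophy_buy(session, some_trophy).parse()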
python
""" Methods for validating input params given via url or ajax """ import logging from typing import Optional, Union from dbas.lib import Relations from .database import DBDiscussionSession from .database.discussion_model import Argument, Statement, Premise, StatementToIssue LOG = logging.getLogger(__name__) def is_integer(variable, ignore_empty_case=False): """ Validates if variable is an integer. :param variable: some input :param ignore_empty_case: :rtype: boolean """ if variable is None: return False if ignore_empty_case and len(str(variable)) == 0: return True try: int(variable) return True except (ValueError, TypeError): return False def check_reaction(attacked_arg_uid: Union[int, str], attacking_arg_uid: Union[int, str], relation: Relations): """ Checks whether the attacked argument uid and the attacking argument uid are connected via the given relation :param attacked_arg_uid: Argument.uid :param attacking_arg_uid: Argument.uid :param relation: Relations :return: Boolean """ LOG.debug("%s from %s to %s", relation.value, attacking_arg_uid, attacked_arg_uid) malicious_val = [ not is_integer(attacked_arg_uid), not is_integer(attacking_arg_uid), is_argument_forbidden(attacked_arg_uid), is_argument_forbidden(attacking_arg_uid) ] if any(malicious_val): return False relation_mapper = { Relations.UNDERMINE: related_with_undermine, Relations.UNDERCUT: related_with_undercut, Relations.REBUT: related_with_rebut, Relations.SUPPORT: related_with_support } if relation in relation_mapper: return relation_mapper[relation](attacked_arg_uid, attacking_arg_uid) LOG.debug("else-case") return False def check_belonging_of_statement(issue_uid, statement_uid): """ Check whether current Statement.uid belongs to the given Issue :param issue_uid: Issue.uid :param statement_uid: Statement.uid :return: """ db_statement2issue = DBDiscussionSession.query(StatementToIssue).filter( StatementToIssue.statement_uid == statement_uid, StatementToIssue.issue_uid == issue_uid).first() return db_statement2issue is not None def check_belonging_of_arguments(issue_uid: int, argument_uids: list) -> bool: """ Check whether current Argument.uid belongs to the given Issue :param issue_uid: Issue.uid :param argument_uids: Argument.uid :return: Boolean """ db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid.in_(argument_uids), Argument.issue_uid == issue_uid).all() return len(db_argument) == len(argument_uids) def check_belonging_of_premisegroups(issue_uid, premisegroups): """ Check whether all Groups in Premisgroups belongs to the given Issue :param issue_uid: Issue.uid :param premisegroups: [PremiseGroup.uid] :return: Boolean """ all_premises = [] for pgroup in premisegroups: all_premises += DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=pgroup).all() related = [premise.issue_uid == issue_uid for premise in all_premises] return all(related) def is_position(statement_uid): """ True if current statement is a position :param statement_uid: Statement.uid :return: Boolean """ db_statement = DBDiscussionSession.query(Statement).get(statement_uid) return db_statement.is_position def related_with_undermine(attacked_arg_uid, attacking_arg_uid): """ Check if first argument is undermines by the second one :param attacked_arg_uid: Argument.uid :param attacking_arg_uid: Argument.uid :return: Boolean """ # conclusion of the attacking argument db_attacking_arg = DBDiscussionSession.query(Argument).filter_by(uid=attacking_arg_uid).first() if not db_attacking_arg: return False # which pgroups has the conclusion as 
premise db_attacked_premises = DBDiscussionSession.query(Premise).filter_by( statement_uid=db_attacking_arg.conclusion_uid).all() if not db_attacked_premises: return False attacked_args = DBDiscussionSession.query(Argument).filter_by(uid=attacked_arg_uid) undermines = [attacked_args.filter_by(premisegroup_uid=p.premisegroup_uid).first() for p in db_attacked_premises] return any(undermines) def related_with_undercut(attacked_arg_uid, attacking_arg_uid): """ Check if first argument is undercutted by the second one :param attacked_arg_uid: Argument.uid :param attacking_arg_uid: Argument.uid :return: Boolean """ db_attacking_arg = DBDiscussionSession.query(Argument).filter(Argument.uid == attacking_arg_uid, Argument.argument_uid == attacked_arg_uid).first() return db_attacking_arg is not None def related_with_rebut(attacked_arg_uid, attacking_arg_uid): """ Check if first argument is rebutted by the second one :param attacked_arg_uid: Argument.uid :param attacking_arg_uid: Argument.uid :return: Boolean """ db_attacking_arg = DBDiscussionSession.query(Argument).get(attacking_arg_uid) db_attacked_arg = DBDiscussionSession.query(Argument).get(attacked_arg_uid) if not db_attacked_arg or not db_attacking_arg or not db_attacked_arg.conclusion_uid: return False # do have both arguments the same conclusion? same_conclusion = db_attacking_arg.conclusion_uid == db_attacked_arg.conclusion_uid attacking1 = not db_attacking_arg.is_supportive and db_attacked_arg.is_supportive attacking2 = not db_attacked_arg.is_supportive and db_attacking_arg.is_supportive attacking = attacking1 or attacking2 return same_conclusion and attacking def related_with_support(attacked_arg_uid, attacking_arg_uid): """ Check if both arguments support/attack the same conclusion :param attacked_arg_uid: Argument.uid :param attacking_arg_uid: Argument.uid :return: Boolean """ db_first_arg = DBDiscussionSession.query(Argument).get(attacking_arg_uid) db_second_arg = DBDiscussionSession.query(Argument).get(attacked_arg_uid) if not db_first_arg or not db_second_arg: return False not_none = db_first_arg.conclusion_uid is not None same_conclusion = db_first_arg.conclusion_uid == db_second_arg.conclusion_uid supportive = db_first_arg.is_supportive is db_second_arg.is_supportive return same_conclusion and not_none and supportive def get_relation_between_arguments(arg1_uid: int, arg2_uid: int) -> Optional[Relations]: """ Get the relation between given arguments :param arg1_uid: Argument.uid :param arg2_uid: Argument.uid :return: String or None """ if related_with_undermine(arg1_uid, arg2_uid): return Relations.UNDERMINE if related_with_undercut(arg1_uid, arg2_uid): return Relations.UNDERCUT if related_with_rebut(arg1_uid, arg2_uid): return Relations.REBUT if related_with_support(arg1_uid, arg2_uid): return Relations.SUPPORT LOG.debug("%s NONE %s", arg1_uid, arg2_uid) return None def is_argument_forbidden(uid): """ Is the given argument disabled? :param uid: Argument.uid :return: Boolean """ if not is_integer(uid): return False db_argument = DBDiscussionSession.query(Argument).get(uid) if not db_argument: return False return db_argument.is_disabled
python
# Android Device Testing Framework ("dtf")
# Copyright 2013-2016 Jake Valletta (@jake_valletta)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Wrapper API for using colors in dtf modules"""

from __future__ import absolute_import

from colored import fg, attr

import dtf.globals as glbl

COLOR_ERR = fg(1)
COLOR_WARN = fg(3)
COLOR_INFO = fg(2)
COLOR_VERB = fg(6)
COLOR_DEB = fg(5)


def __use_colors():
    """Check if colors should be used"""
    return bool(glbl.get_generic_global('Config', 'use_colors') == '1')


def error(message):
    """Color format a message for errors"""
    if __use_colors():
        return "%s%s%s" % (COLOR_ERR, message, attr(0))
    else:
        return message


def warning(message):
    """Color format a message for warnings"""
    if __use_colors():
        return "%s%s%s" % (COLOR_WARN, message, attr(0))
    else:
        return message


def info(message):
    """Color format a message for informational messages"""
    if __use_colors():
        return "%s%s%s" % (COLOR_INFO, message, attr(0))
    else:
        return message


def verbose(message):
    """Color format a message for verbose messages"""
    if __use_colors():
        return "%s%s%s" % (COLOR_VERB, message, attr(0))
    else:
        return message


def debug(message):
    """Color format a message for debugging"""
    if __use_colors():
        return "%s%s%s" % (COLOR_DEB, message, attr(0))
    else:
        return message


def bold(message):
    """Format a bold message"""
    if __use_colors():
        return "%s%s%s" % (attr('bold'), message, attr(0))
    else:
        return message
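# Minimal usage sketch (commented out; assumes a dtf config where the
# 'use_colors' global is set, otherwise plain text is returned):
#
#   print(error("flash failed"))
#   print(bold(info("device connected")))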
python
# -*- coding: utf-8 -*-
from django.contrib import admin

from .models import Topic, Course, Document


@admin.register(Topic)
class TopicAdmin(admin.ModelAdmin):
    list_display = ('id', 'name', 'description')
    search_fields = ('name',)


@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
    list_display = (
        'id',
        'topic',
        'name',
        'abstract',
        'created_date',
        'updated_date',
    )
    list_filter = ('topic', 'created_date', 'updated_date')
    search_fields = ('name',)


@admin.register(Document)
class DocumentAdmin(admin.ModelAdmin):
    list_display = (
        'id',
        'course',
        'name',
        'file',
        'created_date',
        'updated_date',
    )
    list_filter = ('course', 'created_date', 'updated_date')
    search_fields = ('name',)
python
import requests

from indra.statements import *

base_url = 'http://localhost:8080'


def test_filter_by_type():
    st1 = Phosphorylation(Agent('a'), Agent('b'))
    st2 = Complex([Agent('a'), Agent('b')])
    stmts_json = stmts_to_json([st1, st2])
    url = base_url + '/preassembly/filter_by_type'
    data = {'statements': stmts_json, 'type': 'phosphorylation'}
    res = requests.post(url, json=data)
    res_json = res.json()
    stmts_json = res_json.get('statements')
    stmts = stmts_from_json(stmts_json)
    assert len(stmts) == 1


def test_filter_grounded_only():
    a = Agent('a', db_refs={'HGNC': '1234'})
    b = Agent('b', db_refs={'HGNC': '1235'})
    c = Agent('c', db_refs={'TEXT': 'c'})
    d = Agent('d', db_refs={})
    st1 = Phosphorylation(a, b)
    st2 = Phosphorylation(a, c)
    st3 = Phosphorylation(a, d)
    stmts_json = stmts_to_json([st1, st2, st3])
    url = base_url + '/preassembly/filter_grounded_only'
    data = {'statements': stmts_json, 'type': 'phosphorylation'}
    res = requests.post(url, json=data)
    res_json = res.json()
    stmts_json = res_json.get('statements')
    stmts = stmts_from_json(stmts_json)
    assert len(stmts) == 1


def test_loopy():
    url = base_url + '/reach/process_text'
    res = requests.post(url, json={'text': 'MEK activates ERK.'})
    url = base_url + '/assemblers/sif/loopy'
    res = requests.post(url, json=res.json())
    res_json = res.json()
    print(res_json.get('loopy_url'))
python
def main(j, args, params, tags, tasklet):
    page = args.page
    infomgr = j.apps.actorsloader.getActor("system", "infomgr")
    args = args.tags.getValues(id=None, start=0, stop=0)
    id = args["id"]
    data = infomgr.extensions.infomgr.getInfo5Min(id, args["start"], args["stop"], epoch2human=True)
    if data is not None:
        page.addList(data)
    else:
        page.addMessage("No data for %s" % id)
    params.result = page
    return params


def match(j, args, params, tags, tasklet):
    return True
python
"""Package devops entry point.""" from pkg_resources import get_distribution, DistributionNotFound try: # The name must be the same as the value of the "name" key in the setup.py file __version__ = get_distribution(__package__).version except DistributionNotFound: pass
python
from typing import List

from numa import bitmask_t, LIBNUMA


def get_bitset_list(bitmask: bitmask_t) -> List[int]:
    return list(filter(lambda node: LIBNUMA.numa_bitmask_isbitset(bitmask, node) != 0,
                       range(bitmask.contents.size)))
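# Minimal usage sketch (commented out; assumes LIBNUMA is a ctypes binding to
# libnuma with restypes configured, e.g. for numa_get_mems_allowed):
#
#   mask = LIBNUMA.numa_get_mems_allowed()
#   print(get_bitset_list(mask))  # e.g. [0] on a single-NUMA-node machine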
python
from pylab import *
import plotly.graph_objs as go
from scipy.interpolate import interp1d
from plotly.offline import iplot, _plot_html
from IPython.display import HTML, display
from plotly.tools import FigureFactory as FF

from .riemannian_manifold import RManifold


class SurfaceOfRevolution(RManifold):
    "Encodes a surface of revolution in R^3, typically a torus or a sphere."

    def __init__(self, R, Rp, Rpp, Z, Zp, D, vis_mode='3D'):
        """
        Creates a Surface (d=2) of Revolution from function handles.

        Arguments :
        R   -- @(r,t) -> R(r,t), the distance to z-axis
        Rp  -- its first derivative
        Rpp -- its second derivative
        Z   -- elevation function
        Zp  -- derivative of the elevation function
        D   -- periodic domain bounds, [[min_r, max_r], [min_t, max_t]]
        """
        g = lambda q: array([[1, 0], [0, R(q[0])**2]])
        RManifold.__init__(self, 2, g)
        self.D = D
        self.R = R
        self.Rp = Rp
        self.Rpp = Rpp
        self.Z = Z
        self.Zp = Zp
        self.vis_mode = vis_mode
        self.upsample_trajs = False

    def K(self, q, p, *args):
        """Overrides the generic kernel function for better efficiency.
        K(r, theta) = [ 1,        0 ]
                      [ 0, 1/R(r)^2 ]
        """
        assert q.shape == (2, ), 'Wrong dimension of the starting point.'
        f = self.R(q[0])**2
        p = atleast_2d(p)  #
        if len(p) == 1:
            return array((p[0, 0], p[0, 1] / f))
        else:
            return atleast_2d(vstack((p[:, 0], p[:, 1] / f))).T

    def L2_repr_p(self, q, p, *args):
        """Mapping from the cotangent plane endowed with Kernel metric
        to R^2 endowed with the standard dot product.
        K(r, theta)^.5 = [ 1,      0 ]
                         [ 0, 1/R(r) ]
        """
        assert q.shape == (2, ), 'Wrong dimension of the starting point.'
        f = self.R(q[0])
        p = atleast_2d(p)
        return atleast_2d(vstack((p[:, 0], p[:, 1] / f))).T

    def upP(self, q, p, *args):
        """Returns an update step of the momentum p in the geodesic equations.
        - .5*d_(r,theta) (p, K_(r,theta) p) = [ p_theta^2 * R'(r) / R(r)^3 ]
                                              [ 0                          ]
        """
        return array([p[1]**2 * self.Rp(q[0]) / (self.R(q[0])**3), 0])

    def gradq_pKqz(self, p, q, z, *args):
        """Useful for the adjoint integration scheme.
        d_(r,theta) (p, K_(r,theta) z) = [ -2*p_t * z_t * R'(r) / R(r)^3 ]
                                         [ 0                             ]
        """
        return array([-2 * p[1] * z[1] * self.Rp(q[0]) / (self.R(q[0])**3), 0])

    def dq_gradq_pKqp_a(self, q, p, a, *args):
        """Useful for the adjoint integration scheme."""
        r = q[0]
        return array([-2 * a[0] * p[1]**2 * (self.Rpp(r) * self.R(r) - 3 * self.Rp(r)**2) / self.R(r)**4, 0])

    def dq_Kqp_a(self, q, p, a, *args):
        """Useful for the adjoint integration scheme.
        d_(r,theta) (K_(r,theta) p) . a = [ 0                                  ]
                                          [ -2*a_r p_theta * R'(r) / R(r)^3 ]
        """
        return array([0, -2 * a[0] * p[1] * self.Rp(q[0]) / (self.R(q[0])**3)])

    def I(self, q=None, R=None, Theta=None):
        """Isometrically embeds a collection of points in the euclidean space (typically, R^2 -> R^3).
        Input points are identified 'modulo D'.
        Two usages :
            I(q=...), with a 2-by-npoints array
            I(R=..., Theta=...), with two arrays of same shape
        """
        if q is not None:  # plotting a line
            q = atleast_2d(q)
            assert (q.shape[1] == self.d), 'q does not have the right size - dim x npoints.'
            R = q[:, 0]
            Theta = q[:, 1]
            return vstack(((self.R(R)) * cos(Theta),
                           (self.R(R)) * sin(Theta),
                           self.Z(R))).T
        elif (R is not None) and (Theta is not None):  # [X,Y,Z] = self.I(R, Theta)
            assert (R.shape == Theta.shape), 'I should be applied on two matrices of the same size'
            return ((self.R(R)) * cos(Theta),  # X
                    (self.R(R)) * sin(Theta),  # Y
                    self.Z(R))                 # Z
        else:
            raise Exception('Incorrect Usage.')

    def tangent_basis(self, q):
        """Returns the standard basis (e_r,e_t) in (R^3)x(R^3) at positions given by q."""
        q = atleast_2d(q)
        assert (q.shape[1] == self.d), 'q does not have the right size - dim x npoints.'
        r = q[:, 0]
        Theta = q[:, 1]
        padding = zeros(Theta.shape)
        e_r = self.Rp(r) * vstack((cos(Theta), sin(Theta), padding)) \
            + vstack((padding, padding, self.Zp(r)))
        e_t = self.R(r) * vstack((-sin(Theta), cos(Theta), padding))
        return (e_r.T, e_t.T)

    def unit_tangent_basis(self, q):
        """Same as tangent_basis, but normalized wrt the dot product in R^3."""
        (e_cr, e_t) = self.tangent_basis(q)
        e_ct = e_t.T / self.R(q[:, 0])
        return (e_cr, e_ct.T)

    def dI(self, q, v):
        """Differential of I at the points q, applied to v."""
        (e_r, e_t) = self.tangent_basis(q)
        return (atleast_2d(v[:, 0]).T * e_r.T + atleast_2d(v[:, 1]).T * e_t.T).T

    """ Distances """
    def squared_distance(self, Q, Xt, *args):
        """Returns 1/2 * |I(Q) - Xt|^2 and its Q-gradient."""
        X = self.I(q=Q)
        d2 = .5 * sum((Xt - X)**2, 1)
        dX = .5 * 2 * (X - Xt)
        (e_cr, e_ct) = self.tangent_basis(Q)
        # NONONO ! We're not inverting the differential,
        # but *transposing* it : no need for renormalization !
        # n2_r = sum(e_cr**2, 1)
        # n2_t = sum(e_ct**2, 1)
        # dQ = vstack( (sum( e_cr * dX , 1) / n2_r,
        #               sum( e_ct * dX , 1) / n2_t ) )
        dQ = vstack((sum(e_cr * dX, 1),
                     sum(e_ct * dX, 1)))
        return (d2, dQ)

    def distance(self, Q, Xt, *args):
        """Returns |I(Q) - Xt| and its Q-gradient."""
        X = self.I(q=Q)
        Xt = Xt.reshape(X.shape)  # In case of line/column confusion
        d = sqrt(sum((Xt - X)**2, 1))
        dX = (X - Xt) / (d + 0.00000001)
        (e_cr, e_ct) = self.tangent_basis(Q)
        n2_r = sum(e_cr**2, 1)
        n2_t = sum(e_ct**2, 1)
        dQ = vstack((sum(e_cr * dX, 1) / n2_r,
                     sum(e_ct * dX, 1) / n2_t))
        return (d, dQ)

    """Periodization & Co."""
    def periodize(self, q):
        """q is a n-by-d array of coordinates
        nq gives their representations in the fundamental domain
        as required by self.D
        """
        nq = q.astype(float)  # We're using mod, so we have to be careful !
        assert (q.shape[1] == self.d)
        for d in range(self.d):
            nq[:, d] = mod(nq[:, d] - self.D[d, 0], self.D[d, 1] - self.D[d, 0]) + self.D[d, 0]
        return nq

    def periodize_traj(self, qt):
        """qt is a 2xn trajectory
        trajs is a list of trajectories on the rectangle domain
        """
        pqt = self.periodize(qt)
        tile_dims = self.D[:, 1] - self.D[:, 0]
        tiles = ((qt - pqt) / tile_dims).round()
        cuts = tiles[1:-1, :] != tiles[0:-2, :]
        cuts = any(cuts, 1)
        cutlocs = concatenate((find(cuts), [qt.shape[0] - 1]))
        ncuts = len(cutlocs)
        trajs = []
        ind = 0
        for i in range(ncuts):
            to_concat = []
            if ind > 0:
                to_concat.append(pqt[ind - 1] + tile_dims * (tiles[ind - 1] - tiles[ind]))
            to_concat.append(pqt[range(ind, cutlocs[i] + 1)])
            if cutlocs[i] < qt.shape[0] - 1:
                to_concat.append(pqt[cutlocs[i] + 1] + tile_dims * (tiles[cutlocs[i] + 1] - tiles[cutlocs[i]]))
            trajs += [vstack(to_concat)]
            ind = cutlocs[i] + 1
        return trajs

    def upsample(self, qt):  # !!! to be tested !!!
        """upsample a trajectory by linear interpolation
        useful for 3D-plotting a not-so-well sampled trajectory
        """
        if self.dt > 0.1:
            # return numpy.interp(linspace(0, qt.shape[1]), range(qt.shape[1]), qt)
            f = interp1d(range(qt.shape[0]), qt, axis=0)
            return f(linspace(0, qt.shape[0] - 1, qt.shape[0] * round(self.dt / 0.001)))
        else:
            return qt

    """ Manifold display """
    def show(self, mode, ax=None):
        self.vis_mode = mode
        if ax is None:
            ax = []
        self.layout = go.Layout(
            title='',
            width=800,
            height=800,
            legend=dict(x=.8, y=1)
        )
        self.current_axis = ax
        if self.vis_mode == '2D':
            self.layout['legend']['x'] = 1
            self.show_2D()
        elif self.vis_mode == '3D':
            self.show_3D()

    def show_2D(self):
        # (r,theta) -> (y,x)
        self.layout['xaxis'] = dict(range=[-pi, pi])
        # tickvals = [-pi, 0, pi]
        # ticktext = ['$-\\pi$', '$0$', '$\\pi$'] )
        self.layout['yaxis'] = dict(range=[-pi * self.b, pi * self.b])
        # tickvals = [-pi*self.b, 0, pi*self.b],
        # ticktext = ['$-\\pi b$', '$0$', '$\\pi b$'] )

    def show_3D(self):
        r = linspace(self.D[0, 0], self.D[0, 1], 45)
        th = linspace(self.D[1, 0], self.D[1, 1], 45)
        (R, TH) = meshgrid(r, th)
        b_foo = self.b
        self.b = 0.99 * self.b
        (X, Y, Z) = self.I(R=R, Theta=TH)
        self.b = b_foo
        surface = go.Surface(x=X, y=Y, z=Z,
                             opacity=0.99,
                             colorscale=[[0, 'rgb(255,100,0)'], [1, 'rgb(255,255,0)']],
                             autocolorscale=False,
                             showscale=False,
                             hoverinfo="none",
                             contours={'x': {'highlight': False, 'highlightwidth': 1},
                                       'y': {'highlight': False, 'highlightwidth': 1},
                                       'z': {'highlight': False, 'highlightwidth': 1}})
        self.layout['scene']['aspectmode'] = 'cube'
        m = 1.2 * (self.a + self.b)
        self.layout['scene']['xaxis'] = dict(range=[-m, m])
        self.layout['scene']['yaxis'] = dict(range=[-m, m])
        self.layout['scene']['zaxis'] = dict(range=[-m, m])
        self.current_axis.append(surface)

    def plot_traj(self, qt, **kwargs):
        if self.vis_mode == '2D':
            trajs = self.periodize_traj(qt)
            for traj in trajs:
                # (r,theta) -> (y,x)
                curve = go.Scatter(x=traj[:, 1], y=traj[:, 0], mode='lines', hoverinfo='none', **kwargs)
                self.current_axis.append(curve)
        elif self.vis_mode == '3D':
            if type(qt[0]) is not list:
                qt = [qt]
            if self.upsample_trajs:
                qt = list(self.upsample(q) for q in qt)
            traj = list(self.I(q=q) for q in qt)
            separator = array([None] * 3).reshape((1, 3))
            traj = vstack([vstack((i, separator)) for i in traj])
            curve = go.Scatter3d(x=traj[:, 0], y=traj[:, 1], z=traj[:, 2], mode='lines', hoverinfo='none', **kwargs)
            self.current_axis.append(curve)

    # Vector field display
    def quiver(self, qt, vt, **kwargs):
        if self.vis_mode == '2D':
            self.quiver_2D(qt, vt, **kwargs)
        elif self.vis_mode == '3D':
            self.quiver_3D(qt, vt, **kwargs)

    def quiver_2D(self, qt, vt, **kwargs):
        # (r,theta) -> (y,x)
        qt = self.periodize(qt)
        arrows = FF.create_quiver(qt[:, 1], qt[:, 0], vt[:, 1], vt[:, 0], **kwargs)
        self.current_axis.append(arrows)

    def quiver_3D(self, qt, vt, **kwargs):
        if qt.shape[1] == 2:
            Qt = self.I(qt)
            Vt = self.dI(qt, vt)
        elif qt.shape[1] == 3:
            Qt = qt
            Vt = vt
        # quiver3 is not implemented by plotly.js :
        # we have to settle for a poor derivative...
        H = Qt
        T = H + Vt
        arrows = go.Scatter3d(
            x=(hstack(tuple((H[i, 0], T[i, 0], None) for i in range(T.shape[0])))),
            y=(hstack(tuple((H[i, 1], T[i, 1], None) for i in range(T.shape[0])))),
            z=(hstack(tuple((H[i, 2], T[i, 2], None) for i in range(T.shape[0])))),
            mode='lines', **kwargs)
        self.current_axis.append(arrows)

    """Marker field display"""
    def marker(self, q, **kwargs):
        q = atleast_2d(q)
        if self.vis_mode == '2D':
            self.marker_2D(q, **kwargs)
        elif self.vis_mode == '3D':
            self.marker_3D(q, **kwargs)

    def marker_2D(self, q, **kwargs):
        # (r,theta) -> (y,x)
        Q = self.periodize(q)
        points = go.Scatter(x=array([Q[:, 1]]), y=array([Q[:, 0]]), mode='markers', hoverinfo='name', **kwargs)
        self.current_axis.append(points)

    def marker_3D(self, q, **kwargs):
        if q.shape[1] == 2:
            Q = self.I(q=q)
        elif q.shape[1] == 3:
            Q = q
        points = go.Scatter3d(x=Q[:, 0], y=Q[:, 1], z=Q[:, 2], mode='markers', hoverinfo='name', **kwargs)
        self.current_axis.append(points)
python
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtWebEngineWidgets import *
import sys
import click

msg = QMessageBox()

urls = {
    'github': "https://github.com/",
    'youtube': "https://youtube.com",
    'discord': "https://discord.com/",
    'pypi': "https://pypi.org",
    'stackoverflow': "https://stackoverflow.com",
    'AISC': "https://aistudent.community/",
    'reddit': "https://reddit.com",
    'gmail': "https://www.gmail.com/",
    'spotify': "https://spotify.com",
    'udemy': "https://www.udemy.com",
    'linuxmint': "https://linuxmint.com/",
    'dogemeet': "https://doge-meet-demo.up.railway.app/",
    'google': "https://google.com",
    'duckduckgo': "https://duckduckgo.com",
    'titanurl': "https://titul.herokuapp.com/",
    'lolacli': "http://lolacli.herokuapp.com",
    'whatsapp-web': "https://web.whatsapp.com/",
    'netflix': "https://netflix.com/",
    'prime': "https://primevideo.com/",
    'disney+': "https://disneyplus.com/",
    'amazon': "https://amazon.com/",
    'pdfdrive': "https://pdfdrive.com/",
    'duolingo': "https://www.duolingo.com/",
    'wikipedia': "https://wikipedia.org/",
    'facebook': "https://facebook.com/",
    'instagram': "https://instagram.com/"
}


class WebEnginePage(QWebEnginePage):
    def __init__(self, *args, **kwargs):
        QWebEnginePage.__init__(self, *args, **kwargs)
        self.featurePermissionRequested.connect(self.onFeaturePermissionRequested)

    def onFeaturePermissionRequested(self, url, feature):
        if feature in (QWebEnginePage.MediaAudioCapture,
                       QWebEnginePage.MediaVideoCapture,
                       QWebEnginePage.MediaAudioVideoCapture):
            self.setFeaturePermission(url, feature, QWebEnginePage.PermissionGrantedByUser)
        else:
            self.setFeaturePermission(url, feature, QWebEnginePage.PermissionDeniedByUser)


def _downloadRequested(item):  # QWebEngineDownloadItem
    print('downloading to', item.path())
    item.accept()
    msg.setWindowTitle("Downloading")
    msg.setText("Downloading file...")
    msg.setIcon(QMessageBox.Information)
    x = msg.exec_()


application = QApplication(sys.argv)


@click.group()
@click.version_option('0.3.0')
def main():
    """RSB - webpages in GUI VIEW"""
    pass


@main.command('open', help='"rsb open <url>" opens your desired URL in the RSB window')
@click.argument('url', nargs=1)
def open(url):
    webpage = WebEnginePage()
    webengine = QWebEngineView()
    webengine.setWindowTitle("R S B")
    webengine.page().profile().downloadRequested.connect(_downloadRequested)
    webengine.setPage(webpage)
    webengine.load(QUrl(url))
    webengine.show()
    sys.exit(application.exec_())


@main.command('open_pre', help="Allows you to use presets for websites. See the presets using the 'presets' command. Syntax: 'rsb open_pre <preset>'")
@click.argument('i', nargs=1)
def open_pre(i):
    webpage = WebEnginePage()
    webengine = QWebEngineView()
    webengine.setWindowTitle("R S B")
    webengine.page().profile().downloadRequested.connect(_downloadRequested)
    webengine.setPage(webpage)
    url = urls[i]
    webengine.load(QUrl(url))
    webengine.show()
    sys.exit(application.exec_())


@main.command('presets', help='type "presets" to get a list of preset websites. Run a preset with "rsb open_pre <preset>"')
def presets():
    for key, item in urls.items():
        print(f"{key} {item}")


if __name__ == "__main__":
    main()
python
N = int(input())
K = int(input())

# dp1: distribution of player 1's total. The first K dice are "loaded":
# they only show 4-6, each face carrying weight 2 (so each die still
# contributes 6 equally-weighted outcomes and the normalizer stays 6**N).
dp1 = {0: 1}
for _ in range(K):
    d = {}
    for k in dp1:
        for i in range(4, 6 + 1):
            d.setdefault(k + i, 0)
            d[k + i] += dp1[k] * 2
    dp1 = d
for _ in range(N - K):
    d = {}
    for k in dp1:
        for i in range(1, 6 + 1):
            d.setdefault(k + i, 0)
            d[k + i] += dp1[k]
    dp1 = d

# dp2: distribution of player 2's total over N fair dice.
dp2 = {0: 1}
for _ in range(N):
    d = {}
    for k in dp2:
        for i in range(1, 6 + 1):
            d.setdefault(k + i, 0)
            d[k + i] += dp2[k]
    dp2 = d

# Count the weighted outcomes where player 1's total strictly exceeds player 2's.
t = 0
for a in dp1:
    for b in dp2:
        if a <= b:
            continue
        t += dp1[a] * dp2[b]

result = t / pow(6, N * 2)
print(result)
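# A brute-force cross-check of the DP above for small N and K (a hypothetical
# helper, not part of the original script; each loaded die is uniform over
# 4-6, which matches the equal per-face weight of 2 used in dp1):
def brute_force(n, k):
    from itertools import product
    faces1 = [range(4, 7)] * k + [range(1, 7)] * (n - k)
    wins = total = 0
    for a in product(*faces1):
        for b in product(range(1, 7), repeat=n):
            total += 1
            wins += sum(a) > sum(b)
    return wins / total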
python
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc

from cs3.auth.provider.v1beta1 import provider_api_pb2 as cs3_dot_auth_dot_provider_dot_v1beta1_dot_provider__api__pb2


class ProviderAPIStub(object):
    """Auth Provider API

    The Auth Provider API is meant to authenticate a client.

    The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
    "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
    document are to be interpreted as described in RFC 2119.

    The following are global requirements that apply to all methods:

    Any method MUST return CODE_OK on a successful operation.
    Any method MAY return NOT_IMPLEMENTED.
    Any method MAY return INTERNAL.
    Any method MAY return UNKNOWN.
    Any method MAY return UNAUTHENTICATED.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.Authenticate = channel.unary_unary(
            '/cs3.auth.provider.v1beta1.ProviderAPI/Authenticate',
            request_serializer=cs3_dot_auth_dot_provider_dot_v1beta1_dot_provider__api__pb2.AuthenticateRequest.SerializeToString,
            response_deserializer=cs3_dot_auth_dot_provider_dot_v1beta1_dot_provider__api__pb2.AuthenticateResponse.FromString,
        )


class ProviderAPIServicer(object):
    """Auth Provider API

    The Auth Provider API is meant to authenticate a client.

    The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
    "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
    document are to be interpreted as described in RFC 2119.

    The following are global requirements that apply to all methods:

    Any method MUST return CODE_OK on a successful operation.
    Any method MAY return NOT_IMPLEMENTED.
    Any method MAY return INTERNAL.
    Any method MAY return UNKNOWN.
    Any method MAY return UNAUTHENTICATED.
    """

    def Authenticate(self, request, context):
        """Authenticate authenticates a client.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_ProviderAPIServicer_to_server(servicer, server):
    rpc_method_handlers = {
        'Authenticate': grpc.unary_unary_rpc_method_handler(
            servicer.Authenticate,
            request_deserializer=cs3_dot_auth_dot_provider_dot_v1beta1_dot_provider__api__pb2.AuthenticateRequest.FromString,
            response_serializer=cs3_dot_auth_dot_provider_dot_v1beta1_dot_provider__api__pb2.AuthenticateResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'cs3.auth.provider.v1beta1.ProviderAPI', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


# This class is part of an EXPERIMENTAL API.
class ProviderAPI(object):
    """Auth Provider API

    The Auth Provider API is meant to authenticate a client.

    The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
    "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
    document are to be interpreted as described in RFC 2119.

    The following are global requirements that apply to all methods:

    Any method MUST return CODE_OK on a successful operation.
    Any method MAY return NOT_IMPLEMENTED.
    Any method MAY return INTERNAL.
    Any method MAY return UNKNOWN.
    Any method MAY return UNAUTHENTICATED.
    """

    @staticmethod
    def Authenticate(request,
                     target,
                     options=(),
                     channel_credentials=None,
                     call_credentials=None,
                     insecure=False,
                     compression=None,
                     wait_for_ready=None,
                     timeout=None,
                     metadata=None):
        return grpc.experimental.unary_unary(
            request, target,
            '/cs3.auth.provider.v1beta1.ProviderAPI/Authenticate',
            cs3_dot_auth_dot_provider_dot_v1beta1_dot_provider__api__pb2.AuthenticateRequest.SerializeToString,
            cs3_dot_auth_dot_provider_dot_v1beta1_dot_provider__api__pb2.AuthenticateResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
python
#!/usr/bin/python
'''
GUI interface for extracting any level of image from an SVS file as a new TIFF.
Uses OpenSlide library to extract and decode the image stack.
Tkinter for GUI operations.

Code Quality: http://xkcd.com/1513/ ,
'''
import os
from openslide import *
from Tkinter import *
# import Tkinter.filedialog
import tkFileDialog
import string
import math
from PIL import Image, ImageTk
import numpy as np

# # class imgOutOfBoundsError(Exception.exception):
# #     pass


class ImageRepack():
    #### FUNCTIONS ####
    def __init__(self, parent):
        self.parent = parent
        self.image = None

        # Buttons:
        self.buttonPallet = Frame(parent).grid(row=2, column=1)
        self.New = Button(self.buttonPallet, command=self.buttonNew)
        self.New.configure(text='New Image')
        self.New.grid(row=4, column=1)
        self.Crop = Button(self.buttonPallet, command=self.buttonCrop)
        self.Crop.configure(text='Crop')
        self.Crop.grid(row=4, column=2)
        self.Close = Button(self.buttonPallet, command=lambda arg1=self.parent: self.buttonClose(arg1))
        self.Close.configure(text='Close')
        self.Close.grid(row=4, column=4)

        # Text Boxes:
        self.textPallet = Frame(parent).grid(row=1, column=1)
        self.formatlabel = Label(self.textPallet, text='Format')
        self.formatlabel.grid(row=2, column=1, sticky=W)
        self.format = Text(self.textPallet, height=15, width=20, bg='Aquamarine')
        self.format.config(state=DISABLED)
        self.format.grid(row=3, column=1)
        self.formatlabel = Label(self.textPallet, text='Levels')
        self.formatlabel.grid(row=2, column=2, sticky=W)
        self.levels = Text(self.textPallet, height=15, width=20, bg='NavajoWhite')
        self.levels.config(state=DISABLED)
        self.levels.grid(row=3, column=2)
        self.formatlabel = Label(self.textPallet, text='Dimensions')
        self.formatlabel.grid(row=2, column=3, sticky=W)
        self.dimensions = Text(self.textPallet, height=15, width=20, bg='LightSteelBlue')
        self.dimensions.config(state=DISABLED)
        self.dimensions.grid(row=3, column=3)
        self.filenamebox = Entry(self.textPallet, bg='Linen')
        self.filenamebox.grid(row=1, column=1, columnspan=4, sticky=W + E)

    def buttonNew(self):
        # pathname = tkinter.filedialog.askopenfilename()
        pathname = tkFileDialog.askopenfilename()
        if pathname != '':
            self.image = SVSImage(pathname)
            if self.image.success:
                self.showMeta()
                self.image.showPreview()
            else:
                print("Failed to load image.")

    def showMeta(self):
        # Populate the text fields
        nLevels = self.image.metadata['levels']
        self.format.config(state=NORMAL)
        self.levels.config(state=NORMAL)
        self.dimensions.config(state=NORMAL)
        self.format.delete('1.0', END)
        self.levels.delete('1.0', END)
        self.dimensions.delete('1.0', END)
        self.format.insert(END, self.image.metadata['format'])
        for n in range(0, nLevels):
            self.levels.insert(END, str(n) + '\n\n')
            self.dimensions.insert(END, str(self.image.metadata['dimensions'][n]) + '\n\n')
        self.filenamebox.delete(0, END)
        self.filenamebox.insert(INSERT, self.image.pn)
        self.filenamebox.xview_moveto(0.5)
        self.filenamebox.icursor(END)
        self.format.config(state=DISABLED)
        self.levels.config(state=DISABLED)
        self.dimensions.config(state=DISABLED)

    def buttonCrop(self):
        # Check all settings and selections for validity.
        '''
        - Compare the file path and basename in the entry box against the original.
          They must be different.
        - Check any custom ROI boundaries for validity
        - LATER: Check if the output file exists and suggest a solution
        - Create a new UI box with (2), (2), (1), (1) entry widgets
        '''
        try:
            # Create a box with text fields and two buttons: Accept and Cancel
            self.image.active = True
            fp = self.filenamebox.get()
            sfD = Toplevel()
            frame1 = Frame(sfD)
            (fpBase, finame) = os.path.split(fp)
            finame = finame[0:len(finame) - 4] + '.tif'
            xcl = Label(frame1, text='Top X:').grid(row=1, column=1, sticky=E)
            self.xcorner = Entry(frame1, bg='Linen')
            self.xcorner.bind('<Return>', self.updatePVBox)
            self.xcorner.bind('<KP_Enter>', self.updatePVBox)
            self.xcorner.insert(END, 0)
            self.xcorner.grid(row=1, column=2)
            ycl = Label(frame1, text='Top Y:').grid(row=2, column=1, sticky=E)
            self.ycorner = Entry(frame1)
            self.ycorner.bind('<Return>', self.updatePVBox)
            self.ycorner.bind('<KP_Enter>', self.updatePVBox)
            self.ycorner.insert(END, 0)
            self.ycorner.grid(row=2, column=2)
            xsl = Label(frame1, text='X size:').grid(row=3, column=1)
            self.xsize = Entry(frame1)
            self.xsize.bind('<Return>', self.updatePVBox)
            self.xsize.bind('<KP_Enter>', self.updatePVBox)
            self.xsize.insert(END, 1200)
            self.xsize.grid(row=3, column=2)
            ysl = Label(frame1, text='Y size:').grid(row=4, column=1)
            self.ysize = Entry(frame1)
            self.ysize.bind('<Return>', self.updatePVBox)
            self.ysize.bind('<KP_Enter>', self.updatePVBox)
            self.ysize.insert(END, 1200)
            self.ysize.grid(row=4, column=2)
            lvl = Label(frame1, text='Level:').grid(row=5, column=1)
            self.level = Entry(frame1)
            self.level.bind('<Return>', self.scaleBoxSize)
            self.level.bind('<KP_Enter>', self.scaleBoxSize)
            self.level.insert(END, 0)
            self.level.grid(row=5, column=2)
            nfnl = Label(frame1, text='Out name:').grid(row=6, column=1)
            self.nfn = Entry(frame1)
            self.nfn.grid(row=6, column=2)
            self.nfn.insert(END, finame)
            self.image.cropPropPane = [self.xcorner, self.ycorner, self.xsize, self.ysize, self.level, self.nfn]
            # Lets you control output folder from the "main" window
            (fpBase, temp) = os.path.split(self.filenamebox.get())
            buttonOK = Button(frame1, text='OK', command=self.buttonCropOK)
            buttonOK.grid(row=7, column=1)
            buttonClose2 = Button(frame1, text='Done', command=lambda arg1=sfD: self.buttonClose(arg1))
            buttonClose2.grid(row=7, column=3)
            frame1.pack()
        except AttributeError:
            print("Open an image first")

    def updatePVBox(self, event):
        properties = self.pullCropSets()
        if self.allLegal(properties):
            targetLvl = int(self.level.get())
            factor = self.image.metadata['downsamples'][targetLvl]
            # w.r.t. Level 0:
            rawx1 = int(self.xcorner.get())
            rawy1 = int(self.ycorner.get())
            rawx2 = rawx1 + (int(self.xsize.get()) * factor)
            rawy2 = rawy1 + (int(self.ysize.get()) * factor)
            print('raw: ', rawx1, rawy1, rawx2, rawy2, ' fact:', factor)
            # w.r.t. preview level & scale
            x1 = int(rawx1 / self.image.dispLvlfact)
            y1 = int(rawy1 / self.image.dispLvlfact)
            x2 = int(rawx2 / self.image.dispLvlfact)
            y2 = int(rawy2 / self.image.dispLvlfact)
            print('disp: ', x1, y1, x2, y2, 'scl:', self.image.dispLvlfact)
            self.image.canvas.delete(self.image.activebox)
            B = self.image.canvas.create_rectangle(x1, y1, x2, y2)
            self.image.activebox = B
            self.writeProps(properties)

    def scaleBoxSize(self, event):
        try:
            newlvl = int(self.level.get())
            oldlvl = self.image.cropProps['lvl']
            old = self.image.SVS.level_downsamples[oldlvl]
            new = self.image.SVS.level_downsamples[newlvl]
            factor = old / new
            print('old:', oldlvl, ' new:', newlvl, ' old fact:', old, ' fact:', new)
            oldx = int(self.xsize.get())
            oldy = int(self.ysize.get())
            print('oldx:', oldx, ' oldy', oldy)
            scx = int(math.floor(oldx * factor))
            scy = int(math.floor(oldy * factor))
            print('scaledx:', scx, ' scaledy:', scy)
            self.xsize.delete(0, END)
            self.xsize.insert(END, scx)
            self.ysize.delete(0, END)
            self.ysize.insert(END, scy)
            properties = self.pullCropSets()
            # Now cropProps are up-to-date and we can use them.
            if self.allLegal(properties):
                self.writeProps(properties)
        except KeyError:
            print('New Level out of bounds.')

    def pullCropSets(self):
        '''
        Populate a dictionary with crop box, new level, and filename
        from the Entry boxes
        '''
        (fpBase, temp) = os.path.split(self.filenamebox.get())
        xcotemp = int(self.xcorner.get())
        ycotemp = int(self.ycorner.get())
        xstemp = int(self.xsize.get())
        ystemp = int(self.ysize.get())
        lvltemp = int(self.level.get())
        ## now done by the "writeProps" function
        # self.image.cropProps['xco'] = int(self.xcorner.get())
        # self.image.cropProps['yco'] = int(self.ycorner.get())
        # self.image.cropProps['xs'] = int(self.xsize.get())
        # self.image.cropProps['ys'] = int(self.ysize.get())
        # self.image.cropProps['lvl'] = int(self.level.get())
        outname = self.nfn.get()
        fptemp = fpBase + os.sep + outname
        return {'xco': xcotemp, 'yco': ycotemp, 'xs': xstemp, 'ys': ystemp,
                'lvl': lvltemp, 'fp': fptemp}

    def allLegal(self, properties):
        '''
        To be executed before writing to cropProps.
        '''
        imgprops = self.image.metadata
        cp = properties
        # pull the level. two-tuple (x,y)
        lv0dims = imgprops['dimensions'][0]
        imgdims = imgprops['dimensions'][cp['lvl']]
        if cp['xco'] < 0 or cp['xco'] > lv0dims[0]:
            print('x out of bounds')
            return False
        if cp['yco'] < 0 or cp['yco'] > lv0dims[1]:
            print('y out of bounds')
            return False
        if cp['xs'] - cp['xco'] > imgdims[0]:
            print('x size too large')
            return False
        if cp['ys'] - cp['yco'] > imgdims[1]:
            print('y size too large')
            return False
        if cp['fp'] == self.image.pn:  # There's a better way to compare str
            print('Invalid file name.')
            return False
        return True  # default to true...? good idea?
def writeProps(self, properties): self.image.cropProps['xco'] = properties['xco'] self.image.cropProps['yco'] = properties['yco'] self.image.cropProps['xs'] = properties['xs'] self.image.cropProps['ys'] = properties['ys'] self.image.cropProps['lvl'] = properties['lvl'] self.image.cropProps['fp'] = properties['fp'] def buttonCropOK(self): properties = self.pullCropSets() if self.allLegal(properties): self.writeProps(properties) self.SaveImg() print( 'Section saved ', self.image.cropProps['fp']) def buttonClose(self, target): self.image.canvas.delete(self.image.activebox) target.destroy() def SaveImg(self): # Save the image #temp: corner = (self.image.cropProps['xco'], self.image.cropProps['yco']) size = (self.image.cropProps['xs'], self.image.cropProps['ys']) fp = self.image.cropProps['fp'] level = self.image.cropProps['lvl'] print( 'Cropping...') print( 'From Level ', level) print( corner, ' to ', size) print( 'Destination: \n', fp) out = self.image.SVS.read_region(corner, level, size) out.save(fp) ''' Child class to the OpenSlide object, which has methods for reading and parsing information from SVS images. Includes attributes: metadata cropProps - a Dictionary holding the active settings for a crop active - Boolean indicating if this instance is active and interfacing to the crop diologue window. pvScale - Integer to downsample the lowest level image for preview etc. ''' class SVSImage(OpenSlide): # Holds an OpenSlide object, with image properties in a dictionary def __init__(self, pn): self.metadata = {} self.cropProps = {'xco':0, 'yco':0, 'xs':0, 'ys':0, 'lvl':0, 'fp':''} self.active = False self.pvScale = 3 self.cropPropPane = None self.activebox = 0 self.pn = pn self.fn = os.path.basename(self.pn) self.preview = Toplevel() try: self.SVS = OpenSlide(pn) self.success = True except OpenSlideError: print( 'Caught file type exception!') self.preview.destroy() self.success = False else: self.metadata['format'] = self.SVS.detect_format(pn) self.metadata['levels'] = self.SVS.level_count self.metadata['dimensions'] = self.SVS.level_dimensions self.metadata['downsamples'] = self.SVS.level_downsamples self.dispLvlfact = self.SVS.level_downsamples[self.SVS.level_count-1]*self.pvScale def showPreview(self): # Create a new window, draw on it the lowest level image, scaled down self.dispLvl = self.metadata['levels'] - 1 (x,y) = self.metadata['dimensions'][self.dispLvl] xx = int(math.floor(x/self.pvScale)) yy = int(math.floor(y/self.pvScale)) i = self.SVS.read_region( (0,0), self.dispLvl, (x,y) ).resize((xx,yy)) self.preview.title(self.fn+' Level '+str(self.dispLvl)) self.canvas = Canvas(self.preview, width=xx, height=yy) self.canvas.pack() # self.PVImage = self.canvas.create_image(0,0) self.canvas.bind("<ButtonPress-1>", self.clickPress) self.canvas.bind("<B1-Motion>", self.clickMotion) self.canvas.bind("<ButtonRelease-1>", self.clickRelease) self.canvas.myIm = ImageTk.PhotoImage(i) self.PVImage = self.canvas.create_image(xx/2,yy/2, image=self.canvas.myIm) # self.PVImage.config(image=self.canvas.myIm) # self.PVImage.pack(fill=BOTH) def clickPress(self, event): if self.active: self.x0 = event.x self.y0 = event.y else: print( 'not active ', event.x, event.y) def clickMotion(self, event): # Use this function to live draw the selection rectangle Not needed now. ''' Must un-draw any existing rectangle before drawing the next one. 
''' if self.active: # box = self.getBox(event.x-self.x0, event.y-self.y0, scaled=False) # print( box) dx = event.x-self.x0 dy = event.y-self.y0 # box = (self.x0, self.y0, self.x0+dx, self.y0+dy) # print( box) self.canvas.delete(self.activebox) self.activebox = self.canvas.create_rectangle(self.x0, self.y0, self.x0+dx, self.y0+dy) def clickRelease(self,event): '''Use this function to update the Entry widgets showing x and y for the upper corner and x and y size. Here check if the mouse has passed the image boundary, and also scale the pixels to the proper scale pulled from the Level entry widget, and using the property (self.level_downsamples).''' if self.active: self.xf = event.x self.yf = event.y dx = self.xf - self.x0 dy = self.yf - self.y0 boundingbox = self.getBox(dx, dy) self.cropPropPane[0].delete(0,END) self.cropPropPane[0].insert(INSERT, boundingbox[0]) self.cropPropPane[1].delete(0,END) self.cropPropPane[1].insert(INSERT, boundingbox[1]) self.cropPropPane[2].delete(0,END) self.cropPropPane[2].insert(INSERT, boundingbox[2]) self.cropPropPane[3].delete(0,END) self.cropPropPane[3].insert(INSERT, boundingbox[3]) else: print( 'not active ', event.x, event.y) def getBox(self, dx, dy): '''1=TOP-LEFT to BOTTOM-RIGHT 2=BOTTOM-LEFT to TOP-RIGHT 3=TOP-RIGHT to BOTTOM-LEFT 4=BOTTOM-RIGHT to TOP-LEFT Also handle if dx and/or dy = 0, which is a line Here, do the scaling to whatever the selected level is. The top-left corner is always in the Level0 reference scale. Returns a box: [x, y, height, width] Input scaled: return scaled or un-scaled box. ''' targetLvl = int(self.cropProps['lvl']) cornerfactor = int(self.SVS.level_downsamples[self.dispLvl]*self.pvScale) factor = int(cornerfactor/self.SVS.level_downsamples[targetLvl]) # print( 'dx:', dx, ' dy:', dy, ' target:', targetLvl, ' scale:', factor,' cornerscale:', cornerfactor) if dx>0 and dy>0: #1 result = [self.x0, self.y0, dx, dy] elif dx>0 and dy<0: #2 result = [self.x0, self.y0+dy, dx, -1*dy] elif dx<0 and dy>0: #3 result = [self.x0+dx, self.y0, -1*dx, dy] elif dx<0 and dy<0: #4 result = [self.x0+dx, self.y0+dy, -1*dx, -1*dy] elif 0 in (dx,dy): return [0, 0, 0, 0] for i in range(0,2): result[i] = int(math.floor(result[i]*cornerfactor)) for i in range(2,4): result[i] = int(math.floor(result[i]*factor)) return result def killPreview(self): self.preview.destroy() ''' Message box for............. ?? ''' class messageBox(): def __init__(self): self.window = Toplevel() pass def killWindow(self): self.window.destroy() def main(): print( '\n'*5) root = Tk() root.wm_title('SVS Repack GUI') IV = ImageRepack(root) root.mainloop() if __name__ == '__main__': main()
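# ---------------------------------------------------------------------------
# Headless sketch of the crop this GUI performs. OpenSlide's read_region
# takes the top-left corner in LEVEL-0 coordinates but the size in pixels of
# the requested level, which is exactly why getBox() above scales the corner
# and the extent by different factors. Paths and numbers are hypothetical.
def crop_svs(svs_path, corner_lvl0, level, size_at_level, out_path):
    slide = OpenSlide(svs_path)
    # corner_lvl0: (x, y) in level-0 pixels; size_at_level: (w, h) at `level`
    region = slide.read_region(corner_lvl0, level, size_at_level)  # RGBA PIL image
    region.convert('RGB').save(out_path)
    slide.close()

# crop_svs('slide.svs', (2048, 2048), 1, (1200, 1200), 'slide_crop.tif')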
python
import pytest import sightreading.randnotes as s def test_diatonic(): assert s.is_sharp(("A#2", 5)) assert s.is_flat(("Bb3", 3)) def test_frets(): assert s.fretnote(6, 0) == ["E2"] assert set(s.fretnote(1, 2)) == set(["F#4", "Gb4"]) def test_padding(): notes = ["C2"] * 7 s.pad_line(notes, start=True, end=True) assert notes == ["treble-clef", "time-signature", "C2", "C2", "C2", "C2", "bar", "C2", "C2", "C2", "rest", "double-bar"] def test_staff_dim(): lines = s.rand_staff([1], range(13), 1, 2, False) assert len(lines) == 2 assert lines[0][-1] == "end-bar" assert lines[1][6] == "bar" assert lines[1][8] == "rest"
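# ---------------------------------------------------------------------------
# Reference sketch of the contract test_padding pins down: pad_line mutates
# the list in place, prepends a clef and time signature when start=True,
# inserts a "bar" between four-note measures, pads the final short measure
# with "rest", and appends "double-bar" when end=True. This is a hypothetical
# implementation consistent with the test; sightreading.randnotes may differ.
def pad_line_sketch(notes, start=True, end=True, beats=4):
    padded = []
    for i, note in enumerate(notes):
        if i and i % beats == 0:
            padded.append("bar")
        padded.append(note)
    remainder = len(notes) % beats
    if remainder:
        padded.extend(["rest"] * (beats - remainder))
    if start:
        padded = ["treble-clef", "time-signature"] + padded
    if end:
        padded.append("double-bar")
    notes[:] = padded  # in-place, since the test asserts on `notes` itself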
python
# ---------------------------------------------------------------------- # Test noc.core.hash functions # ---------------------------------------------------------------------- # Copyright (C) 2007-2020 The NOC Project # See LICENSE for details # ---------------------------------------------------------------------- # Third-party modules import pytest # NOC modules from noc.core.hash import hash_str, hash_int, dict_hash_int, dict_hash_int_args @pytest.mark.parametrize( "value,expected", [ (0, b"J^\xa04\xb0\x0b\xaf\xb6"), ("0", b"J^\xa04\xb0\x0b\xaf\xb6"), (None, b"\x1a3\x12\x943.\xcdm"), ("None", b"\x1a3\x12\x943.\xcdm"), ], ) def test_hash_str(value, expected): assert hash_str(value) == expected @pytest.mark.parametrize( "value,expected", [ (0, 5358896754769768374), ("0", 5358896754769768374), (None, 1887873096521534829), ("None", 1887873096521534829), ], ) def test_hash_int(value, expected): assert hash_int(value) == expected @pytest.mark.parametrize( "value,expected", [ ({}, -2954230017111125474), ({"k": 1}, -7829327169641555127), ({"k": "1"}, -7829327169641555127), ({"k": 1, "v": "2"}, 6473659485526827658), ({"k": 1, "v": None}, 1975760527053142894), ({"k": 1, "v": "None"}, 1975760527053142894), ], ) def test_dict_hash_int(value, expected): assert dict_hash_int(value) == expected @pytest.mark.parametrize( "value,expected", [ ({}, -2954230017111125474), ({"k": 1}, -7829327169641555127), ({"k": "1"}, -7829327169641555127), ({"k": 1, "v": "2"}, 6473659485526827658), ({"k": 1, "v": None}, 1975760527053142894), ({"k": 1, "v": "None"}, 1975760527053142894), ], ) def test_dict_hash_int_args(value, expected): assert dict_hash_int_args(**value) == expected
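# ---------------------------------------------------------------------------
# Two properties the fixtures above imply, stated as explicit tests. The
# first (values are stringified before hashing, so 0 and "0" collide by
# design) is pinned directly by the vectors; the second assumes dict_hash_int
# canonicalizes key order, which deterministic dict hashing normally
# requires -- drop it if the implementation differs.
def test_hash_str_stringifies():
    assert hash_str(0) == hash_str("0")
    assert hash_str(None) == hash_str("None")


def test_dict_hash_int_key_order():
    assert dict_hash_int({"a": 1, "b": 2}) == dict_hash_int({"b": 2, "a": 1})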
python
import json import math import sys import traceback import numpy as np from sqlalchemy.orm import sessionmaker import EOSS.historian.models as models import EOSS.data.problem_specific as problem_specific from EOSS.analyst.helpers import get_feature_unsatisfied, get_feature_satisfied, \ feature_expression_to_string from EOSS.data.problem_specific import assignation_problems, partition_problems from EOSS.data_mining.interface.ttypes import BinaryInputArchitecture, DiscreteInputArchitecture from EOSS.models import Design, EOSSContext from EOSS.vassar.api import VASSARClient from EOSS.data_mining.api import DataMiningClient class Critic: def __init__(self, context: EOSSContext, session_key): # Connect to the CEOS database self.engine = models.db_connect() self.session = sessionmaker(bind=self.engine)() self.context = context self.instruments_dataset = problem_specific.get_instrument_dataset(context.problem) self.orbits_dataset = problem_specific.get_orbit_dataset(context.problem) self.session_key = session_key def get_missions_from_genome(self, problem_type, genome): missions = [] if problem_type == 'binary': missions = self.get_missions_from_bitstring(genome) elif problem_type == 'discrete': missions = self.get_missions_from_partition(genome) return missions def get_missions_from_bitstring(self, bitstring): missions = [] num_instr = len(self.instruments_dataset) num_orbits = len(self.orbits_dataset) for orbit in range(num_orbits): mission = {"orbit": self.orbits_dataset[orbit]["name"], "instruments": []} for instr in range(num_instr): idx = orbit*num_instr + instr if bitstring[idx]: mission["instruments"].append(self.instruments_dataset[instr]) missions.append(mission) return missions def get_missions_from_partition(self, genome): missions = [] # TODO: Retrieve all missions from genome return missions def orbits_similarity(self, mission_orbit, hist_mission): score = 0 # Score orbit type if mission_orbit["type"] == hist_mission.orbit_type: score += 1 # Score orbit altitude if hist_mission.orbit_altitude_num is not None and \ mission_orbit["altitude"] - 50 < hist_mission.orbit_altitude_num < mission_orbit["altitude"] + 50: score += 1 # Score orbit LST if mission_orbit["LST"] == hist_mission.orbit_LST: score += 1 # Return orbit score return score def instruments_score(self, mission_instrument, hist_instrument): score = 0.0 # Score instrument type for type2 in hist_instrument.types: if mission_instrument["type"] == type2.name: score += 1 break # Score instrument technology if mission_instrument["technology"] == hist_instrument.technology: score += 1 # Score instrument geometry for geometry2 in hist_instrument.geometries: if mission_instrument["geometry"] == geometry2.name: score += 1 break # Score instrument wavebands for waveband1 in mission_instrument["wavebands"]: for waveband2 in hist_instrument.wavebands: if waveband1 == waveband2.name: score += 1/len(mission_instrument["wavebands"]) break # Return instruments score return score def instruments_similarity(self, instruments1, instruments2): score = 0.0 # Compute similarity matrix N = max(len(instruments1), len(instruments2)) sim = np.zeros((N, N)) for i1 in range(len(instruments1)): for i2 in range(len(instruments2)): sim[i1, i2] = self.instruments_score(instruments1[i1], instruments2[i2]) # Find the best matches for i1xi2 (greedy) for k in range(len(instruments1)): i1i2 = np.argmax(sim) i1 = int(i1i2 / N) i2 = i1i2 % N score += sim[i1, i2]/len(instruments1) sim[i1, :] = 0 sim[:, i2] = 0 return score def missions_similarity(self, 
mission_orbit, mission_instruments, missions_database): max_score = -1 max_mission = None # Iterate over all the missions in the database for hist_mission in missions_database: score = 0 # Get orbits similarity score += self.orbits_similarity(mission_orbit, hist_mission) # If score bigger than a threshold if(score > 1): # Get instruments similarities score += self.instruments_similarity(mission_instruments, hist_mission.instruments) if score > max_score: max_score = score max_mission = hist_mission # Return result return [(max_score*10)/7, max_mission] def expert_critic(self, design): # Criticize architecture (based on rules) port = self.context.vassar_port problem = self.context.problem client = VASSARClient(port) client.start_connection() result_list = client.critique_architecture(problem, design) client.end_connection() result = [] for advice in result_list: result.append({ "type": "Expert", "advice": advice }) return result def explorer_critic(self, design): def get_advices_from_bit_string_diff(difference): out = [] ninstr = len(self.instruments_dataset) for i in range(len(difference)): advice = [] if difference[i] == 1: advice.append("add") elif difference[i] == -1: advice.append("remove") else: continue orbit_index = i // ninstr # Floor division instr_index = i % ninstr # Get the remainder advice.append("instrument {}".format(self.instruments_dataset[instr_index]['name'])) if difference[i] == 1: advice.append("to") elif difference[i] == -1: advice.append("from") advice.append("orbit {}".format(self.orbits_dataset[orbit_index]['name'])) advice = " ".join(advice) out.append(advice) out = ", and ".join(out) out = out[0].upper() + out[1:] return out original_outputs = json.loads(design.outputs) original_inputs = json.loads(design.inputs) problem = self.context.problem port = self.context.vassar_port client = VASSARClient(port) client.start_connection() archs = None advices = [] if problem in assignation_problems: archs = client.run_local_search(problem, design) for arch in archs: new_outputs = arch["outputs"] new_design_inputs = arch["inputs"] diff = [a - b for a, b in zip(new_design_inputs, original_inputs)] advice = [get_advices_from_bit_string_diff(diff)] # TODO: Generalize the code for comparing each metric. 
Currently it assumes two metrics: science and cost if new_outputs[0] > original_outputs[0] and new_outputs[1] < original_outputs[1]: # New solution dominates the original solution advice.append(" to increase the science benefit and lower the cost.") elif new_outputs[0] > original_outputs[0]: advice.append(" to increase the science benefit (but cost may increase!).") elif new_outputs[1] < original_outputs[1]: advice.append(" to lower the cost (but science may decrease too!).") else: continue advice = "".join(advice) advices.append(advice) elif problem in partition_problems: archs = client.run_local_search(problem, design.inputs) # TODO: Add the delta code for discrete architectures client.end_connection() result = [] for advice in advices: result.append({ "type": "Explorer", "advice": advice }) return result def historian_critic(self, design): historian_feedback = [] problem = self.context.problem if problem in assignation_problems: problem_type = 'binary' elif problem in partition_problems: problem_type = 'discrete' else: problem_type = 'unknown' # Convert architecture format missions = self.get_missions_from_genome(problem_type, json.loads(design.inputs)) # Type 2: Mission by mission missions_database = self.session.query(models.Mission) for mission in missions: # Find the orbit information based in the name orbit_info = {} for orbit in self.orbits_dataset: if orbit["name"] == mission["orbit"]: orbit_info = orbit break # Find similar past missions from the information on the current mission, including orbit and instruments res = self.missions_similarity(orbit_info, mission["instruments"], missions_database) if len(mission["instruments"]) > 0: if res[0] < 6: historian_feedback.append("""I noticed that nobody has ever flown a satellite with these instruments: {} in the {} orbit. This is great from an innovation standpoint, but be sure to check the Expert for some reasons this might not be a good idea!""" .format(", ".join([instr["name"] for instr in mission["instruments"]]), mission["orbit"])) else: historian_feedback.append("""I found a mission that is similar to your design in orbit {}: {}. Would you like to see more information? 
Click <a target="_blank" href="http://database.eohandbook.com/database/missionsummary.aspx?missionID={}">here</a>""" .format(mission["orbit"], res[1].name, res[1].id)) # + # '<br>'.join(["Instrument similar to %s (score: %.2f)" % \ # (i[0], i[2]) for i in self.instruments_match_dataset(res[1].instruments)]) + '.') result = [] for advice in historian_feedback: result.append({ "type": "Historian", "advice": advice }) return result def analyst_critic(self, this_design): result = [] client = DataMiningClient() problem = self.context.problem if problem in assignation_problems: problem_type = 'binary' elif problem in partition_problems: problem_type = 'discrete' else: problem_type = 'unknown' try: # Start connection with data_mining client.startConnection() support_threshold = 0.02 confidence_threshold = 0.2 lift_threshold = 1 behavioral = [] non_behavioral = [] dataset = Design.objects.filter(eosscontext_id__exact=self.context.id).all() if len(dataset) < 10: raise ValueError("Could not run data mining: the number of samples is less than 10") else: utopiaPoint = [0.26, 0] temp = [] # Select the top N% archs based on the distance to the utopia point for design in dataset: outputs = json.loads(this_design.outputs) id = design.id dist = math.sqrt((outputs[0] - utopiaPoint[0]) ** 2 + (outputs[1] - utopiaPoint[1]) ** 2) temp.append((id, dist)) # Sort the list based on the distance to the utopia point temp = sorted(temp, key=lambda x: x[1]) for i in range(len(temp)): if i <= len(temp) // 10: # Label the top 10% architectures as behavioral behavioral.append(temp[i][0]) else: non_behavioral.append(temp[i][0]) # Extract feature _archs = [] if problem_type == "binary": for arch in dataset: _archs.append(BinaryInputArchitecture(arch.id, json.loads(arch.inputs), json.loads(arch.outputs))) _features = client.client.getDrivingFeaturesEpsilonMOEABinary(self.session_key, problem, behavioral, non_behavioral, _archs) elif problem_type == "discrete": for arch in dataset: _archs.append(DiscreteInputArchitecture(arch.id, json.loads(arch.inputs), json.loads(arch.outputs))) _features = client.client.getDrivingFeaturesEpsilonMOEADiscrete(self.session_key, problem, behavioral, non_behavioral, _archs) else: raise ValueError("Problem type not implemented") features = [] for df in _features: features.append({'id': df.id, 'name': df.name, 'expression': df.expression, 'metrics': df.metrics}) advices = [] if not len(features) == 0: # Compare features to the current design unsatisfied = get_feature_unsatisfied(features[0]['name'], this_design, self.context) satisfied = get_feature_satisfied(features[0]['name'], this_design, self.context) if type(unsatisfied) is not list: unsatisfied = [unsatisfied] if type(satisfied) is not list: satisfied = [satisfied] for exp in unsatisfied: if exp == "": continue advices.append( "Based on the data mining result, I advise you to make the following change: " + feature_expression_to_string(exp, is_critique=True, context=self.context)) for exp in satisfied: if exp == "": continue advices.append( "Based on the data mining result, these are the good features. 
Consider keeping them: " + feature_expression_to_string(exp, is_critique=False, context=self.context)) # End the connection before return statement client.endConnection() for i in range(len(advices)): # Generate answers for the first 5 features advice = advices[i] result.append({ "type": "Analyst", "advice": advice }) except Exception as e: print("Exc in generating critic from data mining: " + str(e)) traceback.print_exc(file=sys.stdout) client.endConnection() return result
python
#!/usr/bin/python
# coding: utf8

import sys

import mysql.connector

import facesearch.twittersearch as tw
import dbconf

if __name__ == '__main__':
    mode = sys.argv[1]

    conn = mysql.connector.connect(
        host=dbconf.HOST,
        port=dbconf.PORT,
        db=dbconf.DB_NAME,
        user=dbconf.DB_USER,
        password=dbconf.DB_PASSWORD,
        charset=dbconf.DB_CHARSET
    )
    cur = conn.cursor(buffered=True)
    try:
        base_max_id = 0
        max_id = 0
        if mode != 'init':
            cur.execute('select id from max_id')
            base_max_id = cur.fetchall()[0][0]
            links, max_id1 = tw.query_twitter('suwa nanaka', 100, 1, base_max_id)
            links2, max_id2 = tw.query_twitter('諏訪ななか', 100, 1, base_max_id)
            links3, max_id3 = tw.query_twitter('すわわ', 100, 1, base_max_id)
            links4, max_id4 = tw.query_twitter('ラブライブ', 100, 1, base_max_id)
            links5, max_id5 = tw.query_twitter('Aqours', 100, 1, base_max_id)
            links6, max_id6 = tw.query_twitter('ふわさた', 100, 1, base_max_id)
            links7, max_id7 = tw.query_twitter('サンシャイン', 100, 1, base_max_id)
        else:
            # Bind max_id1 so that max(...) below can combine all seven ids.
            links, max_id1 = tw.query_twitter('suwa nanaka', 100, 10, 0)
            links2, max_id2 = tw.query_twitter('諏訪ななか', 100, 10, 0)
            links3, max_id3 = tw.query_twitter('すわわ', 100, 10, 0)
            links4, max_id4 = tw.query_twitter('ラブライブ', 100, 10, 0)
            links5, max_id5 = tw.query_twitter('Aqours', 100, 10, 0)
            links6, max_id6 = tw.query_twitter('ふわさた', 100, 10, 0)
            links7, max_id7 = tw.query_twitter('サンシャイン', 100, 10, 0)
        links.extend(links2)
        links.extend(links3)
        links.extend(links4)
        links.extend(links5)
        links.extend(links6)
        links.extend(links7)
        max_id = max(max_id1, max_id2, max_id3, max_id4, max_id5, max_id6, max_id7)

        if max_id > 0:
            # Parameterized query instead of string interpolation.
            cur.execute('update max_id set id = %s', (max_id,))
            conn.commit()
    finally:
        cur.close()
        conn.close()

    links = list(set(links))
    print('%d images to be processed...' % len(links))
    personmap = tw.identify(links, 'aqours')

    conn = mysql.connector.connect(
        host=dbconf.HOST,
        port=dbconf.PORT,
        db=dbconf.DB_NAME,
        user=dbconf.DB_USER,
        password=dbconf.DB_PASSWORD,
        charset=dbconf.DB_CHARSET
    )
    cur = conn.cursor(buffered=True)
    try:
        suwawa = personmap['nanaka_suwa']
        # URLs are user-supplied data: pass them as query parameters so the
        # connector handles quoting.
        for url in suwawa:
            cur.execute('insert into suwawa(url) values (%s)', (url,))
        conn.commit()
        for k, v in personmap.items():
            if k == 'nanaka_suwa':
                continue
            for url in v:
                if url in suwawa:
                    cur.execute('insert into photo(name, url) values (%s, %s)', (k, url))
            cur.execute('select distinct url from photo where name = %s', (k,))
            count = len(cur.fetchall())
            cur.execute('update member set count = %s where name = %s', (count, k))
            conn.commit()
    finally:
        cur.close()
        conn.close()
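# ---------------------------------------------------------------------------
# Batched variant of the inserts above: executemany sends one parameterized
# statement with many value tuples, which is both safer and faster than
# formatting URLs into SQL strings. A sketch against the same tables
# (suwawa(url), photo(name, url)):
def insert_photos(cur, personmap, suwawa_urls):
    cur.executemany('insert into suwawa(url) values (%s)',
                    [(url,) for url in suwawa_urls])
    rows = [(name, url)
            for name, urls in personmap.items() if name != 'nanaka_suwa'
            for url in urls if url in suwawa_urls]
    cur.executemany('insert into photo(name, url) values (%s, %s)', rows)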
python
def _clean_acc(acc):
    out = {}
    for attr in ['genomic', 'protein', 'rna']:
        if attr in acc:
            v = acc[attr]
            # Accession values may be a single string or a list of strings.
            if isinstance(v, list):
                out[attr] = [x.split('.')[0] for x in v]
            else:
                out[attr] = v.split('.')[0]
    return out


def diff_doc1(doc_1, doc_2):
    diff_d = {'update': {}, 'delete': [], 'add': {}}
    for attr in set(doc_1) | set(doc_2):
        if attr in ['_rev', 'pir', 'Vega']:
            continue
        if attr in doc_1 and attr in doc_2:
            _v1 = doc_1[attr]
            _v2 = doc_2[attr]
            if attr == 'MGI':
                _v2 = _v2.split(':')[1]
            elif attr in ['refseq', 'accession']:
                _v1 = _clean_acc(_v1)
            elif attr == 'interpro':
                if isinstance(_v1, list):
                    _v1.sort()
                if isinstance(_v2, list):
                    _v2.sort()
            elif attr == 'reagent':
                # Snapshot the keys: the dict is mutated inside the loop.
                for k in list(_v1.keys()):
                    if k.find('.') != -1:
                        _v1[k.replace('.', '_')] = _v1[k]
                        del _v1[k]
            if _v1 != _v2:
                diff_d['update'][attr] = _v2
        elif attr in doc_1 and attr not in doc_2:
            diff_d['delete'].append(attr)
        else:
            diff_d['add'][attr] = doc_2[attr]
    if diff_d['update'] or diff_d['delete'] or diff_d['add']:
        return diff_d
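# ---------------------------------------------------------------------------
# Usage sketch: diff_doc1 returns the update/delete/add sets, or None when
# the documents are equivalent after normalization. The documents are made up.
if __name__ == '__main__':
    old = {'symbol': 'TP53', 'taxid': 9606, 'alias': 'p53'}
    new = {'symbol': 'TP53', 'taxid': 9606, 'summary': 'tumor suppressor'}
    print(diff_doc1(old, new))
    # -> {'update': {}, 'delete': ['alias'], 'add': {'summary': 'tumor suppressor'}}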
python
# Copyright (C) 2020 TU Dresden # Licensed under the ISC license (see LICENSE.txt) # # Authors: Andres Goens
python
#!/usr/bin/env python3 # # Copyright 2021 Graviti. Licensed under MIT License. # """User, Commit, Tag, Branch and Draft classes. :class:`User` defines the basic concept of a user with an action. :class:`Commit` defines the structure of a commit. :class:`Tag` defines the structure of a commit tag. :class:`Branch` defines the structure of a branch. :class:`Draft` defines the structure of a draft. """ from typing import Any, Dict, Optional, Tuple, Type, TypeVar from ..utility import AttrsMixin, ReprMixin, attr, camel, common_loads class User(AttrsMixin, ReprMixin): """This class defines the basic concept of a user with an action. Arguments: name: The name of the user. date: The date of the user action. """ _T = TypeVar("_T", bound="User") _repr_attrs = ("date",) name: str = attr(is_dynamic=False) date: int = attr(is_dynamic=False) def __init__(self, name: str, date: int) -> None: self.name = name self.date = date def _repr_head(self) -> str: return f'{self.__class__.__name__}("{self.name}")' @classmethod def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T: """Loads a :class:`User` instance from the given contents. Arguments: contents: A dict containing all the information of the commit:: { "name": <str> "date": <int> } Returns: A :class:`User` instance containing all the information in the given contents. """ return common_loads(cls, contents) def dumps(self) -> Dict[str, Any]: """Dumps all the user information into a dict. Returns: A dict containing all the information of the user:: { "name": <str> "date": <int> } """ return self._dumps() class Commit(AttrsMixin, ReprMixin): """This class defines the structure of a commit. Arguments: commit_id: The commit id. parent_commit_id: The parent commit id. message: The commit message. committer: The commit user. """ _T = TypeVar("_T", bound="Commit") _repr_attrs: Tuple[str, ...] = ("parent_commit_id", "message", "committer") _repr_maxlevel = 2 commit_id: str = attr(is_dynamic=False, key=camel) parent_commit_id: Optional[str] = attr(is_dynamic=False, key=camel) message: str = attr(is_dynamic=False) committer: User = attr(is_dynamic=False) def __init__( self, commit_id: str, parent_commit_id: Optional[str], message: str, committer: User, ) -> None: self.commit_id = commit_id self.parent_commit_id = parent_commit_id self.message = message self.committer = committer def _repr_head(self) -> str: return f'{self.__class__.__name__}("{self.commit_id}")' @classmethod def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T: """Loads a :class:`Commit` instance for the given contents. Arguments: contents: A dict containing all the information of the commit:: { "commitId": <str> "parentCommitId": <str> or None "message": <str> "committer": { "name": <str> "date": <int> } } Returns: A :class:`Commit` instance containing all the information in the given contents. """ return common_loads(cls, contents) def dumps(self) -> Dict[str, Any]: """Dumps all the commit information into a dict. Returns: A dict containing all the information of the commit:: { "commitId": <str> "parentCommitId": <str> or None "message": <str> "committer": { "name": <str> "date": <int> } } """ return self._dumps() class _NamedCommit(Commit): """This class defines the structure of a named commit. :class:`_NamedCommit` is the base class of :class:`Tag` and :class:`Branch`. Arguments: name: The name of the named commit. commit_id: The commit id. parent_commit_id: The parent commit id. message: The commit message. committer: The commit user. 
""" _T = TypeVar("_T", bound="_NamedCommit") _repr_attrs = ("commit_id",) + Commit._repr_attrs name: str = attr(is_dynamic=False) def __init__( # pylint: disable=too-many-arguments self, name: str, commit_id: str, parent_commit_id: Optional[str], message: str, committer: User, ) -> None: super().__init__(commit_id, parent_commit_id, message, committer) self.name = name def _repr_head(self) -> str: return f'{self.__class__.__name__}("{self.name}")' @classmethod def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T: """Loads a :class:`_NamedCommit` instance for the given contents. Arguments: contents: A dict containing all the information of the named commit:: { "name": <str> "commitId": <str> "parentCommitId": <str> or None "message": <str> "committer": { "name": <str> "date": <int> } } Returns: A :class:`_NamedCommit` instance containing all the information in the given contents. """ return common_loads(cls, contents) def dumps(self) -> Dict[str, Any]: """Dumps all the named commit information into a dict. Returns: A dict containing all the information of the named commit:: { "name": <str> "commitId": <str> "parentCommitId": <str> or None "message": <str> "committer": { "name": <str> "date": <int> } } """ return self._dumps() class Tag(_NamedCommit): """This class defines the structure of the tag of a commit. Arguments: name: The name of the tag. commit_id: The commit id. parent_commit_id: The parent commit id. message: The commit message. committer: The commit user. """ class Branch(_NamedCommit): """This class defines the structure of a branch. Arguments: name: The name of the branch. commit_id: The commit id. parent_commit_id: The parent commit id. message: The commit message. committer: The commit user. """ class Draft(AttrsMixin, ReprMixin): """This class defines the basic structure of a draft. Arguments: number: The number of the draft. title: The title of the draft. branch_name: The branch name. """ _T = TypeVar("_T", bound="Draft") _repr_attrs = ("title",) number: int = attr(is_dynamic=False) title: str = attr(is_dynamic=False) branch_name: str = attr(is_dynamic=False, key=camel) def __init__(self, number: int, title: str, branch_name: str) -> None: self.number = number self.title = title self.branch_name = branch_name def _repr_head(self) -> str: return f"{self.__class__.__name__}({self.number})" @classmethod def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T: """Loads a :class:`Draft` instance from the given contents. Arguments: contents: A dict containing all the information of the draft:: { "number": <int> "title": <str> "branchName": <str> } Returns: A :class:`Draft` instance containing all the information in the given contents. """ return common_loads(cls, contents) def dumps(self) -> Dict[str, Any]: """Dumps all the information of the draft into a dict. Returns: A dict containing all the information of the draft:: { "number": <int> "title": <str> "branchName": <str> } """ return self._dumps()
python
# coding: UTF-8 val_1 = 24 val_2 = 67 val_3 = 88 val_4 = 89 def p(): try: print("\tval_1: {}".format(val_1)) except Exception as e: print(e) try: print("\tval_2: {}".format(val_2)) except Exception as e: print(e) try: print("\tval_3: {}".format(val_3)) except Exception as e: print(e) try: print("\tval_4: {}".format(val_4)) except Exception as e: print(e) print("\ninit value:") p() print("\nexe: del val_1!") del val_1 p() print("\nexe: del val_2 and val_3!") del val_2, val_3 p() print("\nat last result: ") p()
python
# -*- coding: utf-8-*-
import platform
import logging
import argparse
import os
import sys

from abstract_tts import AbstractTTSEngine

# Dynamically import every sibling module and re-export its classes, so that
# all TTS engine subclasses register themselves with this package.
path = os.path.dirname(os.path.abspath(__file__))
for py in [f[:-3] for f in os.listdir(path) if f.endswith('.py') and f != '__init__.py']:
    mod = __import__(__name__ + '.' + py, fromlist=[py])
    classes = [getattr(mod, x) for x in dir(mod)
               if isinstance(getattr(mod, x), type)]
    for cls in classes:
        setattr(sys.modules[__name__], cls.__name__, cls)


def get_default_engine_slug():
    return 'osx-tts' if platform.system().lower() == 'darwin' else 'espeak-tts'


def get_engine_by_slug(slug=None):
    """
    Returns:
        A speaker implementation available on the current platform

    Raises:
        ValueError if no speaker implementation is supported on this platform
    """
    if not slug or type(slug) is not str:
        raise TypeError("Invalid slug '%s'" % slug)

    # Materialize the matches into a list so they can be counted and indexed.
    selected_engines = [engine for engine in get_engines()
                        if hasattr(engine, "SLUG") and engine.SLUG == slug]

    if len(selected_engines) == 0:
        raise ValueError("No TTS engine found for slug '%s'" % slug)
    else:
        if len(selected_engines) > 1:
            print(("WARNING: Multiple TTS engines found for slug '%s'. " +
                   "This is most certainly a bug.") % slug)
        engine = selected_engines[0]
        if not engine.is_available():
            raise ValueError(("TTS engine '%s' is not available (due to " +
                              "missing dependencies, etc.)") % slug)
        return engine


def get_engines():
    def get_subclasses(cls):
        subclasses = set()
        for subclass in cls.__subclasses__():
            subclasses.add(subclass)
            subclasses.update(get_subclasses(subclass))
        return subclasses
    return [tts_engine for tts_engine in list(get_subclasses(AbstractTTSEngine))
            if hasattr(tts_engine, 'SLUG') and tts_engine.SLUG]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Jasper TTS module')
    parser.add_argument('--debug', action='store_true', help='Show debug messages')
    args = parser.parse_args()

    logging.basicConfig()
    if args.debug:
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)

    engines = get_engines()
    available_engines = []
    for engine in engines:
        if engine.is_available():
            available_engines.append(engine)
    disabled_engines = list(set(engines).difference(set(available_engines)))

    print("Available TTS engines:")
    for i, engine in enumerate(available_engines, start=1):
        print("%d. %s" % (i, engine.SLUG))
    print("")
    print("Disabled TTS engines:")
    for i, engine in enumerate(disabled_engines, start=1):
        print("%d. %s" % (i, engine.SLUG))
    print("")
    for i, engine in enumerate(available_engines, start=1):
        print("%d. Testing engine '%s'..." % (i, engine.SLUG))
        engine.get_instance().say("This is a test.")
    print("Done.")
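# ---------------------------------------------------------------------------
# Typical programmatic use of this module: resolve the platform default slug,
# look up the engine class, and speak. Whether a given engine is available
# depends on installed dependencies (espeak, the OS X `say` command, ...).
def say_hello():
    engine_cls = get_engine_by_slug(get_default_engine_slug())
    engine_cls.get_instance().say("Hello from Jasper.")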
python
# coding=utf-8 import tensorflow as tf import wml_tfutils as wmlt import wnn from basic_tftools import channel import functools import tfop import object_detection2.bboxes as odbox from object_detection2.standard_names import * import wmodule from .onestage_tools import * from object_detection2.datadef import * from object_detection2.config.config import global_cfg from object_detection2.modeling.build import HEAD_OUTPUTS import object_detection2.wlayers as odl import numpy as np from object_detection2.data.dataloader import DataLoader import wsummary from functools import partial import wnn @HEAD_OUTPUTS.register() class CenterNet2Outputs(wmodule.WChildModule): def __init__( self, cfg, parent, box2box_transform, head_outputs, gt_boxes=None, gt_labels=None, gt_length=None, max_detections_per_image=100, **kwargs, ): """ Args: cfg: Only the child part box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for anchor-proposal transformations. gt_boxes: [B,N,4] (ymin,xmin,ymax,xmax) gt_labels: [B,N] gt_length: [B] """ super().__init__(cfg, parent=parent, **kwargs) self.score_threshold = cfg.SCORE_THRESH_TEST self.nms_threshold = cfg.NMS_THRESH_TEST self.max_detections_per_image = max_detections_per_image self.box2box_transform = box2box_transform self.head_outputs = head_outputs self.k = self.cfg.K self.size_threshold = self.cfg.SIZE_THRESHOLD self.dis_threshold = self.cfg.DIS_THRESHOLD self.gt_boxes = gt_boxes self.gt_labels = gt_labels self.gt_length = gt_length self.mid_results = {} def _get_ground_truth(self): """ Returns: """ res = [] for i,outputs in enumerate(self.head_outputs): shape = wmlt.combined_static_and_dynamic_shape(outputs['heatmaps_ct'])[1:3] t_res = self.box2box_transform.get_deltas(self.gt_boxes, self.gt_labels, self.gt_length, output_size=shape) res.append(t_res) return res @wmlt.add_name_scope def losses(self): """ Args: Returns: """ all_encoded_datas = self._get_ground_truth() all_loss0 = [] all_loss1 = [] all_loss2 = [] for i,(encoded_datas,head_outputs) in enumerate(zip(all_encoded_datas,self.head_outputs)): loss0 = wnn.focal_loss_for_heat_map(labels=encoded_datas["g_heatmaps_ct"], logits=head_outputs["heatmaps_ct"],scope="ct_loss", alpha=self.cfg.LOSS_ALPHA, beta=self.cfg.LOSS_BETA, pos_threshold=self.cfg.LOSS_POS_THRESHOLD) tmp_w = tf.reduce_sum(encoded_datas['g_offset_mask'])+1e-3 offset_loss = tf.reduce_sum(tf.abs((encoded_datas['g_offset']-head_outputs['offset'])*encoded_datas['g_offset_mask']))/tmp_w tmp_w = tf.reduce_sum(encoded_datas['g_hw_mask'])+1e-3 hw_loss = tf.reduce_sum(tf.abs((encoded_datas['g_hw']-head_outputs['hw'])*encoded_datas['g_hw_mask']))/tmp_w all_loss0.append(loss0) all_loss1.append(offset_loss) all_loss2.append(hw_loss) loss0 = tf.add_n(all_loss0) loss1 = tf.add_n(all_loss1)*self.cfg.LOSS_LAMBDA_OFFSET loss2 = tf.add_n(all_loss2)*self.cfg.LOSS_LAMBDA_SIZE return {"heatmaps_ct_loss": loss0, "offset_loss": loss1, "hw_loss":loss2} @wmlt.add_name_scope def inference(self,inputs,head_outputs): """ Arguments: inputs: same as CenterNet.forward's batched_inputs Returns: results: RD_BOXES: [B,N,4] RD_LABELS: [B,N] RD_PROBABILITY:[ B,N] RD_LENGTH:[B] """ self.inputs = inputs all_bboxes = [] all_scores = [] all_clses = [] all_length = [] img_size = tf.shape(inputs[IMAGE])[1:3] assert len(head_outputs)==1,f"Error head outputs len {len(head_outputs)}" nms = partial(odl.boxes_nms,threshold=self.nms_threshold) bboxes,clses, scores,length = self.get_box_in_a_single_layer(head_outputs[0],self.cfg.SCORE_THRESH_TEST) bboxes, labels, nms_indexs, 
lens = odl.batch_nms_wrapper(bboxes, clses, length, confidence=None, nms=nms, k=self.max_detections_per_image, sort=True) scores = wmlt.batch_gather(scores,nms_indexs) outdata = {RD_BOXES:bboxes,RD_LABELS:labels,RD_PROBABILITY:scores,RD_LENGTH:lens} if global_cfg.GLOBAL.SUMMARY_LEVEL<=SummaryLevel.DEBUG: wsummary.detection_image_summary(images=inputs[IMAGE], boxes=outdata[RD_BOXES], classes=outdata[RD_LABELS], lengths=outdata[RD_LENGTH], scores=outdata[RD_PROBABILITY], name="CenterNetOutput", category_index=DataLoader.category_index) return outdata @wmlt.add_name_scope def get_box_in_a_single_layer(self,datas,threshold): bboxes,clses,scores,_ = self.box2box_transform.apply_deltas(datas) mask = tf.cast(tf.greater_equal(scores,threshold),tf.int32) length = tf.reduce_sum(mask,axis=-1) return bboxes,clses,scores,length
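# ---------------------------------------------------------------------------
# NumPy sketch of what a CenterNet-style box2box_transform.apply_deltas
# plausibly does for one image: pick the top-k heatmap cells, refine each
# cell center with the predicted offset, and expand it by the predicted
# height/width. Channel ordering (y before x) and the cell-unit scale are
# assumptions; the project's own transform is authoritative.
import numpy as np

def decode_centers(heatmap, offset, hw, k=100):
    # heatmap: [H, W, C] class scores; offset, hw: [H, W, 2] in cell units
    H, W, C = heatmap.shape
    flat = heatmap.reshape(-1)
    top = np.argsort(flat)[::-1][:k]   # indices of the k highest scores
    scores = flat[top]
    cls = top % C                      # channel index
    cell = top // C
    ys, xs = cell // W, cell % W       # cell coordinates
    cy = ys + offset[ys, xs, 0]
    cx = xs + offset[ys, xs, 1]
    h, w = hw[ys, xs, 0], hw[ys, xs, 1]
    # (ymin, xmin, ymax, xmax), matching the gt_boxes ordering used above
    boxes = np.stack([cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2], axis=-1)
    return boxes, cls, scores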
python
# # PySNMP MIB module BRIDGE-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BRIDGE-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 16:50:13 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") mib_2, Counter32, TimeTicks, ObjectIdentity, ModuleIdentity, MibIdentifier, iso, Counter64, Bits, Integer32, NotificationType, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, NotificationType, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "mib-2", "Counter32", "TimeTicks", "ObjectIdentity", "ModuleIdentity", "MibIdentifier", "iso", "Counter64", "Bits", "Integer32", "NotificationType", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "NotificationType", "Gauge32") TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString") class MacAddress(OctetString): subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(6, 6) fixedLength = 6 class BridgeId(OctetString): subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8) fixedLength = 8 class Timeout(Integer32): pass dot1dBridge = MibIdentifier((1, 3, 6, 1, 2, 1, 17)) dot1dBase = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 1)) dot1dStp = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 2)) dot1dSr = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 3)) dot1dTp = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 4)) dot1dStatic = MibIdentifier((1, 3, 6, 1, 2, 1, 17, 5)) dot1dBaseBridgeAddress = MibScalar((1, 3, 6, 1, 2, 1, 17, 1, 1), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dBaseBridgeAddress.setStatus('mandatory') dot1dBaseNumPorts = MibScalar((1, 3, 6, 1, 2, 1, 17, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dBaseNumPorts.setStatus('mandatory') dot1dBaseType = MibScalar((1, 3, 6, 1, 2, 1, 17, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("transparent-only", 2), ("sourceroute-only", 3), ("srt", 4)))).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dBaseType.setStatus('mandatory') dot1dBasePortTable = MibTable((1, 3, 6, 1, 2, 1, 17, 1, 4), ) if mibBuilder.loadTexts: dot1dBasePortTable.setStatus('mandatory') dot1dBasePortEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 1, 4, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dBasePort")) if mibBuilder.loadTexts: dot1dBasePortEntry.setStatus('mandatory') dot1dBasePort = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dBasePort.setStatus('mandatory') dot1dBasePortIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 1, 4, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dBasePortIfIndex.setStatus('mandatory') dot1dBasePortCircuit = 
MibTableColumn((1, 3, 6, 1, 2, 1, 17, 1, 4, 1, 3), ObjectIdentifier()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dBasePortCircuit.setStatus('mandatory') dot1dBasePortDelayExceededDiscards = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 1, 4, 1, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dBasePortDelayExceededDiscards.setStatus('mandatory') dot1dBasePortMtuExceededDiscards = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 1, 4, 1, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dBasePortMtuExceededDiscards.setStatus('mandatory') dot1dStpProtocolSpecification = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("decLb100", 2), ("ieee8021d", 3)))).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpProtocolSpecification.setStatus('mandatory') dot1dStpPriority = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStpPriority.setStatus('mandatory') dot1dStpTimeSinceTopologyChange = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 3), TimeTicks()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpTimeSinceTopologyChange.setStatus('mandatory') dot1dStpTopChanges = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpTopChanges.setStatus('mandatory') dot1dStpDesignatedRoot = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 5), BridgeId()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpDesignatedRoot.setStatus('mandatory') dot1dStpRootCost = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 6), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpRootCost.setStatus('mandatory') dot1dStpRootPort = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpRootPort.setStatus('mandatory') dot1dStpMaxAge = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 8), Timeout()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpMaxAge.setStatus('mandatory') dot1dStpHelloTime = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 9), Timeout()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpHelloTime.setStatus('mandatory') dot1dStpHoldTime = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 10), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpHoldTime.setStatus('mandatory') dot1dStpForwardDelay = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 11), Timeout()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpForwardDelay.setStatus('mandatory') dot1dStpBridgeMaxAge = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 12), Timeout().subtype(subtypeSpec=ValueRangeConstraint(600, 4000))).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStpBridgeMaxAge.setStatus('mandatory') dot1dStpBridgeHelloTime = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 13), Timeout().subtype(subtypeSpec=ValueRangeConstraint(100, 1000))).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStpBridgeHelloTime.setStatus('mandatory') dot1dStpBridgeForwardDelay = MibScalar((1, 3, 6, 1, 2, 1, 17, 2, 14), Timeout().subtype(subtypeSpec=ValueRangeConstraint(400, 3000))).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStpBridgeForwardDelay.setStatus('mandatory') dot1dStpPortTable = MibTable((1, 3, 6, 1, 2, 1, 17, 2, 15), ) if mibBuilder.loadTexts: dot1dStpPortTable.setStatus('mandatory') dot1dStpPortEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 2, 15, 1), ).setIndexNames((0, "BRIDGE-MIB", 
"dot1dStpPort")) if mibBuilder.loadTexts: dot1dStpPortEntry.setStatus('mandatory') dot1dStpPort = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 2, 15, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpPort.setStatus('mandatory') dot1dStpPortPriority = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 2, 15, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStpPortPriority.setStatus('mandatory') dot1dStpPortState = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 2, 15, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("disabled", 1), ("blocking", 2), ("listening", 3), ("learning", 4), ("forwarding", 5), ("broken", 6)))).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpPortState.setStatus('mandatory') dot1dStpPortEnable = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 2, 15, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStpPortEnable.setStatus('mandatory') dot1dStpPortPathCost = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 2, 15, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStpPortPathCost.setStatus('mandatory') dot1dStpPortDesignatedRoot = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 2, 15, 1, 6), BridgeId()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpPortDesignatedRoot.setStatus('mandatory') dot1dStpPortDesignatedCost = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 2, 15, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpPortDesignatedCost.setStatus('mandatory') dot1dStpPortDesignatedBridge = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 2, 15, 1, 8), BridgeId()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpPortDesignatedBridge.setStatus('mandatory') dot1dStpPortDesignatedPort = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 2, 15, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpPortDesignatedPort.setStatus('mandatory') dot1dStpPortForwardTransitions = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 2, 15, 1, 10), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dStpPortForwardTransitions.setStatus('mandatory') dot1dTpLearnedEntryDiscards = MibScalar((1, 3, 6, 1, 2, 1, 17, 4, 1), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dTpLearnedEntryDiscards.setStatus('mandatory') dot1dTpAgingTime = MibScalar((1, 3, 6, 1, 2, 1, 17, 4, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(10, 1000000))).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dTpAgingTime.setStatus('mandatory') dot1dTpFdbTable = MibTable((1, 3, 6, 1, 2, 1, 17, 4, 3), ) if mibBuilder.loadTexts: dot1dTpFdbTable.setStatus('mandatory') dot1dTpFdbEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 4, 3, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dTpFdbAddress")) if mibBuilder.loadTexts: dot1dTpFdbEntry.setStatus('mandatory') dot1dTpFdbAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 4, 3, 1, 1), MacAddress()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dTpFdbAddress.setStatus('mandatory') dot1dTpFdbPort = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 4, 3, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: 
dot1dTpFdbPort.setStatus('mandatory') dot1dTpFdbStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 4, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("learned", 3), ("self", 4), ("mgmt", 5)))).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dTpFdbStatus.setStatus('mandatory') dot1dTpPortTable = MibTable((1, 3, 6, 1, 2, 1, 17, 4, 4), ) if mibBuilder.loadTexts: dot1dTpPortTable.setStatus('mandatory') dot1dTpPortEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 4, 4, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dTpPort")) if mibBuilder.loadTexts: dot1dTpPortEntry.setStatus('mandatory') dot1dTpPort = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 4, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dTpPort.setStatus('mandatory') dot1dTpPortMaxInfo = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 4, 4, 1, 2), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dTpPortMaxInfo.setStatus('mandatory') dot1dTpPortInFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 4, 4, 1, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dTpPortInFrames.setStatus('mandatory') dot1dTpPortOutFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 4, 4, 1, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dTpPortOutFrames.setStatus('mandatory') dot1dTpPortInDiscards = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 4, 4, 1, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dot1dTpPortInDiscards.setStatus('mandatory') dot1dStaticTable = MibTable((1, 3, 6, 1, 2, 1, 17, 5, 1), ) if mibBuilder.loadTexts: dot1dStaticTable.setStatus('mandatory') dot1dStaticEntry = MibTableRow((1, 3, 6, 1, 2, 1, 17, 5, 1, 1), ).setIndexNames((0, "BRIDGE-MIB", "dot1dStaticAddress"), (0, "BRIDGE-MIB", "dot1dStaticReceivePort")) if mibBuilder.loadTexts: dot1dStaticEntry.setStatus('mandatory') dot1dStaticAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 5, 1, 1, 1), MacAddress()).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStaticAddress.setStatus('mandatory') dot1dStaticReceivePort = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 5, 1, 1, 2), Integer32()).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStaticReceivePort.setStatus('mandatory') dot1dStaticAllowedToGoTo = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 5, 1, 1, 3), OctetString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStaticAllowedToGoTo.setStatus('mandatory') dot1dStaticStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 17, 5, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("permanent", 3), ("deleteOnReset", 4), ("deleteOnTimeout", 5)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: dot1dStaticStatus.setStatus('mandatory') newRoot = NotificationType((1, 3, 6, 1, 2, 1, 17) + (0,1)) topologyChange = NotificationType((1, 3, 6, 1, 2, 1, 17) + (0,2)) mibBuilder.exportSymbols("BRIDGE-MIB", dot1dTpPort=dot1dTpPort, dot1dBaseNumPorts=dot1dBaseNumPorts, dot1dStp=dot1dStp, dot1dStpPortDesignatedCost=dot1dStpPortDesignatedCost, dot1dStaticAllowedToGoTo=dot1dStaticAllowedToGoTo, dot1dTpFdbStatus=dot1dTpFdbStatus, dot1dStpPortDesignatedPort=dot1dStpPortDesignatedPort, dot1dStpTopChanges=dot1dStpTopChanges, dot1dStpDesignatedRoot=dot1dStpDesignatedRoot, dot1dSr=dot1dSr, dot1dBasePortDelayExceededDiscards=dot1dBasePortDelayExceededDiscards, dot1dBase=dot1dBase, 
dot1dBasePortCircuit=dot1dBasePortCircuit, dot1dStpPortEntry=dot1dStpPortEntry, dot1dTpAgingTime=dot1dTpAgingTime, dot1dStpBridgeMaxAge=dot1dStpBridgeMaxAge, dot1dBasePortEntry=dot1dBasePortEntry, dot1dBasePort=dot1dBasePort, dot1dStpPortPriority=dot1dStpPortPriority, dot1dStaticTable=dot1dStaticTable, dot1dStpHelloTime=dot1dStpHelloTime, dot1dStpPort=dot1dStpPort, dot1dStpPortForwardTransitions=dot1dStpPortForwardTransitions, MacAddress=MacAddress, dot1dTp=dot1dTp, dot1dBridge=dot1dBridge, dot1dTpLearnedEntryDiscards=dot1dTpLearnedEntryDiscards, dot1dStpPortEnable=dot1dStpPortEnable, newRoot=newRoot, dot1dStpPriority=dot1dStpPriority, Timeout=Timeout, dot1dStpMaxAge=dot1dStpMaxAge, dot1dStpPortState=dot1dStpPortState, dot1dStpPortPathCost=dot1dStpPortPathCost, dot1dStaticReceivePort=dot1dStaticReceivePort, dot1dBaseType=dot1dBaseType, dot1dTpFdbTable=dot1dTpFdbTable, dot1dTpPortMaxInfo=dot1dTpPortMaxInfo, dot1dStpProtocolSpecification=dot1dStpProtocolSpecification, dot1dTpPortInFrames=dot1dTpPortInFrames, dot1dTpFdbPort=dot1dTpFdbPort, dot1dTpPortTable=dot1dTpPortTable, dot1dStatic=dot1dStatic, dot1dStpRootPort=dot1dStpRootPort, dot1dBaseBridgeAddress=dot1dBaseBridgeAddress, dot1dBasePortTable=dot1dBasePortTable, dot1dStaticAddress=dot1dStaticAddress, dot1dStaticStatus=dot1dStaticStatus, dot1dStpHoldTime=dot1dStpHoldTime, dot1dBasePortIfIndex=dot1dBasePortIfIndex, dot1dTpPortOutFrames=dot1dTpPortOutFrames, dot1dTpFdbEntry=dot1dTpFdbEntry, dot1dStaticEntry=dot1dStaticEntry, dot1dStpBridgeHelloTime=dot1dStpBridgeHelloTime, dot1dStpTimeSinceTopologyChange=dot1dStpTimeSinceTopologyChange, BridgeId=BridgeId, dot1dStpRootCost=dot1dStpRootCost, dot1dStpPortDesignatedRoot=dot1dStpPortDesignatedRoot, dot1dStpPortDesignatedBridge=dot1dStpPortDesignatedBridge, dot1dTpPortEntry=dot1dTpPortEntry, topologyChange=topologyChange, dot1dStpForwardDelay=dot1dStpForwardDelay, dot1dStpBridgeForwardDelay=dot1dStpBridgeForwardDelay, dot1dStpPortTable=dot1dStpPortTable, dot1dBasePortMtuExceededDiscards=dot1dBasePortMtuExceededDiscards, dot1dTpFdbAddress=dot1dTpFdbAddress, dot1dTpPortInDiscards=dot1dTpPortInDiscards)
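# --- usage sketch (not part of the generated MIB module) --------------------
# A minimal example of reading one of the symbols exported above with
# pysnmp's high-level API. The community string and agent address are
# assumptions for illustration only.
if __name__ == '__main__':
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)

    error_indication, error_status, error_index, var_binds = next(
        getCmd(SnmpEngine(),
               CommunityData('public'),                 # assumed community
               UdpTransportTarget(('192.0.2.1', 161)),  # assumed bridge address
               ContextData(),
               ObjectType(ObjectIdentity('BRIDGE-MIB', 'dot1dBaseNumPorts', 0))))
    if error_indication:
        print(error_indication)
    else:
        for var_bind in var_binds:
            print(' = '.join([x.prettyPrint() for x in var_bind]))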
python
from sqlalchemy import Column, Integer, String, DateTime, Float
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.sql.expression import func
from pow_comments.dblib import engine, session
from pow_comments.powlib import pluralize
import datetime
import decimal
from sqlalchemy import orm
import sqlalchemy.inspection
from cerberus import Validator
import xmltodict
import json
from pow_comments.config import myapp


class MyValidator(Validator):
    def _validate_type_default(self, value):
        """ Enables validation for the `default` schema attribute.
        :param value: field value.
        """
        print(" validating: default value: " + str(value))
        return True


#print('importing module %s' % __name__)
class BaseModel():
    __table_args__ = {"extend_existing": True}

    id = Column(Integer, primary_key=True)
    # created_at will be populated with the result of the now() SQL function
    # (which, depending on backend, compiles into NOW() or CURRENT_TIMESTAMP in
    # most cases; see: http://docs.sqlalchemy.org/en/latest/core/defaults.html)
    created_at = Column(DateTime, default=func.now())
    last_updated = Column(DateTime, onupdate=datetime.datetime.now, default=func.now())
    session = session

    @orm.reconstructor
    def init_on_load(self, *args, **kwargs):
        #
        # set up a marshmallow schema to be able to dump (serialize) and load
        # (deserialize) models to json quick, safe and easy,
        # see: http://marshmallow-sqlalchemy.readthedocs.io/en/latest/
        # and link it to the model (as the _jsonify attribute).
        # this enables the model to load / dump json
        #
        self.class_name = self.__class__.__name__.capitalize()
        from marshmallow_sqlalchemy import ModelSchema
        cls_meta = type("Meta", (object,), {"model": self.__class__})
        jschema_class = type(self.class_name + 'Schema', (ModelSchema,),
                             {"Meta": cls_meta})
        setattr(self, "_jsonify", jschema_class())
        self.session = session
        self.table = self.metadata.tables[pluralize(self.__class__.__name__.lower())]
        #
        # if there is a (cerberus) schema, set it in the instance
        #
        print(str(self.__class__.__dict__.keys()))
        if "schema" in self.__class__.__dict__:
            print(" .. found a schema for: " + str(self.__class__.__name__) + " in class dict")
            self.schema = self.__class__.__dict__["schema"]
            # add the sql column definitions to the cerberus schema (if there are any)
            if myapp["auto_schema"]:
                self._setup_schema_from_sql()
        #
        # set up values from kwargs or from init_from_<format> if format="someformat"
        #
        if "format" in kwargs:
            # set the format and call the according init_from_<format> method,
            # which initializes the instance with the given values (from data),
            # e.g. Model(format="json", data={data})
            f = getattr(self, "init_from_" + kwargs["format"], None)
            if f:
                f(kwargs)
        else:
            # initializes the instance with the given kwargs values,
            # e.g. Model(text="sometext", title="sometitle")
            for key in kwargs.keys():
                if key in self.__class__.__dict__:
                    setattr(self, key, kwargs[key])

    @declared_attr
    def __tablename__(cls):
        """ returns the tablename for this model """
        return pluralize(cls.__name__.lower())

    def api(self):
        """ just for convenience """
        return self.show_api()

    def show_api(self):
        """ prints the "external" API of the class:
            no under- or dunder-methods, and methods only.
            Uses the inspect module.
        """
        import inspect
        print(50 * "-")
        print(" external API for " + self.__class__.__name__)
        print(50 * "-")
        for elem in inspect.getmembers(self, predicate=inspect.ismethod):
            meth = elem[0]
            if not meth.startswith("_"):
                print(" .. " + str(elem[0]), end="")
                func = getattr(self, elem[0])
                if func:
                    print(str(func.__doc__)[0:100])
                else:
                    print()

    def _setup_schema_from_sql(self):
        """ Constructs a cerberus definition schema
            from the sqlalchemy column definitions of this model.
        """
        print(" .. setup schema from sql for: " + str(self.class_name))
        for idx, col in enumerate(self.table.columns.items()):
            # each item looks like this:
            # ('id', Column('id', Integer(), table=<comments>,
            #               primary_key=True, nullable=False))
            col_type = col[1].type.python_type
            col_name = str(col[0]).lower()
            exclude_list = [elem for elem in self.schema.keys()]
            exclude_list.extend(["id", "created_at", "last_updated"])
            #print(" #" + str(idx) + "->" + str(col_name) + " -> " + str(col_type))
            # don't check internal columns or relation (foreign key) columns.
            if (col_name not in exclude_list) and (col[1].foreign_keys == set()):
                print(" .. adding to schema: " + col_name)
                if col_type == int:
                    # sqlalchemy: Integer, BigInteger
                    self.schema[col_name] = {"type": "integer"}
                elif col_type == str:
                    # sqlalchemy: String, Text
                    self.schema[col_name] = {"type": "string"}
                elif col_type == bool:
                    # sqlalchemy: Boolean
                    self.schema[col_name] = {"type": "boolean"}
                elif col_type == datetime.date:
                    # sqlalchemy: Date
                    self.schema[col_name] = {"type": "date"}
                elif col_type == datetime.datetime:
                    # sqlalchemy: DateTime
                    self.schema[col_name] = {"type": "datetime"}
                elif col_type == float:
                    # sqlalchemy: Float
                    self.schema[col_name] = {"type": "float"}
                elif col_type == decimal.Decimal:
                    # sqlalchemy: Numeric
                    self.schema[col_name] = {"type": "number"}
                elif col_type == bytes:
                    # sqlalchemy: LargeBinary
                    self.schema[col_name] = {"type": "binary"}
                else:
                    print(" .. skipping: " + col_name)

    def validate(self):
        """ checks if the instance has a schema
            and validates the current values
        """
        if getattr(self, "schema", False):
            # the instance has a schema (also see init_on_load)
            v = MyValidator(self.schema)
            if v.validate(self.dict_dump()):
                return True
            else:
                return v

    def init_from_xml(self, data, root="root"):
        """ makes a py dict from input xml and
            sets the instance attributes;
            root defines the xml root node
        """
        d = xmltodict.parse(data)
        d = d[root]
        for key in d:
            print("key: " + key + " : " + str(d[key]))
            if isinstance(d[key], dict):
                print(d[key])
                for elem in d[key]:
                    if elem.startswith("#"):
                        if key in self.__class__.__dict__:
                            setattr(self, key, d[key][elem])
            else:
                if key in self.__class__.__dict__:
                    setattr(self, key, d[key])

    def init_from_json(self, data):
        """ makes a py dict from input json and
            sets the instance attributes
        """
        d = json.loads(data)
        for key in d:
            if key in self.__class__.__dict__:
                setattr(self, key, d[key])

    def init_from_csv(self, data):
        """ makes a py dict from input csv and
            sets the instance attributes.
            csv has the drawback compared to json (or xml) that
            the data structure is flat;
            the first row must be the "column names".
        """
        # minimal completion of the original stub: read the header row and
        # the first data row, then set the matching attributes.
        import csv
        import io
        reader = csv.DictReader(io.StringIO(data))
        for row in reader:
            for key, value in row.items():
                if key in self.__class__.__dict__:
                    setattr(self, key, value)
            break  # flat structure: a single instance comes from a single row
    def json_dump(self):
        return self._jsonify.dump(self).data

    def json_load_from_db(self, data, keep_id=False):
        if keep_id:
            self = self._jsonify.load(data, session=session).data
            return self
        else:
            obj = self.__class__()
            obj = obj._jsonify.load(data, session=session).data
            obj.id = None
            return obj

    def print_schema(self):
        print(50 * "-")
        print("Schema for: " + str(self.__class__))
        print("{0:30s} {1:20s}".format("Column", "Type"))
        print(50 * "-")
        for col in self.__table__._columns:
            print("{0:30s} {1:20s}".format(str(col), str(col.type)))

    def dict_dump(self):
        d = {}
        exclude_list = ["_jsonify", "_sa_instance_state", "session", "schema",
                        "table", "tree_parent_id", "tree_children"]
        if getattr(self, "exclude_list", False):
            exclude_list += self.exclude_list
        for elem in vars(self).keys():
            if elem not in exclude_list:
                d[elem] = vars(self)[elem]
        return d

    def get_relationships(self):
        """ returns the raw relationships
            see: http://stackoverflow.com/questions/21206818/sqlalchemy-flask-get-relationships-from-a-db-model
        """
        return sqlalchemy.inspection.inspect(self.__class__).relationships

    def get_relations(self):
        """ returns a list of the relation names
            see: http://stackoverflow.com/questions/21206818/sqlalchemy-flask-get-relationships-from-a-db-model
        """
        rels = sqlalchemy.inspection.inspect(self.__class__).relationships
        return rels.keys()

    def print_full(self):
        #
        # prints everything, including related objects, in FULL:
        # lengthy, but you see everything.
        #
        from pprint import pformat
        d = {}
        for k in self.__dict__.keys():
            if not k.startswith("_"):
                d[k] = self.__dict__.get(k)
        # add the related objects:
        for elem in self.get_relations():
            d[elem] = str(getattr(self, elem))
        return pformat(d, indent=4)

    def __repr__(self):
        # __repr__ is what you see when you look at the object at the
        # interactive prompt (or, unlikely, use the builtin repr()), e.g.:
        #   p = Post()
        #   p
        from pprint import pformat
        d = self.json_dump()
        return pformat(d, indent=4)

    def __str__(self):
        # __str__ is what happens when you print the object, e.g.:
        #   p = Post()
        #   print(p)
        return self.__repr__()

    def create_table(self):
        """ creates the physical table in the DB """
        self.__table__.create(bind=engine)

    def drop_table(self):
        """ drops the physical table in the DB """
        self.__table__.drop(bind=engine)

    def upsert(self, session=None):
        if not session:
            session = self.session
        session.add(self)
        session.commit()

    def get(self, id):
        return self.session.query(self.__class__).get(id)

    def from_statement(self, statement):
        return self.session.query(self.__class__).from_statement(statement)

    def page(self, *criterion, limit=None, offset=None):
        res = session.query(self.__class__).filter(*criterion).limit(limit).offset(offset).all()
        return res

    def find(self, *criterion):
        return session.query(self.__class__).filter(*criterion)

    def find_all(self, *criterion, raw=False, as_json=False, limit=None, offset=None):
        if raw:
            return session.query(self.__class__).filter(*criterion).limit(limit).offset(offset)
        res = session.query(self.__class__).filter(*criterion).limit(limit).offset(offset).all()
        if as_json:
            return [x.json_dump() for x in res]
        return res

    def find_one(self, *criterion, as_json=False):
        # .one() returns a single instance (or raises), not a list
        res = session.query(self.__class__).filter(*criterion).one()
        if as_json:
            return res.json_dump()
        return res

    def find_first(self, *criterion, as_json=False):
        # .first() returns a single instance or None
        res = session.query(self.__class__).filter(*criterion).first()
        if as_json:
            return res.json_dump() if res else None
        return res

    def q(self):
        return session.query(self.__class__)

    def find_dynamic(self, filter_condition=None):
        if filter_condition is None:
            filter_condition = [('name', 'eq', 'klaas')]
        dynamic_filtered_query_class = DynamicFilter(query=None, model_class=self,
                                                     filter_condition=filter_condition)
        dynamic_filtered_query = dynamic_filtered_query_class.return_query()
        return dynamic_filtered_query


class DynamicFilter():
    def __init__(self, query=None, model_class=None, filter_condition=None):
        self.query = query
        self.model_class = model_class.__class__
        self.filter_condition = filter_condition
        # use the module-level session (the original called an undefined get_session())
        self.session = session

    def get_query(self):
        '''
        Returns a query with all the objects.
        :return:
        '''
        if not self.query:
            self.query = self.session.query(self.model_class)
        return self.query

    def filter_query(self, query, filter_condition):
        '''
        Return a filtered queryset based on condition.
        :param query: takes query
        :param filter_condition: a list, i.e. [(key, operator, value)]
        operator list:
            eq for ==
            lt for <
            ge for >=
            in for in_
            like for like
        value could be a list or a string
        :return: queryset
        '''
        if query is None:
            query = self.get_query()
        model_class = self.model_class

        for raw in filter_condition:
            try:
                key, op, value = raw
            except ValueError:
                raise Exception('Invalid filter: %s' % raw)
            column = getattr(model_class, key, None)
            if not column:
                raise Exception('Invalid filter column: %s' % key)
            if op == 'in':
                if isinstance(value, list):
                    filt = column.in_(value)
                else:
                    filt = column.in_(value.split(','))
            else:
                # resolve the operator name against the column:
                # e.g. 'eq' -> __eq__, 'like' -> like
                try:
                    attr = list(filter(
                        lambda e: hasattr(column, e % op),
                        ['%s', '%s_', '__%s__']
                    ))[0] % op
                except IndexError:
                    raise Exception('Invalid filter operator: %s' % op)
                if value == 'null':
                    value = None
                filt = getattr(column, attr)(value)
            query = query.filter(filt)
        return query

    def return_query(self):
        return self.filter_query(self.get_query(), self.filter_condition)
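# --- usage sketch ------------------------------------------------------------
# How the helpers above are meant to be used; "Post" is a hypothetical model
# (a declarative subclass of BaseModel) and is not defined in this module:
#
#   p = Post(title="hello")
#   p.upsert()                                          # INSERT/UPDATE + commit
#   Post().find_all(Post.title == "hello")              # plain criterion filter
#   Post().find_dynamic([("title", "like", "%hell%")])  # DynamicFilter; see the
#                                                       # operator table above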
python
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output

# Imports from this application
from app import app

# 1 column layout
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
row1 = dbc.Row(
    [
        dcc.Markdown(
            """
            According to some studies there are about 70 million stray animals in the United States alone.
            Solving this issue can improve not only the lives of the stray animals but goes a long way in
            making the communities they frequent better as well. One often overlooked problem that homeless
            animals cause is the strain on ecosystems as a whole. When a non-native species overtakes an
            area, it can negatively impact delicate ecosystems through pollution and the spreading of
            disease. Stray animals usually aren’t vaccinated, leading to the transmission of rabies and
            other life-threatening diseases to other animal populations. Internal parasites can also be
            passed along through the feces of an infected animal, threatening native wildlife when the
            fecal matter contaminates food and water sources.
            """
        ),
        html.Img(src='../assets/aac.jpg', width="275", height="200"),
        dcc.Markdown(
            """
            Shelters play a huge role in meeting these problems head on by providing necessary food, water,
            shelter and standard veterinary care for animals in need. They work year round to find pets
            loving and sustainable homes. Austin, Texas is the largest No Kill community in the nation, and
            home to the Austin Animal Center. They provide shelter to more than 16,000 animals each year and
            animal protection and pet resource services to all of Austin and Travis County. As part of the
            City of Austin Open Data Initiative, the Austin Animal Center makes available its collected
            dataset that contains statistics and outcomes of animals entering the Austin Animal Services
            system. My data is sourced from their most recent release at the time of writing
            (March 7, 2020). While my model is strictly for educational purposes, I hope it may offer some
            value in helping better understand what types of animals find homes: a better understanding lets
            us concentrate efforts on the animals at the highest risk of negative outcomes such as
            euthanasia, death, or simply a harder time finding a home.
            """
        ),
    ],
)

row2 = dbc.Row(
    [
        html.H5('Going into this dataset my general assumptions were:'),
        html.Ul(
            children=[
                html.Li('Puppies and kittens would be much more likely to be adopted than their older counterparts.'),
                html.Li('Breeds would play a large role in whether an animal is adopted.'),
                html.Li('Notorious breeds would be less likely to be adopted due to stereotypes.'),
                html.Li('Older animals would have a harder time finding homes and may be euthanized more often.'),
            ],
        ),
    ]
)

row3 = dbc.Row(
    [
        dcc.Markdown(
            '''
            For this visual I looked at bulldog and retriever breeds of dogs. What I found was:

            * the total number of bulldog breeds in the dataset is 11,282, while retrievers account for 11,107, so relatively close numbers
            * bulldogs are more than two times more likely to be euthanized than retrievers
            * bulldogs are about 31% less likely to be adopted than retrievers
            * the data seems to support the notion that notorious breeds are less likely to have good outcomes
            '''
        ),
        html.Img(src='../assets/plot2.png', width="700", height="450"),
    ]
)

row4 = dbc.Row(
    [
        html.H5('For my next visualization I wanted to gain more insight into how the different outcomes were affected by age.'),
        html.H6('What I found was that all outcomes seem to be skewed toward younger animals.'),
        html.H6('Age seems to play a role in how each outcome turned out.'),
        html.Img(src='../assets/plot3.png', width="750", height="450"),
    ]
)

layout = dbc.Row([row1, row2, row3, row4])
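# --- wiring sketch -----------------------------------------------------------
# How this page's `layout` is typically mounted in the multi-page app; this is
# the standard Dash URL-routing pattern, sketched here with an assumed index
# page and path, not code from this project:
#
#   app.layout = html.Div([
#       dcc.Location(id='url', refresh=False),
#       html.Div(id='page-content'),
#   ])
#
#   @app.callback(Output('page-content', 'children'), [Input('url', 'pathname')])
#   def display_page(pathname):
#       return layout if pathname == '/process' else index.layout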
python
# -*- coding: utf-8 -*-
"""
Created on Wed Oct  7 22:06:42 2015

@author: hoseung
"""
import numpy as np

# fancy indexing returns a copy: modifying c leaves a untouched
a = np.zeros(10)
b = [0, 1, 4, 7]
c = a[b]
print(c)
c[2] = 1.2
print(c)
print(a)

#%%
x = np.array([(1.5, 4), (1.0, 2), (3.0, 4)],
             dtype=[('x', float), ('y', int)])
ind = np.where(x['x'] < 2)
b = x[ind]

#%%
from tree import tmtree
import tree.halomodule as hmo
import utils.sampling as smp

wdir = '/home/hoseung/Work/data/01605/'
tt = tmtree.load(work_dir=wdir, filename="halo/TMtree.fits")
m_halo_min = 2e10
nout_fi = 187
# the following three names were undefined in the original snippet;
# placeholder values are given here so the cell can run:
nout_ini0 = 0          # placeholder: initial snapshot number
r_cluster_scale = 2.0  # placeholder: cluster-radius scale factor
info = None            # placeholder: simulation info object expected by hmo.Halo

hh = hmo.Halo(base=wdir, nout=nout_fi, halofinder='HM', info=info, load=True)
#halo = hmu.load_data(nout_fi, work_dir=work_dir, normalize=True)
i_center = np.where(hh.data['np'] == max(hh.data['np']))
i_satellites = smp.extract_halos_within(hh.data, i_center, scale=r_cluster_scale)
print("Total {0} halos \n{1} halos are selected".format(
      len(i_satellites), sum(i_satellites)))

# halos found inside the cluster that have a tree back to nout_ini
large_enough = hh.data['mvir'] > m_halo_min
halo_list = hh.data['id'][i_satellites * large_enough]
h_ind_ok, halo_ok = tmtree.check_tree_complete(tt, 0, nout_fi - nout_ini0, halo_list)
print(len(halo_ok), "halos left")
final_gal = halo_ok[:, 0]
ngals = len(final_gal)

#%%
import matplotlib.pyplot as plt
plt.plot(np.log10(hh.data['mvir'][large_enough]))
plt.show()

#%%
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]

#%%
print(list(chunks(range(10), 10)))

#%%
l = 21
n = 5
arr = [[] for _ in range(n)]
for i in range(l):
    j = i % n
    arr[j].append(i)

#%%
def chunks(l, n):
    n = max(1, n)
    return [l[i:i + n] for i in range(0, len(l), n)]

print(chunks(np.arange(21), 4))
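#%%
# For comparison (not in the original script): NumPy ships its own chunking
# helper. Note that np.array_split takes the number of sections, not the
# section size, and tolerates lengths that don't divide evenly.
print(np.array_split(np.arange(21), 5))  # sections of sizes 5, 4, 4, 4, 4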
python
from typing import List


class Solution:
    def XXX(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        n = len(matrix)
        # Rotate the matrix 90 degrees clockwise in place by moving the four
        # cells of each rotation cycle in a single tuple assignment.
        for i in range(n // 2):
            for j in range((n + 1) // 2):
                matrix[i][j], matrix[j][n - i - 1], matrix[n - i - 1][n - j - 1], matrix[n - j - 1][i] \
                    = matrix[n - j - 1][i], matrix[i][j], matrix[j][n - i - 1], matrix[n - i - 1][n - j - 1]
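# A quick sanity check for the in-place rotation above (hypothetical usage;
# the anonymized method name XXX is kept as in the original snippet).
if __name__ == "__main__":
    m = [[1, 2, 3],
         [4, 5, 6],
         [7, 8, 9]]
    Solution().XXX(m)
    # 90-degree clockwise rotation yields:
    # [[7, 4, 1],
    #  [8, 5, 2],
    #  [9, 6, 3]]
    print(m)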
python
from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('marketplace_openstack', '0007_change_billing_type_for_volumes_of_tenants'), ('invoices', '0043_drop_package_column'), ('marketplace', '0041_drop_package'), ] operations = [ # Raw SQL is used instead of Django migration operations # because packages application has been removed migrations.RunSQL('DROP TABLE IF EXISTS packages_openstackpackage'), migrations.RunSQL('DROP TABLE IF EXISTS packages_packagecomponent'), migrations.RunSQL('DROP TABLE IF EXISTS packages_packagetemplate'), ]
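# A possible variant (an assumption, not code from this migration): RunSQL
# accepts a reverse_sql argument, and RunSQL.noop marks the operation as
# reversible without doing anything on rollback, e.g.:
#
#   migrations.RunSQL(
#       'DROP TABLE IF EXISTS packages_openstackpackage',
#       reverse_sql=migrations.RunSQL.noop,
#   )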
python
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from botbuilder.schema._connector_client_enums import ActivityTypes from datetime import datetime from msrest.serialization import Model from msrest.exceptions import HttpOperationError class ConversationReference(Model): """An object relating to a particular point in a conversation. :param activity_id: (Optional) ID of the activity to refer to :type activity_id: str :param user: (Optional) User participating in this conversation :type user: ~botframework.connector.models.ChannelAccount :param bot: Bot participating in this conversation :type bot: ~botframework.connector.models.ChannelAccount :param conversation: Conversation reference :type conversation: ~botframework.connector.models.ConversationAccount :param channel_id: Channel ID :type channel_id: str :param locale: A locale name for the contents of the text field. The locale name is a combination of an ISO 639 two- or three-letter culture code associated with a language and an ISO 3166 two-letter subculture code associated with a country or region. The locale name can also correspond to a valid BCP-47 language tag. :type locale: str :param service_url: Service endpoint where operations concerning the referenced conversation may be performed :type service_url: str """ _attribute_map = { "activity_id": {"key": "activityId", "type": "str"}, "user": {"key": "user", "type": "ChannelAccount"}, "bot": {"key": "bot", "type": "ChannelAccount"}, "conversation": {"key": "conversation", "type": "ConversationAccount"}, "channel_id": {"key": "channelId", "type": "str"}, "locale": {"key": "locale", "type": "str"}, "service_url": {"key": "serviceUrl", "type": "str"}, } def __init__( self, *, activity_id: str = None, user=None, bot=None, conversation=None, channel_id: str = None, locale: str = None, service_url: str = None, **kwargs ) -> None: super(ConversationReference, self).__init__(**kwargs) self.activity_id = activity_id self.user = user self.bot = bot self.conversation = conversation self.channel_id = channel_id self.locale = locale self.service_url = service_url class Mention(Model): """Mention information (entity type: "mention"). :param mentioned: The mentioned user :type mentioned: ~botframework.connector.models.ChannelAccount :param text: Sub Text which represents the mention (can be null or empty) :type text: str :param type: Type of this entity (RFC 3987 IRI) :type type: str """ _attribute_map = { "mentioned": {"key": "mentioned", "type": "ChannelAccount"}, "text": {"key": "text", "type": "str"}, "type": {"key": "type", "type": "str"}, } def __init__( self, *, mentioned=None, text: str = None, type: str = None, **kwargs ) -> None: super(Mention, self).__init__(**kwargs) self.mentioned = mentioned self.text = text self.type = type class ResourceResponse(Model): """A response containing a resource ID. 
:param id: Id of the resource :type id: str """ _attribute_map = {"id": {"key": "id", "type": "str"}} def __init__(self, *, id: str = None, **kwargs) -> None: super(ResourceResponse, self).__init__(**kwargs) self.id = id class Activity(Model): """An Activity is the basic communication type for the Bot Framework 3.0 protocol. :param type: Contains the activity type. Possible values include: 'message', 'contactRelationUpdate', 'conversationUpdate', 'typing', 'endOfConversation', 'event', 'invoke', 'deleteUserData', 'messageUpdate', 'messageDelete', 'installationUpdate', 'messageReaction', 'suggestion', 'trace', 'handoff' :type type: str or ~botframework.connector.models.ActivityTypes :param id: Contains an ID that uniquely identifies the activity on the channel. :type id: str :param timestamp: Contains the date and time that the message was sent, in UTC, expressed in ISO-8601 format. :type timestamp: datetime :param local_timestamp: Contains the local date and time of the message expressed in ISO-8601 format. For example, 2016-09-23T13:07:49.4714686-07:00. :type local_timestamp: datetime :param local_timezone: Contains the name of the local timezone of the message, expressed in IANA Time Zone database format. For example, America/Los_Angeles. :type local_timezone: str :param service_url: Contains the URL that specifies the channel's service endpoint. Set by the channel. :type service_url: str :param channel_id: Contains an ID that uniquely identifies the channel. Set by the channel. :type channel_id: str :param from_property: Identifies the sender of the message. :type from_property: ~botframework.connector.models.ChannelAccount :param conversation: Identifies the conversation to which the activity belongs. :type conversation: ~botframework.connector.models.ConversationAccount :param recipient: Identifies the recipient of the message. :type recipient: ~botframework.connector.models.ChannelAccount :param text_format: Format of text fields Default:markdown. Possible values include: 'markdown', 'plain', 'xml' :type text_format: str or ~botframework.connector.models.TextFormatTypes :param attachment_layout: The layout hint for multiple attachments. Default: list. Possible values include: 'list', 'carousel' :type attachment_layout: str or ~botframework.connector.models.AttachmentLayoutTypes :param members_added: The collection of members added to the conversation. :type members_added: list[~botframework.connector.models.ChannelAccount] :param members_removed: The collection of members removed from the conversation. :type members_removed: list[~botframework.connector.models.ChannelAccount] :param reactions_added: The collection of reactions added to the conversation. :type reactions_added: list[~botframework.connector.models.MessageReaction] :param reactions_removed: The collection of reactions removed from the conversation. :type reactions_removed: list[~botframework.connector.models.MessageReaction] :param topic_name: The updated topic name of the conversation. :type topic_name: str :param history_disclosed: Indicates whether the prior history of the channel is disclosed. :type history_disclosed: bool :param locale: A locale name for the contents of the text field. The locale name is a combination of an ISO 639 two- or three-letter culture code associated with a language and an ISO 3166 two-letter subculture code associated with a country or region. The locale name can also correspond to a valid BCP-47 language tag. :type locale: str :param text: The text content of the message. 
:type text: str :param speak: The text to speak. :type speak: str :param input_hint: Indicates whether your bot is accepting, expecting, or ignoring user input after the message is delivered to the client. Possible values include: 'acceptingInput', 'ignoringInput', 'expectingInput' :type input_hint: str or ~botframework.connector.models.InputHints :param summary: The text to display if the channel cannot render cards. :type summary: str :param suggested_actions: The suggested actions for the activity. :type suggested_actions: ~botframework.connector.models.SuggestedActions :param attachments: Attachments :type attachments: list[~botframework.connector.models.Attachment] :param entities: Represents the entities that were mentioned in the message. :type entities: list[~botframework.connector.models.Entity] :param channel_data: Contains channel-specific content. :type channel_data: object :param action: Indicates whether the recipient of a contactRelationUpdate was added or removed from the sender's contact list. :type action: str :param reply_to_id: Contains the ID of the message to which this message is a reply. :type reply_to_id: str :param label: A descriptive label for the activity. :type label: str :param value_type: The type of the activity's value object. :type value_type: str :param value: A value that is associated with the activity. :type value: object :param name: The name of the operation associated with an invoke or event activity. :type name: str :param relates_to: A reference to another conversation or activity. :type relates_to: ~botframework.connector.models.ConversationReference :param code: The a code for endOfConversation activities that indicates why the conversation ended. Possible values include: 'unknown', 'completedSuccessfully', 'userCancelled', 'botTimedOut', 'botIssuedInvalidMessage', 'channelFailed' :type code: str or ~botframework.connector.models.EndOfConversationCodes :param expiration: The time at which the activity should be considered to be "expired" and should not be presented to the recipient. :type expiration: datetime :param importance: The importance of the activity. Possible values include: 'low', 'normal', 'high' :type importance: str or ~botframework.connector.models.ActivityImportance :param delivery_mode: A delivery hint to signal to the recipient alternate delivery paths for the activity. The default delivery mode is "default". Possible values include: 'normal', 'notification', 'expectReplies', 'ephemeral' :type delivery_mode: str or ~botframework.connector.models.DeliveryModes :param listen_for: List of phrases and references that speech and language priming systems should listen for :type listen_for: list[str] :param text_highlights: The collection of text fragments to highlight when the activity contains a ReplyToId value. :type text_highlights: list[~botframework.connector.models.TextHighlight] :param semantic_action: An optional programmatic action accompanying this request :type semantic_action: ~botframework.connector.models.SemanticAction :param caller_id: A string containing an IRI identifying the caller of a bot. This field is not intended to be transmitted over the wire, but is instead populated by bots and clients based on cryptographically verifiable data that asserts the identity of the callers (e.g. tokens). 
:type caller_id: str """ _attribute_map = { "type": {"key": "type", "type": "str"}, "id": {"key": "id", "type": "str"}, "timestamp": {"key": "timestamp", "type": "iso-8601"}, "local_timestamp": {"key": "localTimestamp", "type": "iso-8601"}, "local_timezone": {"key": "localTimezone", "type": "str"}, "service_url": {"key": "serviceUrl", "type": "str"}, "channel_id": {"key": "channelId", "type": "str"}, "from_property": {"key": "from", "type": "ChannelAccount"}, "conversation": {"key": "conversation", "type": "ConversationAccount"}, "recipient": {"key": "recipient", "type": "ChannelAccount"}, "text_format": {"key": "textFormat", "type": "str"}, "attachment_layout": {"key": "attachmentLayout", "type": "str"}, "members_added": {"key": "membersAdded", "type": "[ChannelAccount]"}, "members_removed": {"key": "membersRemoved", "type": "[ChannelAccount]"}, "reactions_added": {"key": "reactionsAdded", "type": "[MessageReaction]"}, "reactions_removed": {"key": "reactionsRemoved", "type": "[MessageReaction]"}, "topic_name": {"key": "topicName", "type": "str"}, "history_disclosed": {"key": "historyDisclosed", "type": "bool"}, "locale": {"key": "locale", "type": "str"}, "text": {"key": "text", "type": "str"}, "speak": {"key": "speak", "type": "str"}, "input_hint": {"key": "inputHint", "type": "str"}, "summary": {"key": "summary", "type": "str"}, "suggested_actions": {"key": "suggestedActions", "type": "SuggestedActions"}, "attachments": {"key": "attachments", "type": "[Attachment]"}, "entities": {"key": "entities", "type": "[Entity]"}, "channel_data": {"key": "channelData", "type": "object"}, "action": {"key": "action", "type": "str"}, "reply_to_id": {"key": "replyToId", "type": "str"}, "label": {"key": "label", "type": "str"}, "value_type": {"key": "valueType", "type": "str"}, "value": {"key": "value", "type": "object"}, "name": {"key": "name", "type": "str"}, "relates_to": {"key": "relatesTo", "type": "ConversationReference"}, "code": {"key": "code", "type": "str"}, "expiration": {"key": "expiration", "type": "iso-8601"}, "importance": {"key": "importance", "type": "str"}, "delivery_mode": {"key": "deliveryMode", "type": "str"}, "listen_for": {"key": "listenFor", "type": "[str]"}, "text_highlights": {"key": "textHighlights", "type": "[TextHighlight]"}, "semantic_action": {"key": "semanticAction", "type": "SemanticAction"}, "caller_id": {"key": "callerId", "type": "str"}, } def __init__( self, *, type=None, id: str = None, timestamp=None, local_timestamp=None, local_timezone: str = None, service_url: str = None, channel_id: str = None, from_property=None, conversation=None, recipient=None, text_format=None, attachment_layout=None, members_added=None, members_removed=None, reactions_added=None, reactions_removed=None, topic_name: str = None, history_disclosed: bool = None, locale: str = None, text: str = None, speak: str = None, input_hint=None, summary: str = None, suggested_actions=None, attachments=None, entities=None, channel_data=None, action: str = None, reply_to_id: str = None, label: str = None, value_type: str = None, value=None, name: str = None, relates_to=None, code=None, expiration=None, importance=None, delivery_mode=None, listen_for=None, text_highlights=None, semantic_action=None, caller_id: str = None, **kwargs ) -> None: super(Activity, self).__init__(**kwargs) self.type = type self.id = id self.timestamp = timestamp self.local_timestamp = local_timestamp self.local_timezone = local_timezone self.service_url = service_url self.channel_id = channel_id self.from_property = from_property 
self.conversation = conversation self.recipient = recipient self.text_format = text_format self.attachment_layout = attachment_layout self.members_added = members_added self.members_removed = members_removed self.reactions_added = reactions_added self.reactions_removed = reactions_removed self.topic_name = topic_name self.history_disclosed = history_disclosed self.locale = locale self.text = text self.speak = speak self.input_hint = input_hint self.summary = summary self.suggested_actions = suggested_actions self.attachments = attachments self.entities = entities self.channel_data = channel_data self.action = action self.reply_to_id = reply_to_id self.label = label self.value_type = value_type self.value = value self.name = name self.relates_to = relates_to self.code = code self.expiration = expiration self.importance = importance self.delivery_mode = delivery_mode self.listen_for = listen_for self.text_highlights = text_highlights self.semantic_action = semantic_action self.caller_id = caller_id def apply_conversation_reference( self, reference: ConversationReference, is_incoming: bool = False ): """ Updates this activity with the delivery information from an existing ConversationReference :param reference: The existing conversation reference. :param is_incoming: Optional, True to treat the activity as an incoming activity, where the bot is the recipient; otherwise, False. Default is False, and the activity will show the bot as the sender. :returns: his activity, updated with the delivery information. .. remarks:: Call GetConversationReference on an incoming activity to get a conversation reference that you can then use to update an outgoing activity with the correct delivery information. """ self.channel_id = reference.channel_id self.service_url = reference.service_url self.conversation = reference.conversation if reference.locale is not None: self.locale = reference.locale if is_incoming: self.from_property = reference.user self.recipient = reference.bot if reference.activity_id is not None: self.id = reference.activity_id else: self.from_property = reference.bot self.recipient = reference.user if reference.activity_id is not None: self.reply_to_id = reference.activity_id return self def as_contact_relation_update_activity(self): """ Returns this activity as a ContactRelationUpdateActivity object; or None, if this is not that type of activity. :returns: This activity as a message activity; or None. """ return ( self if self.__is_activity(ActivityTypes.contact_relation_update) else None ) def as_conversation_update_activity(self): """ Returns this activity as a ConversationUpdateActivity object; or None, if this is not that type of activity. :returns: This activity as a conversation update activity; or None. """ return self if self.__is_activity(ActivityTypes.conversation_update) else None def as_end_of_conversation_activity(self): """ Returns this activity as an EndOfConversationActivity object; or None, if this is not that type of activity. :returns: This activity as an end of conversation activity; or None. """ return self if self.__is_activity(ActivityTypes.end_of_conversation) else None def as_event_activity(self): """ Returns this activity as an EventActivity object; or None, if this is not that type of activity. :returns: This activity as an event activity; or None. """ return self if self.__is_activity(ActivityTypes.event) else None def as_handoff_activity(self): """ Returns this activity as a HandoffActivity object; or None, if this is not that type of activity. 
:returns: This activity as a handoff activity; or None. """ return self if self.__is_activity(ActivityTypes.handoff) else None def as_installation_update_activity(self): """ Returns this activity as an InstallationUpdateActivity object; or None, if this is not that type of activity. :returns: This activity as an installation update activity; or None. """ return self if self.__is_activity(ActivityTypes.installation_update) else None def as_invoke_activity(self): """ Returns this activity as an InvokeActivity object; or None, if this is not that type of activity. :returns: This activity as an invoke activity; or None. """ return self if self.__is_activity(ActivityTypes.invoke) else None def as_message_activity(self): """ Returns this activity as a MessageActivity object; or None, if this is not that type of activity. :returns: This activity as a message activity; or None. """ return self if self.__is_activity(ActivityTypes.message) else None def as_message_delete_activity(self): """ Returns this activity as a MessageDeleteActivity object; or None, if this is not that type of activity. :returns: This activity as a message delete request; or None. """ return self if self.__is_activity(ActivityTypes.message_delete) else None def as_message_reaction_activity(self): """ Returns this activity as a MessageReactionActivity object; or None, if this is not that type of activity. :return: This activity as a message reaction activity; or None. """ return self if self.__is_activity(ActivityTypes.message_reaction) else None def as_message_update_activity(self): """ Returns this activity as an MessageUpdateActivity object; or None, if this is not that type of activity. :returns: This activity as a message update request; or None. """ return self if self.__is_activity(ActivityTypes.message_update) else None def as_suggestion_activity(self): """ Returns this activity as a SuggestionActivity object; or None, if this is not that type of activity. :returns: This activity as a suggestion activity; or None. """ return self if self.__is_activity(ActivityTypes.suggestion) else None def as_trace_activity(self): """ Returns this activity as a TraceActivity object; or None, if this is not that type of activity. :returns: This activity as a trace activity; or None. """ return self if self.__is_activity(ActivityTypes.trace) else None def as_typing_activity(self): """ Returns this activity as a TypingActivity object; or null, if this is not that type of activity. :returns: This activity as a typing activity; or null. """ return self if self.__is_activity(ActivityTypes.typing) else None @staticmethod def create_contact_relation_update_activity(): """ Creates an instance of the :class:`Activity` class as aContactRelationUpdateActivity object. :returns: The new contact relation update activity. """ return Activity(type=ActivityTypes.contact_relation_update) @staticmethod def create_conversation_update_activity(): """ Creates an instance of the :class:`Activity` class as a ConversationUpdateActivity object. :returns: The new conversation update activity. """ return Activity(type=ActivityTypes.conversation_update) @staticmethod def create_end_of_conversation_activity(): """ Creates an instance of the :class:`Activity` class as an EndOfConversationActivity object. :returns: The new end of conversation activity. """ return Activity(type=ActivityTypes.end_of_conversation) @staticmethod def create_event_activity(): """ Creates an instance of the :class:`Activity` class as an EventActivity object. :returns: The new event activity. 
""" return Activity(type=ActivityTypes.event) @staticmethod def create_handoff_activity(): """ Creates an instance of the :class:`Activity` class as a HandoffActivity object. :returns: The new handoff activity. """ return Activity(type=ActivityTypes.handoff) @staticmethod def create_invoke_activity(): """ Creates an instance of the :class:`Activity` class as an InvokeActivity object. :returns: The new invoke activity. """ return Activity(type=ActivityTypes.invoke) @staticmethod def create_message_activity(): """ Creates an instance of the :class:`Activity` class as a MessageActivity object. :returns: The new message activity. """ return Activity(type=ActivityTypes.message) def create_reply(self, text: str = None, locale: str = None): """ Creates a new message activity as a response to this activity. :param text: The text of the reply. :param locale: The language code for the text. :returns: The new message activity. .. remarks:: The new activity sets up routing information based on this activity. """ return Activity( type=ActivityTypes.message, timestamp=datetime.utcnow(), from_property=ChannelAccount( id=self.recipient.id if self.recipient else None, name=self.recipient.name if self.recipient else None, ), recipient=ChannelAccount( id=self.from_property.id if self.from_property else None, name=self.from_property.name if self.from_property else None, ), reply_to_id=self.id, service_url=self.service_url, channel_id=self.channel_id, conversation=ConversationAccount( is_group=self.conversation.is_group, id=self.conversation.id, name=self.conversation.name, ), text=text if text else "", locale=locale if locale else self.locale, attachments=[], entities=[], ) def create_trace( self, name: str, value: object = None, value_type: str = None, label: str = None ): """ Creates a new trace activity based on this activity. :param name: The name of the trace operation to create. :param value: Optional, the content for this trace operation. :param value_type: Optional, identifier for the format of the value Default is the name of type of the value. :param label: Optional, a descriptive label for this trace operation. :returns: The new trace activity. """ if not value_type and value: value_type = type(value) return Activity( type=ActivityTypes.trace, timestamp=datetime.utcnow(), from_property=ChannelAccount( id=self.recipient.id if self.recipient else None, name=self.recipient.name if self.recipient else None, ), recipient=ChannelAccount( id=self.from_property.id if self.from_property else None, name=self.from_property.name if self.from_property else None, ), reply_to_id=self.id, service_url=self.service_url, channel_id=self.channel_id, conversation=ConversationAccount( is_group=self.conversation.is_group, id=self.conversation.id, name=self.conversation.name, ), name=name, label=label, value_type=value_type, value=value, ).as_trace_activity() @staticmethod def create_trace_activity( name: str, value: object = None, value_type: str = None, label: str = None ): """ Creates an instance of the :class:`Activity` class as a TraceActivity object. :param name: The name of the trace operation to create. :param value: Optional, the content for this trace operation. :param value_type: Optional, identifier for the format of the value. Default is the name of type of the value. :param label: Optional, a descriptive label for this trace operation. :returns: The new trace activity. 
""" if not value_type and value: value_type = type(value) return Activity( type=ActivityTypes.trace, name=name, label=label, value_type=value_type, value=value, ) @staticmethod def create_typing_activity(): """ Creates an instance of the :class:`Activity` class as a TypingActivity object. :returns: The new typing activity. """ return Activity(type=ActivityTypes.typing) def get_conversation_reference(self): """ Creates a ConversationReference based on this activity. :returns: A conversation reference for the conversation that contains this activity. """ return ConversationReference( activity_id=self.id, user=self.from_property, bot=self.recipient, conversation=self.conversation, channel_id=self.channel_id, locale=self.locale, service_url=self.service_url, ) def get_mentions(self) -> [Mention]: """ Resolves the mentions from the entities of this activity. :returns: The array of mentions; or an empty array, if none are found. .. remarks:: This method is defined on the :class:`Activity` class, but is only intended for use with a message activity, where the activity Activity.Type is set to ActivityTypes.Message. """ _list = self.entities return [x for x in _list if str(x.type).lower() == "mention"] def get_reply_conversation_reference( self, reply: ResourceResponse ) -> ConversationReference: """ Create a ConversationReference based on this Activity's Conversation info and the ResourceResponse from sending an activity. :param reply: ResourceResponse returned from send_activity. :return: A ConversationReference that can be stored and used later to delete or update the activity. """ reference = self.get_conversation_reference() reference.activity_id = reply.id return reference def has_content(self) -> bool: """ Indicates whether this activity has content. :returns: True, if this activity has any content to send; otherwise, false. .. remarks:: This method is defined on the :class:`Activity` class, but is only intended for use with a message activity, where the activity Activity.Type is set to ActivityTypes.Message. """ if self.text and self.text.strip(): return True if self.summary and self.summary.strip(): return True if self.attachments and len(self.attachments) > 0: return True if self.channel_data: return True return False def is_from_streaming_connection(self) -> bool: """ Determine if the Activity was sent via an Http/Https connection or Streaming This can be determined by looking at the service_url property: (1) All channels that send messages via http/https are not streaming (2) Channels that send messages via streaming have a ServiceUrl that does not begin with http/https. :returns: True if the Activity originated from a streaming connection. """ if self.service_url: return not self.service_url.lower().startswith("http") return False def __is_activity(self, activity_type: str) -> bool: """ Indicates whether this activity is of a specified activity type. :param activity_type: The activity type to check for. :return: True if this activity is of the specified activity type; otherwise, False. """ if self.type is None: return False type_attribute = str(self.type).lower() activity_type = str(activity_type).lower() result = type_attribute.startswith(activity_type) if result: result = len(type_attribute) == len(activity_type) if not result: result = ( len(type_attribute) > len(activity_type) and type_attribute[len(activity_type)] == "/" ) return result class AnimationCard(Model): """An animation card (Ex: gif or short video clip). 
:param title: Title of this card :type title: str :param subtitle: Subtitle of this card :type subtitle: str :param text: Text of this card :type text: str :param image: Thumbnail placeholder :type image: ~botframework.connector.models.ThumbnailUrl :param media: Media URLs for this card. When this field contains more than one URL, each URL is an alternative format of the same content. :type media: list[~botframework.connector.models.MediaUrl] :param buttons: Actions on this card :type buttons: list[~botframework.connector.models.CardAction] :param shareable: This content may be shared with others (default:true) :type shareable: bool :param autoloop: Should the client loop playback at end of content (default:true) :type autoloop: bool :param autostart: Should the client automatically start playback of media in this card (default:true) :type autostart: bool :param aspect: Aspect ratio of thumbnail/media placeholder. Allowed values are "16:9" and "4:3" :type aspect: str :param duration: Describes the length of the media content without requiring a receiver to open the content. Formatted as an ISO 8601 Duration field. :type duration: str :param value: Supplementary parameter for this card :type value: object """ _attribute_map = { "title": {"key": "title", "type": "str"}, "subtitle": {"key": "subtitle", "type": "str"}, "text": {"key": "text", "type": "str"}, "image": {"key": "image", "type": "ThumbnailUrl"}, "media": {"key": "media", "type": "[MediaUrl]"}, "buttons": {"key": "buttons", "type": "[CardAction]"}, "shareable": {"key": "shareable", "type": "bool"}, "autoloop": {"key": "autoloop", "type": "bool"}, "autostart": {"key": "autostart", "type": "bool"}, "aspect": {"key": "aspect", "type": "str"}, "duration": {"key": "duration", "type": "str"}, "value": {"key": "value", "type": "object"}, } def __init__( self, *, title: str = None, subtitle: str = None, text: str = None, image=None, media=None, buttons=None, shareable: bool = None, autoloop: bool = None, autostart: bool = None, aspect: str = None, duration: str = None, value=None, **kwargs ) -> None: super(AnimationCard, self).__init__(**kwargs) self.title = title self.subtitle = subtitle self.text = text self.image = image self.media = media self.buttons = buttons self.shareable = shareable self.autoloop = autoloop self.autostart = autostart self.aspect = aspect self.duration = duration self.value = value class Attachment(Model): """An attachment within an activity. :param content_type: mimetype/Contenttype for the file :type content_type: str :param content_url: Content Url :type content_url: str :param content: Embedded content :type content: object :param name: (OPTIONAL) The name of the attachment :type name: str :param thumbnail_url: (OPTIONAL) Thumbnail associated with attachment :type thumbnail_url: str """ _attribute_map = { "content_type": {"key": "contentType", "type": "str"}, "content_url": {"key": "contentUrl", "type": "str"}, "content": {"key": "content", "type": "object"}, "name": {"key": "name", "type": "str"}, "thumbnail_url": {"key": "thumbnailUrl", "type": "str"}, } def __init__( self, *, content_type: str = None, content_url: str = None, content=None, name: str = None, thumbnail_url: str = None, **kwargs ) -> None: super(Attachment, self).__init__(**kwargs) self.content_type = content_type self.content_url = content_url self.content = content self.name = name self.thumbnail_url = thumbnail_url class AttachmentData(Model): """Attachment data. 
:param type: Content-Type of the attachment :type type: str :param name: Name of the attachment :type name: str :param original_base64: Attachment content :type original_base64: bytearray :param thumbnail_base64: Attachment thumbnail :type thumbnail_base64: bytearray """ _attribute_map = { "type": {"key": "type", "type": "str"}, "name": {"key": "name", "type": "str"}, "original_base64": {"key": "originalBase64", "type": "bytearray"}, "thumbnail_base64": {"key": "thumbnailBase64", "type": "bytearray"}, } def __init__( self, *, type: str = None, name: str = None, original_base64: bytearray = None, thumbnail_base64: bytearray = None, **kwargs ) -> None: super(AttachmentData, self).__init__(**kwargs) self.type = type self.name = name self.original_base64 = original_base64 self.thumbnail_base64 = thumbnail_base64 class AttachmentInfo(Model): """Metadata for an attachment. :param name: Name of the attachment :type name: str :param type: ContentType of the attachment :type type: str :param views: attachment views :type views: list[~botframework.connector.models.AttachmentView] """ _attribute_map = { "name": {"key": "name", "type": "str"}, "type": {"key": "type", "type": "str"}, "views": {"key": "views", "type": "[AttachmentView]"}, } def __init__( self, *, name: str = None, type: str = None, views=None, **kwargs ) -> None: super(AttachmentInfo, self).__init__(**kwargs) self.name = name self.type = type self.views = views class AttachmentView(Model): """Attachment View name and size. :param view_id: Id of the attachment :type view_id: str :param size: Size of the attachment :type size: int """ _attribute_map = { "view_id": {"key": "viewId", "type": "str"}, "size": {"key": "size", "type": "int"}, } def __init__(self, *, view_id: str = None, size: int = None, **kwargs) -> None: super(AttachmentView, self).__init__(**kwargs) self.view_id = view_id self.size = size class AudioCard(Model): """Audio card. :param title: Title of this card :type title: str :param subtitle: Subtitle of this card :type subtitle: str :param text: Text of this card :type text: str :param image: Thumbnail placeholder :type image: ~botframework.connector.models.ThumbnailUrl :param media: Media URLs for this card. When this field contains more than one URL, each URL is an alternative format of the same content. :type media: list[~botframework.connector.models.MediaUrl] :param buttons: Actions on this card :type buttons: list[~botframework.connector.models.CardAction] :param shareable: This content may be shared with others (default:true) :type shareable: bool :param autoloop: Should the client loop playback at end of content (default:true) :type autoloop: bool :param autostart: Should the client automatically start playback of media in this card (default:true) :type autostart: bool :param aspect: Aspect ratio of thumbnail/media placeholder. Allowed values are "16:9" and "4:3" :type aspect: str :param duration: Describes the length of the media content without requiring a receiver to open the content. Formatted as an ISO 8601 Duration field. 
:type duration: str :param value: Supplementary parameter for this card :type value: object """ _attribute_map = { "title": {"key": "title", "type": "str"}, "subtitle": {"key": "subtitle", "type": "str"}, "text": {"key": "text", "type": "str"}, "image": {"key": "image", "type": "ThumbnailUrl"}, "media": {"key": "media", "type": "[MediaUrl]"}, "buttons": {"key": "buttons", "type": "[CardAction]"}, "shareable": {"key": "shareable", "type": "bool"}, "autoloop": {"key": "autoloop", "type": "bool"}, "autostart": {"key": "autostart", "type": "bool"}, "aspect": {"key": "aspect", "type": "str"}, "duration": {"key": "duration", "type": "str"}, "value": {"key": "value", "type": "object"}, } def __init__( self, *, title: str = None, subtitle: str = None, text: str = None, image=None, media=None, buttons=None, shareable: bool = None, autoloop: bool = None, autostart: bool = None, aspect: str = None, duration: str = None, value=None, **kwargs ) -> None: super(AudioCard, self).__init__(**kwargs) self.title = title self.subtitle = subtitle self.text = text self.image = image self.media = media self.buttons = buttons self.shareable = shareable self.autoloop = autoloop self.autostart = autostart self.aspect = aspect self.duration = duration self.value = value class BasicCard(Model): """A basic card. :param title: Title of the card :type title: str :param subtitle: Subtitle of the card :type subtitle: str :param text: Text for the card :type text: str :param images: Array of images for the card :type images: list[~botframework.connector.models.CardImage] :param buttons: Set of actions applicable to the current card :type buttons: list[~botframework.connector.models.CardAction] :param tap: This action will be activated when user taps on the card itself :type tap: ~botframework.connector.models.CardAction """ _attribute_map = { "title": {"key": "title", "type": "str"}, "subtitle": {"key": "subtitle", "type": "str"}, "text": {"key": "text", "type": "str"}, "images": {"key": "images", "type": "[CardImage]"}, "buttons": {"key": "buttons", "type": "[CardAction]"}, "tap": {"key": "tap", "type": "CardAction"}, } def __init__( self, *, title: str = None, subtitle: str = None, text: str = None, images=None, buttons=None, tap=None, **kwargs ) -> None: super(BasicCard, self).__init__(**kwargs) self.title = title self.subtitle = subtitle self.text = text self.images = images self.buttons = buttons self.tap = tap class CardAction(Model): """A clickable action. :param type: The type of action implemented by this button. Possible values include: 'openUrl', 'imBack', 'postBack', 'playAudio', 'playVideo', 'showImage', 'downloadFile', 'signin', 'call', 'messageBack' :type type: str or ~botframework.connector.models.ActionTypes :param title: Text description which appears on the button :type title: str :param image: Image URL which will appear on the button, next to text label :type image: str :param text: Text for this action :type text: str :param display_text: (Optional) text to display in the chat feed if the button is clicked :type display_text: str :param value: Supplementary parameter for action. 
Content of this property depends on the ActionType :type value: object :param channel_data: Channel-specific data associated with this action :type channel_data: object """ _attribute_map = { "type": {"key": "type", "type": "str"}, "title": {"key": "title", "type": "str"}, "image": {"key": "image", "type": "str"}, "text": {"key": "text", "type": "str"}, "display_text": {"key": "displayText", "type": "str"}, "value": {"key": "value", "type": "object"}, "channel_data": {"key": "channelData", "type": "object"}, } def __init__( self, *, type=None, title: str = None, image: str = None, text: str = None, display_text: str = None, value=None, channel_data=None, **kwargs ) -> None: super(CardAction, self).__init__(**kwargs) self.type = type self.title = title self.image = image self.text = text self.display_text = display_text self.value = value self.channel_data = channel_data class CardImage(Model): """An image on a card. :param url: URL thumbnail image for major content property :type url: str :param alt: Image description intended for screen readers :type alt: str :param tap: Action assigned to specific Attachment :type tap: ~botframework.connector.models.CardAction """ _attribute_map = { "url": {"key": "url", "type": "str"}, "alt": {"key": "alt", "type": "str"}, "tap": {"key": "tap", "type": "CardAction"}, } def __init__(self, *, url: str = None, alt: str = None, tap=None, **kwargs) -> None: super(CardImage, self).__init__(**kwargs) self.url = url self.alt = alt self.tap = tap class ChannelAccount(Model): """Channel account information needed to route a message. :param id: Channel id for the user or bot on this channel (Example: [email protected], or @joesmith or 123456) :type id: str :param name: Display friendly name :type name: str :param aad_object_id: This account's object ID within Azure Active Directory (AAD) :type aad_object_id: str :param role: Role of the entity behind the account (Example: User, Bot, etc.). Possible values include: 'user', 'bot' :type role: str or ~botframework.connector.models.RoleTypes """ _attribute_map = { "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "aad_object_id": {"key": "aadObjectId", "type": "str"}, "role": {"key": "role", "type": "str"}, } def __init__( self, *, id: str = None, name: str = None, aad_object_id: str = None, role=None, **kwargs ) -> None: super(ChannelAccount, self).__init__(**kwargs) self.id = id self.name = name self.aad_object_id = aad_object_id self.role = role class ConversationAccount(Model): """Conversation account represents the identity of the conversation within a channel. :param is_group: Indicates whether the conversation contains more than two participants at the time the activity was generated :type is_group: bool :param conversation_type: Indicates the type of the conversation in channels that distinguish between conversation types :type conversation_type: str :param id: Channel id for the user or bot on this channel (Example: [email protected], or @joesmith or 123456) :type id: str :param name: Display friendly name :type name: str :param aad_object_id: This account's object ID within Azure Active Directory (AAD) :type aad_object_id: str :param role: Role of the entity behind the account (Example: User, Bot, etc.). 
Possible values include: 'user', 'bot' :type role: str or ~botframework.connector.models.RoleTypes :param tenant_id: This conversation's tenant ID :type tenant_id: str :param properties: This conversation's properties :type properties: object """ _attribute_map = { "is_group": {"key": "isGroup", "type": "bool"}, "conversation_type": {"key": "conversationType", "type": "str"}, "id": {"key": "id", "type": "str"}, "name": {"key": "name", "type": "str"}, "aad_object_id": {"key": "aadObjectId", "type": "str"}, "role": {"key": "role", "type": "str"}, "tenant_id": {"key": "tenantID", "type": "str"}, "properties": {"key": "properties", "type": "object"}, } def __init__( self, *, is_group: bool = None, conversation_type: str = None, id: str = None, name: str = None, aad_object_id: str = None, role=None, tenant_id=None, properties=None, **kwargs ) -> None: super(ConversationAccount, self).__init__(**kwargs) self.is_group = is_group self.conversation_type = conversation_type self.id = id self.name = name self.aad_object_id = aad_object_id self.role = role self.tenant_id = tenant_id self.properties = properties class ConversationMembers(Model): """Conversation and its members. :param id: Conversation ID :type id: str :param members: List of members in this conversation :type members: list[~botframework.connector.models.ChannelAccount] """ _attribute_map = { "id": {"key": "id", "type": "str"}, "members": {"key": "members", "type": "[ChannelAccount]"}, } def __init__(self, *, id: str = None, members=None, **kwargs) -> None: super(ConversationMembers, self).__init__(**kwargs) self.id = id self.members = members class ConversationParameters(Model): """Parameters for creating a new conversation. :param is_group: IsGroup :type is_group: bool :param bot: The bot address for this conversation :type bot: ~botframework.connector.models.ChannelAccount :param members: Members to add to the conversation :type members: list[~botframework.connector.models.ChannelAccount] :param topic_name: (Optional) Topic of the conversation (if supported by the channel) :type topic_name: str :param activity: (Optional) When creating a new conversation, use this activity as the initial message to the conversation :type activity: ~botframework.connector.models.Activity :param channel_data: Channel specific payload for creating the conversation :type channel_data: object :param tenant_id: (Optional) The tenant ID in which the conversation should be created :type tenant_id: str """ _attribute_map = { "is_group": {"key": "isGroup", "type": "bool"}, "bot": {"key": "bot", "type": "ChannelAccount"}, "members": {"key": "members", "type": "[ChannelAccount]"}, "topic_name": {"key": "topicName", "type": "str"}, "activity": {"key": "activity", "type": "Activity"}, "channel_data": {"key": "channelData", "type": "object"}, "tenant_id": {"key": "tenantID", "type": "str"}, } def __init__( self, *, is_group: bool = None, bot=None, members=None, topic_name: str = None, activity=None, channel_data=None, tenant_id=None, **kwargs ) -> None: super(ConversationParameters, self).__init__(**kwargs) self.is_group = is_group self.bot = bot self.members = members self.topic_name = topic_name self.activity = activity self.channel_data = channel_data self.tenant_id = tenant_id class ConversationResourceResponse(Model): """A response containing a resource. 
    :param activity_id: ID of the Activity (if sent)
    :type activity_id: str
    :param service_url: Service endpoint where operations concerning the
     conversation may be performed
    :type service_url: str
    :param id: Id of the resource
    :type id: str
    """

    _attribute_map = {
        "activity_id": {"key": "activityId", "type": "str"},
        "service_url": {"key": "serviceUrl", "type": "str"},
        "id": {"key": "id", "type": "str"},
    }

    def __init__(
        self,
        *,
        activity_id: str = None,
        service_url: str = None,
        id: str = None,
        **kwargs
    ) -> None:
        super(ConversationResourceResponse, self).__init__(**kwargs)
        self.activity_id = activity_id
        self.service_url = service_url
        self.id = id


class ConversationsResult(Model):
    """Conversations result.

    :param continuation_token: Paging token
    :type continuation_token: str
    :param conversations: List of conversations
    :type conversations:
     list[~botframework.connector.models.ConversationMembers]
    """

    _attribute_map = {
        "continuation_token": {"key": "continuationToken", "type": "str"},
        "conversations": {"key": "conversations", "type": "[ConversationMembers]"},
    }

    def __init__(
        self, *, continuation_token: str = None, conversations=None, **kwargs
    ) -> None:
        super(ConversationsResult, self).__init__(**kwargs)
        self.continuation_token = continuation_token
        self.conversations = conversations


class ExpectedReplies(Model):
    """ExpectedReplies.

    :param activities: A collection of Activities that conforms to the
     ExpectedReplies schema.
    :type activities: list[~botframework.connector.models.Activity]
    """

    _attribute_map = {"activities": {"key": "activities", "type": "[Activity]"}}

    def __init__(self, *, activities=None, **kwargs) -> None:
        super(ExpectedReplies, self).__init__(**kwargs)
        self.activities = activities


class Entity(Model):
    """Metadata object pertaining to an activity.

    :param type: Type of this entity (RFC 3987 IRI)
    :type type: str
    """

    _attribute_map = {"type": {"key": "type", "type": "str"}}

    def __init__(self, *, type: str = None, **kwargs) -> None:
        super(Entity, self).__init__(**kwargs)
        self.type = type


class Error(Model):
    """Object representing error information.

    :param code: Error code
    :type code: str
    :param message: Error message
    :type message: str
    :param inner_http_error: Error from inner http call
    :type inner_http_error: ~botframework.connector.models.InnerHttpError
    """

    _attribute_map = {
        "code": {"key": "code", "type": "str"},
        "message": {"key": "message", "type": "str"},
        "inner_http_error": {"key": "innerHttpError", "type": "InnerHttpError"},
    }

    def __init__(
        self, *, code: str = None, message: str = None, inner_http_error=None, **kwargs
    ) -> None:
        super(Error, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.inner_http_error = inner_http_error


class ErrorResponse(Model):
    """An HTTP API response.

    :param error: Error message
    :type error: ~botframework.connector.models.Error
    """

    _attribute_map = {"error": {"key": "error", "type": "Error"}}

    def __init__(self, *, error=None, **kwargs) -> None:
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = error


class ErrorResponseException(HttpOperationError):
    """Server responded with exception of type: 'ErrorResponse'.

    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        super(ErrorResponseException, self).__init__(
            deserialize, response, "ErrorResponse", *args
        )


class Fact(Model):
    """Set of key-value pairs. Advantage of this section is that key and
    value properties will be rendered with default style information with
    some delimiter between them.
So there is no need for developer to specify style information. :param key: The key for this Fact :type key: str :param value: The value for this Fact :type value: str """ _attribute_map = { "key": {"key": "key", "type": "str"}, "value": {"key": "value", "type": "str"}, } def __init__(self, *, key: str = None, value: str = None, **kwargs) -> None: super(Fact, self).__init__(**kwargs) self.key = key self.value = value class GeoCoordinates(Model): """GeoCoordinates (entity type: "https://schema.org/GeoCoordinates"). :param elevation: Elevation of the location [WGS 84](https://en.wikipedia.org/wiki/World_Geodetic_System) :type elevation: float :param latitude: Latitude of the location [WGS 84](https://en.wikipedia.org/wiki/World_Geodetic_System) :type latitude: float :param longitude: Longitude of the location [WGS 84](https://en.wikipedia.org/wiki/World_Geodetic_System) :type longitude: float :param type: The type of the thing :type type: str :param name: The name of the thing :type name: str """ _attribute_map = { "elevation": {"key": "elevation", "type": "float"}, "latitude": {"key": "latitude", "type": "float"}, "longitude": {"key": "longitude", "type": "float"}, "type": {"key": "type", "type": "str"}, "name": {"key": "name", "type": "str"}, } def __init__( self, *, elevation: float = None, latitude: float = None, longitude: float = None, type: str = None, name: str = None, **kwargs ) -> None: super(GeoCoordinates, self).__init__(**kwargs) self.elevation = elevation self.latitude = latitude self.longitude = longitude self.type = type self.name = name class HeroCard(Model): """A Hero card (card with a single, large image). :param title: Title of the card :type title: str :param subtitle: Subtitle of the card :type subtitle: str :param text: Text for the card :type text: str :param images: Array of images for the card :type images: list[~botframework.connector.models.CardImage] :param buttons: Set of actions applicable to the current card :type buttons: list[~botframework.connector.models.CardAction] :param tap: This action will be activated when user taps on the card itself :type tap: ~botframework.connector.models.CardAction """ _attribute_map = { "title": {"key": "title", "type": "str"}, "subtitle": {"key": "subtitle", "type": "str"}, "text": {"key": "text", "type": "str"}, "images": {"key": "images", "type": "[CardImage]"}, "buttons": {"key": "buttons", "type": "[CardAction]"}, "tap": {"key": "tap", "type": "CardAction"}, } def __init__( self, *, title: str = None, subtitle: str = None, text: str = None, images=None, buttons=None, tap=None, **kwargs ) -> None: super(HeroCard, self).__init__(**kwargs) self.title = title self.subtitle = subtitle self.text = text self.images = images self.buttons = buttons self.tap = tap class InnerHttpError(Model): """Object representing inner http error. :param status_code: HttpStatusCode from failed request :type status_code: int :param body: Body from failed request :type body: object """ _attribute_map = { "status_code": {"key": "statusCode", "type": "int"}, "body": {"key": "body", "type": "object"}, } def __init__(self, *, status_code: int = None, body=None, **kwargs) -> None: super(InnerHttpError, self).__init__(**kwargs) self.status_code = status_code self.body = body class MediaCard(Model): """Media card. 
:param title: Title of this card :type title: str :param subtitle: Subtitle of this card :type subtitle: str :param text: Text of this card :type text: str :param image: Thumbnail placeholder :type image: ~botframework.connector.models.ThumbnailUrl :param media: Media URLs for this card. When this field contains more than one URL, each URL is an alternative format of the same content. :type media: list[~botframework.connector.models.MediaUrl] :param buttons: Actions on this card :type buttons: list[~botframework.connector.models.CardAction] :param shareable: This content may be shared with others (default:true) :type shareable: bool :param autoloop: Should the client loop playback at end of content (default:true) :type autoloop: bool :param autostart: Should the client automatically start playback of media in this card (default:true) :type autostart: bool :param aspect: Aspect ratio of thumbnail/media placeholder. Allowed values are "16:9" and "4:3" :type aspect: str :param duration: Describes the length of the media content without requiring a receiver to open the content. Formatted as an ISO 8601 Duration field. :type duration: str :param value: Supplementary parameter for this card :type value: object """ _attribute_map = { "title": {"key": "title", "type": "str"}, "subtitle": {"key": "subtitle", "type": "str"}, "text": {"key": "text", "type": "str"}, "image": {"key": "image", "type": "ThumbnailUrl"}, "media": {"key": "media", "type": "[MediaUrl]"}, "buttons": {"key": "buttons", "type": "[CardAction]"}, "shareable": {"key": "shareable", "type": "bool"}, "autoloop": {"key": "autoloop", "type": "bool"}, "autostart": {"key": "autostart", "type": "bool"}, "aspect": {"key": "aspect", "type": "str"}, "duration": {"key": "duration", "type": "str"}, "value": {"key": "value", "type": "object"}, } def __init__( self, *, title: str = None, subtitle: str = None, text: str = None, image=None, media=None, buttons=None, shareable: bool = None, autoloop: bool = None, autostart: bool = None, aspect: str = None, duration: str = None, value=None, **kwargs ) -> None: super(MediaCard, self).__init__(**kwargs) self.title = title self.subtitle = subtitle self.text = text self.image = image self.media = media self.buttons = buttons self.shareable = shareable self.autoloop = autoloop self.autostart = autostart self.aspect = aspect self.duration = duration self.value = value class MediaEventValue(Model): """Supplementary parameter for media events. :param card_value: Callback parameter specified in the Value field of the MediaCard that originated this event :type card_value: object """ _attribute_map = {"card_value": {"key": "cardValue", "type": "object"}} def __init__(self, *, card_value=None, **kwargs) -> None: super(MediaEventValue, self).__init__(**kwargs) self.card_value = card_value class MediaUrl(Model): """Media URL. :param url: Url for the media :type url: str :param profile: Optional profile hint to the client to differentiate multiple MediaUrl objects from each other :type profile: str """ _attribute_map = { "url": {"key": "url", "type": "str"}, "profile": {"key": "profile", "type": "str"}, } def __init__(self, *, url: str = None, profile: str = None, **kwargs) -> None: super(MediaUrl, self).__init__(**kwargs) self.url = url self.profile = profile class MessageReaction(Model): """Message reaction object. :param type: Message reaction type. 
     Possible values include: 'like', 'plusOne'
    :type type: str or ~botframework.connector.models.MessageReactionTypes
    """

    _attribute_map = {"type": {"key": "type", "type": "str"}}

    def __init__(self, *, type=None, **kwargs) -> None:
        super(MessageReaction, self).__init__(**kwargs)
        self.type = type


class OAuthCard(Model):
    """A card representing a request to perform a sign in via OAuth.

    :param text: Text for signin request
    :type text: str
    :param connection_name: The name of the registered connection
    :type connection_name: str
    :param buttons: Action to use to perform signin
    :type buttons: list[~botframework.connector.models.CardAction]
    :param token_exchange_resource: The resource to try to perform token
     exchange with
    :type token_exchange_resource: object
    """

    _attribute_map = {
        "text": {"key": "text", "type": "str"},
        "connection_name": {"key": "connectionName", "type": "str"},
        "buttons": {"key": "buttons", "type": "[CardAction]"},
        "token_exchange_resource": {"key": "tokenExchangeResource", "type": "object"},
    }

    def __init__(
        self,
        *,
        text: str = None,
        connection_name: str = None,
        buttons=None,
        token_exchange_resource=None,
        **kwargs
    ) -> None:
        super(OAuthCard, self).__init__(**kwargs)
        self.text = text
        self.connection_name = connection_name
        self.buttons = buttons
        self.token_exchange_resource = token_exchange_resource


class PagedMembersResult(Model):
    """Page of members.

    :param continuation_token: Paging token
    :type continuation_token: str
    :param members: The Channel Accounts.
    :type members: list[~botframework.connector.models.ChannelAccount]
    """

    _attribute_map = {
        "continuation_token": {"key": "continuationToken", "type": "str"},
        "members": {"key": "members", "type": "[ChannelAccount]"},
    }

    def __init__(
        self, *, continuation_token: str = None, members=None, **kwargs
    ) -> None:
        super(PagedMembersResult, self).__init__(**kwargs)
        self.continuation_token = continuation_token
        self.members = members


class Place(Model):
    """Place (entity type: "https://schema.org/Place").

    :param address: Address of the place (may be `string` or complex object
     of type `PostalAddress`)
    :type address: object
    :param geo: Geo coordinates of the place (may be complex object of type
     `GeoCoordinates` or `GeoShape`)
    :type geo: object
    :param has_map: Map to the place (may be `string` (URL) or complex object
     of type `Map`)
    :type has_map: object
    :param type: The type of the thing
    :type type: str
    :param name: The name of the thing
    :type name: str
    """

    _attribute_map = {
        "address": {"key": "address", "type": "object"},
        "geo": {"key": "geo", "type": "object"},
        "has_map": {"key": "hasMap", "type": "object"},
        "type": {"key": "type", "type": "str"},
        "name": {"key": "name", "type": "str"},
    }

    def __init__(
        self,
        *,
        address=None,
        geo=None,
        has_map=None,
        type: str = None,
        name: str = None,
        **kwargs
    ) -> None:
        super(Place, self).__init__(**kwargs)
        self.address = address
        self.geo = geo
        self.has_map = has_map
        self.type = type
        self.name = name


class ReceiptCard(Model):
    """A receipt card.
:param title: Title of the card :type title: str :param facts: Array of Fact objects :type facts: list[~botframework.connector.models.Fact] :param items: Array of Receipt Items :type items: list[~botframework.connector.models.ReceiptItem] :param tap: This action will be activated when user taps on the card :type tap: ~botframework.connector.models.CardAction :param total: Total amount of money paid (or to be paid) :type total: str :param tax: Total amount of tax paid (or to be paid) :type tax: str :param vat: Total amount of VAT paid (or to be paid) :type vat: str :param buttons: Set of actions applicable to the current card :type buttons: list[~botframework.connector.models.CardAction] """ _attribute_map = { "title": {"key": "title", "type": "str"}, "facts": {"key": "facts", "type": "[Fact]"}, "items": {"key": "items", "type": "[ReceiptItem]"}, "tap": {"key": "tap", "type": "CardAction"}, "total": {"key": "total", "type": "str"}, "tax": {"key": "tax", "type": "str"}, "vat": {"key": "vat", "type": "str"}, "buttons": {"key": "buttons", "type": "[CardAction]"}, } def __init__( self, *, title: str = None, facts=None, items=None, tap=None, total: str = None, tax: str = None, vat: str = None, buttons=None, **kwargs ) -> None: super(ReceiptCard, self).__init__(**kwargs) self.title = title self.facts = facts self.items = items self.tap = tap self.total = total self.tax = tax self.vat = vat self.buttons = buttons class ReceiptItem(Model): """An item on a receipt card. :param title: Title of the Card :type title: str :param subtitle: Subtitle appears just below Title field, differs from Title in font styling only :type subtitle: str :param text: Text field appears just below subtitle, differs from Subtitle in font styling only :type text: str :param image: Image :type image: ~botframework.connector.models.CardImage :param price: Amount with currency :type price: str :param quantity: Number of items of given kind :type quantity: str :param tap: This action will be activated when user taps on the Item bubble. :type tap: ~botframework.connector.models.CardAction """ _attribute_map = { "title": {"key": "title", "type": "str"}, "subtitle": {"key": "subtitle", "type": "str"}, "text": {"key": "text", "type": "str"}, "image": {"key": "image", "type": "CardImage"}, "price": {"key": "price", "type": "str"}, "quantity": {"key": "quantity", "type": "str"}, "tap": {"key": "tap", "type": "CardAction"}, } def __init__( self, *, title: str = None, subtitle: str = None, text: str = None, image=None, price: str = None, quantity: str = None, tap=None, **kwargs ) -> None: super(ReceiptItem, self).__init__(**kwargs) self.title = title self.subtitle = subtitle self.text = text self.image = image self.price = price self.quantity = quantity self.tap = tap class SemanticAction(Model): """Represents a reference to a programmatic action. :param id: ID of this action :type id: str :param entities: Entities associated with this action :type entities: dict[str, ~botframework.connector.models.Entity] :param state: State of this action. 
Allowed values: `start`, `continue`, `done` :type state: str or ~botframework.connector.models.SemanticActionStates """ _attribute_map = { "id": {"key": "id", "type": "str"}, "entities": {"key": "entities", "type": "{Entity}"}, "state": {"key": "state", "type": "str"}, } def __init__(self, *, id: str = None, entities=None, state=None, **kwargs) -> None: super(SemanticAction, self).__init__(**kwargs) self.id = id self.entities = entities self.state = state class SigninCard(Model): """A card representing a request to sign in. :param text: Text for signin request :type text: str :param buttons: Action to use to perform signin :type buttons: list[~botframework.connector.models.CardAction] """ _attribute_map = { "text": {"key": "text", "type": "str"}, "buttons": {"key": "buttons", "type": "[CardAction]"}, } def __init__(self, *, text: str = None, buttons=None, **kwargs) -> None: super(SigninCard, self).__init__(**kwargs) self.text = text self.buttons = buttons class SuggestedActions(Model): """SuggestedActions that can be performed. :param to: Ids of the recipients that the actions should be shown to. These Ids are relative to the channelId and a subset of all recipients of the activity :type to: list[str] :param actions: Actions that can be shown to the user :type actions: list[~botframework.connector.models.CardAction] """ _attribute_map = { "to": {"key": "to", "type": "[str]"}, "actions": {"key": "actions", "type": "[CardAction]"}, } def __init__(self, *, to=None, actions=None, **kwargs) -> None: super(SuggestedActions, self).__init__(**kwargs) self.to = to self.actions = actions class TextHighlight(Model): """Refers to a substring of content within another field. :param text: Defines the snippet of text to highlight :type text: str :param occurrence: Occurrence of the text field within the referenced text, if multiple exist. :type occurrence: int """ _attribute_map = { "text": {"key": "text", "type": "str"}, "occurrence": {"key": "occurrence", "type": "int"}, } def __init__(self, *, text: str = None, occurrence: int = None, **kwargs) -> None: super(TextHighlight, self).__init__(**kwargs) self.text = text self.occurrence = occurrence class Thing(Model): """Thing (entity type: "https://schema.org/Thing"). :param type: The type of the thing :type type: str :param name: The name of the thing :type name: str """ _attribute_map = { "type": {"key": "type", "type": "str"}, "name": {"key": "name", "type": "str"}, } def __init__(self, *, type: str = None, name: str = None, **kwargs) -> None: super(Thing, self).__init__(**kwargs) self.type = type self.name = name class ThumbnailCard(Model): """A thumbnail card (card with a single, small thumbnail image). 
:param title: Title of the card :type title: str :param subtitle: Subtitle of the card :type subtitle: str :param text: Text for the card :type text: str :param images: Array of images for the card :type images: list[~botframework.connector.models.CardImage] :param buttons: Set of actions applicable to the current card :type buttons: list[~botframework.connector.models.CardAction] :param tap: This action will be activated when user taps on the card itself :type tap: ~botframework.connector.models.CardAction """ _attribute_map = { "title": {"key": "title", "type": "str"}, "subtitle": {"key": "subtitle", "type": "str"}, "text": {"key": "text", "type": "str"}, "images": {"key": "images", "type": "[CardImage]"}, "buttons": {"key": "buttons", "type": "[CardAction]"}, "tap": {"key": "tap", "type": "CardAction"}, } def __init__( self, *, title: str = None, subtitle: str = None, text: str = None, images=None, buttons=None, tap=None, **kwargs ) -> None: super(ThumbnailCard, self).__init__(**kwargs) self.title = title self.subtitle = subtitle self.text = text self.images = images self.buttons = buttons self.tap = tap class ThumbnailUrl(Model): """Thumbnail URL. :param url: URL pointing to the thumbnail to use for media content :type url: str :param alt: HTML alt text to include on this thumbnail image :type alt: str """ _attribute_map = { "url": {"key": "url", "type": "str"}, "alt": {"key": "alt", "type": "str"}, } def __init__(self, *, url: str = None, alt: str = None, **kwargs) -> None: super(ThumbnailUrl, self).__init__(**kwargs) self.url = url self.alt = alt class TokenExchangeInvokeRequest(Model): """TokenExchangeInvokeRequest. :param id: The id from the OAuthCard. :type id: str :param connection_name: The connection name. :type connection_name: str :param token: The user token that can be exchanged. :type token: str :param properties: Extension data for overflow of properties. :type properties: dict[str, object] """ _attribute_map = { "id": {"key": "id", "type": "str"}, "connection_name": {"key": "connectionName", "type": "str"}, "token": {"key": "token", "type": "str"}, "properties": {"key": "properties", "type": "{object}"}, } def __init__( self, *, id: str = None, connection_name: str = None, token: str = None, properties=None, **kwargs ) -> None: super(TokenExchangeInvokeRequest, self).__init__(**kwargs) self.id = id self.connection_name = connection_name self.token = token self.properties = properties class TokenExchangeInvokeResponse(Model): """TokenExchangeInvokeResponse. :param id: The id from the OAuthCard. :type id: str :param connection_name: The connection name. :type connection_name: str :param failure_detail: The details of why the token exchange failed. :type failure_detail: str :param properties: Extension data for overflow of properties. :type properties: dict[str, object] """ _attribute_map = { "id": {"key": "id", "type": "str"}, "connection_name": {"key": "connectionName", "type": "str"}, "failure_detail": {"key": "failureDetail", "type": "str"}, "properties": {"key": "properties", "type": "{object}"}, } def __init__( self, *, id: str = None, connection_name: str = None, failure_detail: str = None, properties=None, **kwargs ) -> None: super(TokenExchangeInvokeResponse, self).__init__(**kwargs) self.id = id self.connection_name = connection_name self.failure_detail = failure_detail self.properties = properties class TokenExchangeState(Model): """TokenExchangeState :param connection_name: The connection name that was used. 
    :type connection_name: str
    :param conversation: Gets or sets a reference to the conversation.
    :type conversation: ~botframework.connector.models.ConversationReference
    :param relates_to: Gets or sets a reference to a related parent
     conversation for this token exchange.
    :type relates_to: ~botframework.connector.models.ConversationReference
    :param bot_url: The URL of the bot messaging endpoint.
    :type bot_url: str
    :param ms_app_id: The bot's registered application ID.
    :type ms_app_id: str
    """

    _attribute_map = {
        "connection_name": {"key": "connectionName", "type": "str"},
        "conversation": {"key": "conversation", "type": "ConversationReference"},
        "relates_to": {"key": "relatesTo", "type": "ConversationReference"},
        # Fixed: this previously reused the "connectionName" wire key, which
        # would collide with connection_name during serialization.
        "bot_url": {"key": "botUrl", "type": "str"},
        "ms_app_id": {"key": "msAppId", "type": "str"},
    }

    def __init__(
        self,
        *,
        connection_name: str = None,
        conversation=None,
        relates_to=None,
        bot_url: str = None,
        ms_app_id: str = None,
        **kwargs
    ) -> None:
        super(TokenExchangeState, self).__init__(**kwargs)
        self.connection_name = connection_name
        self.conversation = conversation
        self.relates_to = relates_to
        self.bot_url = bot_url
        self.ms_app_id = ms_app_id


class TokenRequest(Model):
    """A request to receive a user token.

    :param provider: The provider to request a user token from
    :type provider: str
    :param settings: A collection of settings for the specific provider for
     this request
    :type settings: dict[str, object]
    """

    _attribute_map = {
        "provider": {"key": "provider", "type": "str"},
        "settings": {"key": "settings", "type": "{object}"},
    }

    def __init__(self, *, provider: str = None, settings=None, **kwargs) -> None:
        super(TokenRequest, self).__init__(**kwargs)
        self.provider = provider
        self.settings = settings


class TokenResponse(Model):
    """A response that includes a user token.

    :param connection_name: The connection name
    :type connection_name: str
    :param token: The user token
    :type token: str
    :param expiration: Expiration for the token, in ISO 8601 format (e.g.
     "2007-04-05T14:30Z")
    :type expiration: str
    :param channel_id: The channelId of the TokenResponse
    :type channel_id: str
    """

    _attribute_map = {
        "connection_name": {"key": "connectionName", "type": "str"},
        "token": {"key": "token", "type": "str"},
        "expiration": {"key": "expiration", "type": "str"},
        "channel_id": {"key": "channelId", "type": "str"},
    }

    def __init__(
        self,
        *,
        connection_name: str = None,
        token: str = None,
        expiration: str = None,
        channel_id: str = None,
        **kwargs
    ) -> None:
        super(TokenResponse, self).__init__(**kwargs)
        self.connection_name = connection_name
        self.token = token
        self.expiration = expiration
        self.channel_id = channel_id


class Transcript(Model):
    """Transcript.

    :param activities: A collection of Activities that conforms to the
     Transcript schema.
    :type activities: list[~botframework.connector.models.Activity]
    """

    _attribute_map = {"activities": {"key": "activities", "type": "[Activity]"}}

    def __init__(self, *, activities=None, **kwargs) -> None:
        super(Transcript, self).__init__(**kwargs)
        self.activities = activities


class VideoCard(Model):
    """Video card.

    :param title: Title of this card
    :type title: str
    :param subtitle: Subtitle of this card
    :type subtitle: str
    :param text: Text of this card
    :type text: str
    :param image: Thumbnail placeholder
    :type image: ~botframework.connector.models.ThumbnailUrl
    :param media: Media URLs for this card. When this field contains more
     than one URL, each URL is an alternative format of the same content.
:type media: list[~botframework.connector.models.MediaUrl] :param buttons: Actions on this card :type buttons: list[~botframework.connector.models.CardAction] :param shareable: This content may be shared with others (default:true) :type shareable: bool :param autoloop: Should the client loop playback at end of content (default:true) :type autoloop: bool :param autostart: Should the client automatically start playback of media in this card (default:true) :type autostart: bool :param aspect: Aspect ratio of thumbnail/media placeholder. Allowed values are "16:9" and "4:3" :type aspect: str :param duration: Describes the length of the media content without requiring a receiver to open the content. Formatted as an ISO 8601 Duration field. :type duration: str :param value: Supplementary parameter for this card :type value: object """ _attribute_map = { "title": {"key": "title", "type": "str"}, "subtitle": {"key": "subtitle", "type": "str"}, "text": {"key": "text", "type": "str"}, "image": {"key": "image", "type": "ThumbnailUrl"}, "media": {"key": "media", "type": "[MediaUrl]"}, "buttons": {"key": "buttons", "type": "[CardAction]"}, "shareable": {"key": "shareable", "type": "bool"}, "autoloop": {"key": "autoloop", "type": "bool"}, "autostart": {"key": "autostart", "type": "bool"}, "aspect": {"key": "aspect", "type": "str"}, "duration": {"key": "duration", "type": "str"}, "value": {"key": "value", "type": "object"}, } def __init__( self, *, title: str = None, subtitle: str = None, text: str = None, image=None, media=None, buttons=None, shareable: bool = None, autoloop: bool = None, autostart: bool = None, aspect: str = None, duration: str = None, value=None, **kwargs ) -> None: super(VideoCard, self).__init__(**kwargs) self.title = title self.subtitle = subtitle self.text = text self.image = image self.media = media self.buttons = buttons self.shareable = shareable self.autoloop = autoloop self.autostart = autostart self.aspect = aspect self.duration = duration self.value = value
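# Example (a minimal sketch added by the editor, not part of the original SDK
# module): composing the model classes above into a HeroCard and flattening it
# to its wire keys via _attribute_map. In the real SDK, serialization goes
# through msrest's Model machinery; the dict comprehension here is only
# illustrative glue.
def _example_hero_card_payload():
    card = HeroCard(
        title="Weather",
        text="Sunny tomorrow",
        images=[CardImage(url="https://example.com/sun.png", alt="sun icon")],
        buttons=[CardAction(type="openUrl", title="Details",
                            value="https://example.com/forecast")],
    )
    # Map python attribute names to their JSON wire keys.
    return {spec["key"]: getattr(card, attr)
            for attr, spec in card._attribute_map.items()}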
python
from frappe import _ #def get_data(): # return { # 'heatmap': True, # 'heatmap_message': _('This is based on the attendance of this Student'), # 'fieldname': 'cargo', # 'transactions': [ # { # 'label': _('Gate1'), # 'items': ['Gate1'] # }, # { # 'label': _('Student Activity'), # 'items': ['Gate2' ] # } # ] # }
python
# stdlib
from typing import Any
from typing import Optional

# third party
from google.protobuf.reflection import GeneratedProtocolMessageType

# syft absolute
from syft import deserialize
from syft import serialize

# relative
from .. import python as py
from ...core.common.serde.serializable import bind_protobuf
from ...core.common.uid import UID
from ...logger import traceback_and_raise
from ...proto.lib.python.iterator_pb2 import Iterator as Iterator_PB
from .primitive_factory import PrimitiveFactory
from .primitive_interface import PyPrimitive
from .types import SyPrimitiveRet


@bind_protobuf
class Iterator(PyPrimitive):
    def __init__(self, _ref: Any, max_len: Optional[int] = None):
        super().__init__()
        self._obj_ref = _ref
        self._index = 0
        self._id = UID()
        self.max_len = max_len
        self.exhausted = False

    def __iter__(self) -> "Iterator":
        return self

    def __len__(self) -> int:
        try:
            return len(self._obj_ref)
        except Exception as e:
            traceback_and_raise(e)

    def __reduce__(self) -> Any:
        # see these tests: test_valuesiterator_pickling and test_iterator_pickling
        raise TypeError(f"Pickling {type(self)} is not supported.")

    def __eq__(self, other: Any) -> SyPrimitiveRet:
        if hasattr(other, "_obj_ref"):
            res = self._obj_ref == other._obj_ref
        else:
            res = self._obj_ref == other
        return PrimitiveFactory.generate_primitive(value=res)

    def __next__(self) -> Any:
        # we need to do lots of getattr / setattr because sometimes the __next__
        # method gets called with a generator
        try:
            if hasattr(self, "_obj_ref"):
                _obj_ref = self._obj_ref
            else:
                # we got handed a generator directly into __next__
                # happens in test_reversed_iterator
                _obj_ref = self

            # max_len == None means the _ref could update while iterating. While that
            # shouldn't happen with an IteratorPointer, it can happen on a local Iterator.
            # If that's the case we just calculate it each time. Importantly we need to
            # still set exhausted otherwise the test case in list_test.py won't pass.
            max_len = None
            if hasattr(self, "max_len"):
                max_len = self.max_len

            if max_len is None:
                try:
                    if hasattr(_obj_ref, "__len__"):
                        max_len = _obj_ref.__len__()
                except AttributeError:
                    # I am not sure why this happens on some types
                    pass

            exhausted = getattr(self, "exhausted", False)
            self_index = getattr(self, "_index", 0)
            if (max_len is not None and self_index >= max_len) or exhausted:
                setattr(self, "exhausted", True)
                raise StopIteration

            try:
                if hasattr(_obj_ref, "__next__"):
                    try:
                        obj = next(_obj_ref)
                    except Exception as e:
                        if type(e) is StopIteration:
                            raise e
                        if type(e) is AttributeError:
                            # no _mapping exhausted?
                            raise StopIteration()
                        if type(e) is NameError:
                            # free after use?
                            raise StopIteration()

                        # test_dictitems_contains_use_after_free wants us to raise StopIteration
                        # test_merge_and_mutate and test_mutating_iteration want us to
                        # raise a RuntimeError
                        # see:
                        # def test_dictitems_contains_use_after_free(self):
                        # Let's raise a RuntimeError for now
                        raise RuntimeError

                elif hasattr(_obj_ref, "__getitem__") and hasattr(self, "_index"):
                    obj = _obj_ref[self._index]
                elif hasattr(_obj_ref, "__iter__"):
                    # collections.abc.* KeysView, ValuesView, ItemsView end up here
                    # they do not have __next__ or __getitem__ but they do have __iter__
                    # so we can just replace our self._obj_ref and keep going
                    setattr(self, "_obj_ref", _obj_ref.__iter__())
                    # obj = next(self._obj_ref)
                    # just call self.__next__() instead
                    return self.__next__()
                else:
                    raise ValueError("Can't iterate through given object.")
            except StopIteration as e:
                setattr(self, "exhausted", True)
                raise e

            if hasattr(self, "_index"):
                self._index += 1

            return obj
        except Exception as e:
            raise e

    def upcast(self) -> Any:
        return iter(self._obj_ref)

    # TODO: Fix based on message from Tudor Cebere
    # So, when we add a new builtin type we want to have feature parity with cython ones.
    # When we tried to do this for iterators in the early days we had some problems when the iterators are infinite
    # (most likely an iterator from a generator). This pattern is common in functional programming, when you use
    # infinite iterators for different purposes. I then said that it makes sense to force the user to exhaust the
    # iterator themselves and then to serde the type. Here, it might be a bit problematic because somebody might slip
    # in this kind of iterator and when we exhaust it (through list conversion), we go into infinite computation.
    # And there are similar edge cases to this.
    def _object2proto(self) -> Iterator_PB:
        id_ = serialize(obj=self._id)
        obj_ref_ = serialize(py.list.List(list(self._obj_ref)), to_bytes=True)
        index_ = self._index
        max_len_ = self.max_len
        exhausted_ = self.exhausted

        return Iterator_PB(
            id=id_,
            obj_ref=obj_ref_,
            index=index_,
            max_len=max_len_,
            exhausted=exhausted_,
        )

    @staticmethod
    def _proto2object(proto: Iterator_PB) -> "Iterator":
        id_: UID = deserialize(blob=proto.id)
        obj_ref_ = deserialize(blob=proto.obj_ref, from_bytes=True)
        index_ = proto.index
        max_len_ = proto.max_len
        exhausted_ = proto.exhausted

        new_iter = Iterator(_ref=obj_ref_, max_len=max_len_)
        new_iter._index = index_
        new_iter.exhausted = exhausted_
        new_iter._id = id_

        return new_iter

    @staticmethod
    def get_protobuf_schema() -> GeneratedProtocolMessageType:
        return Iterator_PB
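# Example (a minimal sketch added by the editor, not part of the original
# module): local Iterator behaviour, assuming the syft imports above resolve.
# Wrap a concrete sequence, step through it, and observe that exhaustion is
# tracked explicitly via the `exhausted` flag.
def _example_iterator_usage():
    it = Iterator(_ref=iter([1, 2, 3]), max_len=3)
    values = [next(it), next(it), next(it)]  # -> [1, 2, 3]
    try:
        next(it)
    except StopIteration:
        pass
    return values, it.exhausted  # ([1, 2, 3], True)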
python
from distribute_setup import use_setuptools use_setuptools() from setuptools import setup, find_packages, Extension setup(name='Mumoro', version='0.0.2a', author= 'Tristram Graebener', author_email = '[email protected]', url = 'http://github.com/Tristramg/mumoro/', description = 'Multimodal and multiobjective routing', license = 'GPLv3', packages = ['lib', 'lib.core', 'web'], install_requires = ['cherrypy', 'genshi', 'simplejson', 'transitfeed', 'setuptools-git', 'osm4routing', "iso8601"], py_modules = ['server', 'data_import', 'web', 'lib'], #ext_modules = [ #Extension("lib.core._mumoro", #sources=["lib/core/graph_wrapper.cpp", #"lib/core/path_algo.cpp", #"lib/core/reglc_graph.cpp", #"lib/core/duration.cpp", #"lib/core/nodes_filter.cpp", #"lib/core/muparo.cpp", #"lib/core/mumoro.i"], #swig_opts=['-c++'], #include_dirs=['lib/core/'], #libraries = ["boost_serialization"]) #], entry_points = { 'console_scripts': ['mumoro_import_data = data_import:main', 'mumoro_server = server:main'], } )
python
def Wakeup(): return require('wakeup')
python
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the Google name nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import subprocess from webkitpy.layout_tests.models.test_configuration import TestConfiguration from webkitpy.port.base import Port from webkitpy.port.pulseaudio_sanitizer import PulseAudioSanitizer from webkitpy.port.xvfbdriver import XvfbDriver class GtkPort(Port): port_name = "gtk" def __init__(self, *args, **kwargs): super(GtkPort, self).__init__(*args, **kwargs) self._pulseaudio_sanitizer = PulseAudioSanitizer() def warn_if_bug_missing_in_test_expectations(self): return not self.get_option('webkit_test_runner') def _port_flag_for_scripts(self): return "--gtk" def _driver_class(self): return XvfbDriver def default_timeout_ms(self): if self.get_option('configuration') == 'Debug': return 12 * 1000 return 6 * 1000 def setup_test_run(self): super(GtkPort, self).setup_test_run() self._pulseaudio_sanitizer.unload_pulseaudio_module() def clean_up_test_run(self): super(GtkPort, self).clean_up_test_run() self._pulseaudio_sanitizer.restore_pulseaudio_module() def setup_environ_for_server(self, server_name=None): environment = super(GtkPort, self).setup_environ_for_server(server_name) environment['GTK_MODULES'] = 'gail' environment['GSETTINGS_BACKEND'] = 'memory' environment['LIBOVERLAY_SCROLLBAR'] = '0' environment['TEST_RUNNER_INJECTED_BUNDLE_FILENAME'] = self._build_path('Libraries', 'libTestRunnerInjectedBundle.la') environment['TEST_RUNNER_TEST_PLUGIN_PATH'] = self._build_path('TestNetscapePlugin', '.libs') environment['WEBKIT_INSPECTOR_PATH'] = self._build_path('Programs', 'resources', 'inspector') environment['AUDIO_RESOURCES_PATH'] = self.path_from_webkit_base('Source', 'WebCore', 'platform', 'audio', 'resources') self._copy_value_from_environ_if_set(environment, 'WEBKIT_OUTPUTDIR') return environment def _generate_all_test_configurations(self): configurations = [] for build_type in self.ALL_BUILD_TYPES: configurations.append(TestConfiguration(version=self._version, architecture='x86', build_type=build_type)) return configurations def _path_to_driver(self): return self._build_path('Programs', 
                                self.driver_name())

    def _path_to_image_diff(self):
        return self._build_path('Programs', 'ImageDiff')

    def _path_to_webcore_library(self):
        gtk_library_names = [
            "libwebkitgtk-1.0.so",
            "libwebkitgtk-3.0.so",
            "libwebkit2gtk-1.0.so",
        ]

        for library in gtk_library_names:
            full_library = self._build_path(".libs", library)
            if self._filesystem.isfile(full_library):
                return full_library
        return None

    def _search_paths(self):
        search_paths = []
        if self.get_option('webkit_test_runner'):
            search_paths.extend([self.port_name + '-wk2', 'wk2'])
        else:
            search_paths.append(self.port_name + '-wk1')
        search_paths.append(self.port_name)
        search_paths.extend(self.get_option("additional_platform_directory", []))
        return search_paths

    def default_baseline_search_path(self):
        return map(self._webkit_baseline_path, self._search_paths())

    def _port_specific_expectations_files(self):
        return [self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in reversed(self._search_paths())]

    # FIXME: We should find a way to share this implementation with Gtk,
    # or teach run-launcher how to call run-safari and move this down to Port.
    def show_results_html_file(self, results_filename):
        run_launcher_args = ["file://%s" % results_filename]
        if self.get_option('webkit_test_runner'):
            run_launcher_args.append('-2')
        # FIXME: old-run-webkit-tests also added ["-graphicssystem", "raster", "-style", "windows"]
        # FIXME: old-run-webkit-tests converted results_filename path for cygwin.
        self._run_script("run-launcher", run_launcher_args)

    def check_sys_deps(self, needs_http):
        return super(GtkPort, self).check_sys_deps(needs_http) and XvfbDriver.check_xvfb(self)

    def _get_gdb_output(self, coredump_path):
        cmd = ['gdb', '-ex', 'thread apply all bt 1024', '--batch', str(self._path_to_driver()), coredump_path]
        proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        errors = [l.strip().decode('utf8', 'ignore') for l in stderr.splitlines()]
        return (stdout.decode('utf8', 'ignore'), errors)

    def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
        pid_representation = str(pid or '<unknown>')
        log_directory = os.environ.get("WEBKIT_CORE_DUMPS_DIRECTORY")
        errors = []
        crash_log = ''
        expected_crash_dump_filename = "core-pid_%s-_-process_%s" % (pid_representation, name)

        def match_filename(filesystem, directory, filename):
            if pid:
                return filename == expected_crash_dump_filename
            return filename.find(name) > -1

        if log_directory:
            dumps = self._filesystem.files_under(log_directory, file_filter=match_filename)
            if dumps:
                # Get the most recent coredump matching the pid and/or process name.
                coredump_path = list(reversed(sorted(dumps)))[0]
                if not newer_than or self._filesystem.mtime(coredump_path) > newer_than:
                    crash_log, errors = self._get_gdb_output(coredump_path)

        stderr_lines = errors + (stderr or '<empty>').decode('utf8', 'ignore').splitlines()
        errors_str = '\n'.join(('STDERR: ' + l) for l in stderr_lines)

        if not crash_log:
            if not log_directory:
                log_directory = "/path/to/coredumps"
            core_pattern = os.path.join(log_directory, "core-pid_%p-_-process_%e")
            crash_log = """\
Coredump %(expected_crash_dump_filename)s not found.
To enable crash logs: - run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern - enable core dumps: ulimit -c unlimited - set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(log_directory)s """ % locals() return (stderr, """\ Crash log for %(name)s (pid %(pid_representation)s): %(crash_log)s %(errors_str)s""" % locals())
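# Example (a minimal sketch added by the editor, not part of the original
# port): the batch-mode gdb invocation that _get_gdb_output() wraps, with
# hypothetical driver and core-dump paths supplied by the caller.
def _example_backtrace_from_core(driver_path, coredump_path):
    cmd = ['gdb', '-ex', 'thread apply all bt 1024', '--batch',
           driver_path, coredump_path]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, _ = proc.communicate()
    return stdout.decode('utf8', 'ignore')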
python
import sys, os, re, traceback
from PIL import Image
from skimage.io import imread, imsave
from resizeimage import resizeimage

cwd = os.getcwd()
rootDir = cwd + '/imagenes'
for file_name in os.listdir(rootDir):
    folderDir = rootDir + '/' + file_name
    if (os.path.isdir(folderDir)):
        fileImages = os.listdir(folderDir)
        for fImage in fileImages:  # for each image
            if os.path.splitext(fImage)[1] == '.jpg':
                nameFileDir = folderDir + '/' + fImage
                # resize the image to 256x256
                print(nameFileDir)
                with open(nameFileDir, 'r+b') as f:
                    with Image.open(f) as image:
                        cover = resizeimage.resize_cover(image, [256, 256])
                        cover.save(nameFileDir, image.format)
    else:
        with open(folderDir, 'r+b') as f:
            with Image.open(f) as image:
                cover = resizeimage.resize_cover(image, [256, 256])
                cover.save(folderDir, image.format)
python
import os import webbrowser from tkinter import * from tkinter import filedialog import win32com.client import winshell from PIL import Image from PyInstaller.utils.hooks import collect_data_files from tkinterdnd2 import * datas = collect_data_files('tkinterdnd2') iconPath = r"%systemroot%\system32\imageres.dll" IconName = "" def fix_path(datapath): fixedpath = datapath.replace("\\", "\\\\") return fixedpath #6278641112207629 def generate_label(text1): lb.insert("end", text1) def add_via_dnd(event): global x global BatText x += 1 text = fix_path(event.data) text2 = text.replace("{", "") text3 = text2.replace("}", "") BatText = BatText + "start \"\" \"" + text3 + "\"\n " lb.insert("end", text3) def browseFiles(): filename = filedialog.askopenfilename(initialdir="/", title="Select a File", filetypes=(("Alle Dateien", "*.*"), ("Apps", "*.exe*"), ("Fotos", "*.png"))) global x global BatText x += 1 BatText = BatText + "start \"\" \"" + fix_path(filename) + "\"\n " if len(filename) > 1: generate_label(filename) def browsePng(): filename = fix_path(filedialog.askopenfilename(initialdir="/", title="Foto auswählen:", filetypes=(("photos", ('.png', '.jpg', '.ico')), ("all files", "*.*")))) global iconPath print(filename) if ".png" in filename: img = Image.open(fix_path(filename)) img.save(filename.replace(".png", ".ico")) iconPath = filename.replace(".png", ".ico") elif ".jpg" in filename: img1 = Image.open(fix_path(filename)) img1.save(filename.replace(".jpg", ".ico")) iconPath = filename.replace(".jpg", ".ico") else: iconPath = filename print(iconPath) def callback(event): webbrowser.open_new("https://www.buymeacoffee.com/prayz208") def getShortcutName(): global IconName global Entry_Name if len(Entry_Name.get()) < 1: return "Neue_Verknüpfung" else: return Entry_Name.get() def end_file(): global iconPath if len(iconPath) < 3: iconPath = r"%systemroot%\system32\imageres.dll" path_to_batFile = f"{newpath}\\{getShortcutName()}.bat" myBat = open(path_to_batFile, 'w+') myBat.writelines(BatText) myBat.close() shell = win32com.client.Dispatch("WScript.Shell") shortcut = shell.CreateShortcut(os.path.join(winshell.desktop(), f'{getShortcutName()}.lnk')) shortcut.TargetPath = path_to_batFile shortcut.IconLocation = fix_path(iconPath) shortcut.save() root.destroy() root = Tk() Bottom_Frame = Frame(root, height=100, width=300) Bottom_Frame.grid(row=1, column=0, sticky=S) Bottom_Frame.rowconfigure(0, weight=1) Bottom_Frame.columnconfigure(0, weight=1) Label(Bottom_Frame, text="Shortcut Name:").grid(row=0, column=0, columnspan=2, ) Entry_Name = Entry(Bottom_Frame, width=40) Entry_Name.grid(row=1, column=0, columnspan=2, sticky=S) x = 0 BatText = "@echo off\n" user = os.path.expanduser('~') newpath = os.path.expanduser('~') + "\\" + "Shortcut" if not os.path.exists(newpath): os.makedirs(newpath) root.title('Shortcut Maker') root.resizable(width=False, height=True) root.geometry("400x350") Top_Frame = Frame(root, height=400, width=400) Top_Frame.grid(row=0, column=0) Top_Frame.drop_target_register(DND_FILES) Top_Frame.dnd_bind('<<Drop>>', add_via_dnd) root.rowconfigure(0, weight=1) root.columnconfigure(1, weight=1) lb = Listbox(Top_Frame, width=69, height=70, bd=0, selectmode=SINGLE, justify=LEFT, bg="#c4c4c4") lb.grid(row=0, column=0, sticky=N) add_Path = Button(Bottom_Frame, text="Dateipfad hinzufügen", command=browseFiles) add_Path.grid(row=3, column=1, sticky=S, ipadx=5) add_icon = Button(Bottom_Frame, text="Symbol hinzufügen", command=browsePng) add_icon.grid(row=3, column=0, ipadx=5) Done_file = 
Button(Bottom_Frame, text="Verknüpfung erstellen", command=end_file) Done_file.grid(row=4, column=0, columnspan=2) lbl1 = Label(Bottom_Frame, text=r"Spende", fg="#0e526c", height=0, cursor="hand2") lbl1.grid(row=5, column=0, columnspan=1) lbl1.lower() Label(Bottom_Frame, text="©2021 Beta 1.1", fg="#0e526c").grid(row=5, column=1) Label(Bottom_Frame, text="Drücken sie auf \"Dateipfad hinzufügen\", \noder ziehen sie eine Datei auf das graue " "Feld", fg="#4f4f4f").grid(row=2, column=0, sticky=N, columnspan=2) lbl1.bind("<Button-1>", callback) root.mainloop()
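# Example (a minimal sketch added by the editor, not part of the original
# program): the shortcut-creation step end_file() performs, in isolation.
# Assumes a Windows host with pywin32 and winshell installed; the target
# path below is hypothetical.
def _example_create_shortcut():
    shell = win32com.client.Dispatch("WScript.Shell")
    shortcut = shell.CreateShortcut(os.path.join(winshell.desktop(), "Demo.lnk"))
    shortcut.TargetPath = r"C:\Users\me\Shortcut\Demo.bat"  # hypothetical path
    shortcut.save()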
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Apr 2 11:16:51 2019 @author: Kazuki """ import numpy as np import pandas as pd from tqdm import tqdm from sklearn.preprocessing import KBinsDiscretizer import utils PREF = 'f006' est = KBinsDiscretizer(n_bins=100, encode='ordinal', strategy='uniform') def fe(df): feature = pd.DataFrame(index=df.index) df = pd.DataFrame(est.fit_transform(df), columns=df.columns) for c in tqdm(df.columns): di = df[c].value_counts().sort_index().diff().to_dict() feature[f'{PREF}_{c}'] = df[c].map(di) # for i in [3,2,1]: # for c in tqdm(df.columns): # di = df[c].round(i).value_counts().to_dict() # feature[f'{PREF}_{c}_r{i}'] = df[c].round(i).map(di) feature.iloc[:200000].to_pickle(f'../data/train_{PREF}.pkl') feature.iloc[200000:].reset_index(drop=True).to_pickle(f'../data/test_{PREF}.pkl') return # ============================================================================= # main # ============================================================================= if __name__ == "__main__": utils.start(__file__) tr = utils.load_train().drop(['ID_code', 'target'], axis=1) te = utils.load_test().drop(['ID_code'], axis=1) te.drop(np.load('../data/fake_index.npy'), inplace=True) trte = pd.concat([tr, te], ignore_index=True)[tr.columns] fe(trte) utils.end(__file__)
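# Example (a minimal sketch added by the editor, not part of the original
# script): the bin-count-difference transform fe() applies, shown on a tiny
# synthetic column. With two uniform bins over [0.1, 1.0] the per-bin counts
# are {0: 3, 1: 2}, so diff() yields NaN for bin 0 and -1.0 for bin 1.
def _example_bin_count_diff():
    df_demo = pd.DataFrame({'var_0': [0.1, 0.2, 0.2, 0.9, 1.0]})
    binner = KBinsDiscretizer(n_bins=2, encode='ordinal', strategy='uniform')
    binned = pd.DataFrame(binner.fit_transform(df_demo), columns=df_demo.columns)
    di = binned['var_0'].value_counts().sort_index().diff().to_dict()
    return binned['var_0'].map(di)  # [NaN, NaN, NaN, -1.0, -1.0]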
python
# Copyright (c) Chris Choy ([email protected]). All Rights Reserved. # # Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural # Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part of # the code. from enum import Enum import torch from MinkowskiEngine import MinkowskiNetwork class NetworkType(Enum): """ Classification or segmentation. """ SEGMENTATION = 0, 'SEGMENTATION', CLASSIFICATION = 1, 'CLASSIFICATION' def __new__(cls, value, name): member = object.__new__(cls) member._value_ = value member.fullname = name return member def __int__(self): return self.value class Model(MinkowskiNetwork): """ Base network for all sparse convnet By default, all networks are segmentation networks. """ OUT_PIXEL_DIST = -1 NETWORK_TYPE = NetworkType.SEGMENTATION def __init__(self, in_channels, out_channels, config, D, **kwargs): super(Model, self).__init__(D) self.in_channels = in_channels self.out_channels = out_channels self.config = config def permute_label(self, label, max_label): if not isinstance(self.OUT_PIXEL_DIST, (list, tuple)): assert self.OUT_PIXEL_DIST > 0, "OUT_PIXEL_DIST not set" return super(Model, self).permute_label(label, max_label, self.OUT_PIXEL_DIST)
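# Example (a minimal sketch added by the editor, not part of the original
# module): subclassing Model, assuming MinkowskiEngine is installed. The
# single-convolution body is purely illustrative.
import MinkowskiEngine as ME

class ExampleSegNet(Model):
    OUT_PIXEL_DIST = 1

    def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
        super(ExampleSegNet, self).__init__(in_channels, out_channels, config, D, **kwargs)
        self.conv = ME.MinkowskiConvolution(
            in_channels, out_channels, kernel_size=3, dimension=D)

    def forward(self, x):
        # x is expected to be a ME.SparseTensor.
        return self.conv(x)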
python
from fractions import Fraction
import io
import importlib
import time
import json

from flask import (Flask, Response, render_template, send_file,
                   request, jsonify)
from flask_bootstrap import Bootstrap
from flask_httpauth import HTTPBasicAuth
from flask_socketio import SocketIO
from werkzeug.security import check_password_hash, generate_password_hash

import logging

FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT, level=logging.INFO)

# from werkzeug.serving import WSGIRequestHandler

from picamera_collector import camerapi
from picamera_collector import ring_buffer
from picamera_collector import config

cf = config.Configuration()

plugins = cf.config_data['plugins']
plugins_modules = [importlib.import_module(p) for p in plugins]

camera = camerapi.Camera()

app = Flask(__name__)
app.config['SECRET_KEY'] = cf.config_data['flask']['secret']
app.config['TEMPLATES_AUTO_RELOAD'] = True
sio = SocketIO(app)

# simple security
auth = HTTPBasicAuth()

users = {k: generate_password_hash(v) for (k, v) in cf.config_data['users'].items()}

# plugin sink for captured frames; set in __main__ if a plugin provides add_job.
# Initialized here so takepicture/takevideo don't raise NameError when the
# module is imported by a WSGI server instead of run directly.
bsm = None


@auth.verify_password
def verify_password(username, password):
    if username in users and \
            check_password_hash(users.get(username), password):
        return username


# ring buffer for images
rb = ring_buffer.RingBuffer(20)

Bootstrap(app)


def to_lookup(ll):
    "create drop-down lookups"
    return [{'name': x} for x in ll]


@app.route('/')
@auth.login_required
def index():
    global camera
    methodList = to_lookup(cf.config_data['methodList'])
    modeList = to_lookup(cf.config_data['modeList'])
    isoList = to_lookup(cf.config_data['isoList'])
    resolutionList = to_lookup(cf.config_data['resolution'])
    jpegqualityList = to_lookup(cf.config_data['jpegquality'])
    return render_template('index.html',
                           methodList=methodList,
                           modeList=modeList,
                           isoList=isoList,
                           resolutionList=resolutionList,
                           jpegqualityList=jpegqualityList,
                           cMethod=camera.method,
                           cResolution=camera.resolution,
                           cMode=camera.exposure_mode,
                           cISO=camera.iso,
                           cJPEG=camera.jpegquality,
                           cShutterSpeed=camera.shutter_speed
                           )


def takevideo():
    video_buffer = camera.take_video(10)
    if bsm:
        bsm.add_job((time.time(), 0, video_buffer, 'h264'))
    return 0


class CustomJsonEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, Fraction):
            return str(obj)
        return super(CustomJsonEncoder, self).default(obj)


def takepicture(single_picture, ts_sensor):
    global camera, rb
    epoch_time = int(time.time() * 1000)
    if (camera.cf['numberimages'] == 1) or single_picture:
        app.logger.info('taking a single picture')
        image, info = camera.take_still_picture()
        images = [image]
    else:
        app.logger.info('taking series of pictures')
        images, info = camera.take_picture_series()
    ts_server = round(time.time() * 1000)
    info['delay'] = ts_server - ts_sensor
    app.logger.info('time delay trigger to end %d', ts_server - ts_sensor)
    for image in images:
        last_image = rb.add_to_buffer(image)
    if bsm:
        [bsm.add_job((ts_sensor, x, images[x], 'jpg')) for x in range(len(images))]
        bsm.add_job((ts_sensor, 0, json.dumps(info, cls=CustomJsonEncoder).encode(), 'json'))
    return rb.get_state()


@app.route('/api/v1/resources/takepicture', methods=['GET'])
@auth.login_required
def api_start():
    app.logger.info('takepicture')
    global camera
    camera_args = request.args.to_dict()
    camera.change_mode_if_required(camera_args)
    if camera.method == 'picture':
        last = takepicture(True, round(time.time() * 1000))
    else:
        last = takevideo()
    return jsonify(last)


@app.route("/api/v1/resources/takesend")
# @auth.login_required
def takesend():
    global camera
    camera.change_mode_if_required(None)
    ts_sensor = int(request.args.get('ts'))
    ts_server = round(time.time() * 1000)
    app.logger.info('time delay trigger to camera %d', ts_server - ts_sensor)
    app.logger.info('camera method %s', camera.method)
    if camera.method == 'picture':
        last = takepicture(False, round(time.time() * 1000))
        ts_server = round(time.time() * 1000)
        app.logger.info('time delay trigger to end sequence %d', ts_server - ts_sensor)
    else:
        last = takevideo()
    return jsonify({'image index': str(last)})


@sio.event
def takephoto(ts_sensor):
    global camera
    camera.change_mode_if_required(None)
    ts_server = round(time.time() * 1000)
    app.logger.info('time delay trigger to camera %d', ts_server - ts_sensor)
    app.logger.info('camera method %s', camera.method)
    if camera.method == 'picture':
        last = takepicture(False, ts_sensor)
    else:
        last = takevideo()


@app.route('/api/v1/resources/saveconfig', methods=['GET'])
@auth.login_required
def api_saveconfig():
    global camera
    camera_args = request.args.to_dict()
    camera.change_mode_if_required(camera_args)
    camera.save_camera_config(camera_args)
    return "config saved"


@app.route('/images/<int:pid>', methods=['GET'])
def image_frombuff(pid):
    global rb
    frame = rb.get(pid)
    return send_file(io.BytesIO(frame),
                     attachment_filename=str(pid) + '.jpg',
                     mimetype='image/jpg',
                     cache_timeout=-1)


@app.route('/api/v1/resources/lastpicture', methods=['GET'])
@auth.login_required
def api_lastpicturea():
    global rb
    return jsonify(rb.get_state())


@app.route('/video_feed')
@auth.login_required
def video_feed():
    global camera
    app.logger.info('video_feed')
    return Response(camerapi.Camera.gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


@sio.event
def connect(sid):
    app.logger.info('connect %s', sid)


@sio.event
def disconnect():
    app.logger.info('disconnect ')


if __name__ == '__main__':
    plugins_instances = [p.PluginModule() for p in plugins_modules]
    for p in plugins_instances:
        p.activate(app)
        if hasattr(p, "add_job"):
            bsm = p
    # WSGIRequestHandler.protocol_version = "HTTP/1.1"
    # app.run('0.0.0.0', threaded=True, debug=False, use_reloader=False)
    sio.run(app, host='0.0.0.0', port=5000, debug=False, use_reloader=False)
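
# --- Hedged usage sketch (not part of the original file) ---
# A minimal client for the API above, assuming the server runs on
# localhost:5000. The 'admin'/'secret' credentials are illustrative
# placeholders; real ones come from config_data['users'].
import time
import requests

AUTH = ('admin', 'secret')  # hypothetical credentials
BASE = 'http://localhost:5000'

# trigger a capture via the basic-auth-protected endpoint
state = requests.get(BASE + '/api/v1/resources/takepicture', auth=AUTH).json()
print('ring-buffer state:', state)

# trigger via the open takesend endpoint, passing a sensor timestamp in ms
ts = round(time.time() * 1000)
r = requests.get(BASE + '/api/v1/resources/takesend', params={'ts': ts})
print(r.json())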
python
# vim:ts=4:sts=4:sw=4:expandtab

from StringIO import StringIO  # Python 2; on Python 3 this would be `from io import StringIO`

from satori.ars.thrift import ThriftWriter
from satori.core.export import generate_interface
import satori.core.models

# Build the ARS interface description from the core models and render it
# as a Thrift IDL string.
ars_interface = generate_interface()

writer = ThriftWriter()
idl_io = StringIO()
writer.write_to(ars_interface, idl_io)
thrift_idl = idl_io.getvalue()

del writer
del idl_io
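
# --- Hedged usage sketch (not part of the original file) ---
# One way the `thrift_idl` string produced above might be consumed: write it
# to disk so a Thrift compiler can pick it up. The 'satori.thrift' filename
# is an illustrative assumption, not taken from the source.
with open('satori.thrift', 'w') as f:
    f.write(thrift_idl)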
python
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from guillotina.schema._bootstrapfields import Bool
from guillotina.schema._bootstrapfields import Field
from guillotina.schema._bootstrapfields import Int
from guillotina.schema._bootstrapfields import Text
from guillotina.schema._bootstrapfields import TextLine
from guillotina.schema._bootstrapinterfaces import IContextAwareDefaultFactory
from guillotina.schema._bootstrapinterfaces import IFromUnicode
from guillotina.schema._messageid import _
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface.common.mapping import IEnumerableMapping


__docformat__ = "reStructuredText"

# PEP 8 friendliness
IFromUnicode, IContextAwareDefaultFactory


class IField(Interface):
    """Basic Schema Field Interface.

    Fields are used for Interface specifications.  They at least provide
    a title, description and a default value.  You can also specify if
    they are required and/or readonly.

    The Field Interface is also used for validation and specifying
    constraints.

    We want to make it possible for an IField to not only work on its
    value but also on the object this value is bound to.  This enables a
    Field implementation to perform validation against an object which
    also marks a certain place.

    Note that many fields need information about the object containing a
    field.  For example, when validating a value to be set as an object
    attribute, it may be necessary for the field to introspect the
    object's state.  This means that the field needs to have access to
    the object when performing validation::

        bound = field.bind(object)
        bound.validate(value)
    """

    def bind(object):
        """Return a copy of this field which is bound to context.

        The copy of the Field will have the 'context' attribute set to
        'object'.  This way a Field can implement more complex checks
        involving the object's location/environment.

        Many fields don't need to be bound.  Only fields that condition
        validation or properties on an object containing the field need
        to be bound.
        """

    title = TextLine(title=_("Title"), description=_("A short summary or label"), default="", required=False)

    description = Text(
        title=_("Description"), description=_("A description of the field"), default="", required=False
    )

    required = Bool(
        title=_("Required"),
        description=(_("Tells whether a field requires its value to exist.")),
        default=False,
    )

    readonly = Bool(
        title=_("Read Only"),
        description=_("If true, the field's value cannot be changed."),
        required=False,
        default=False,
    )

    default = Field(
        title=_("Default Value"),
        description=_(
            """The field default value may be None or a legal
                        field value"""
        ),
    )

    missing_value = Field(
        title=_("Missing Value"),
        description=_(
            """If input for this Field is missing, and that's ok,
                          then this is the value to use"""
        ),
    )

    order = Int(
        title=_("Field Order"),
        description=_(
            """
        The order attribute can be used to determine the order in
        which fields in a schema were defined.  If one field is created
        after another (in the same thread), its order will be greater.

        (Fields in separate threads could have the same order.)
        """
        ),
        required=True,
        readonly=True,
    )

    def constraint(value):
        """Check a customized constraint on the value.

        You can implement this method with your Field to require a
        certain constraint.  This relaxes the need to inherit/subclass
        a Field just to add a simple constraint.

        Returns true if the given value is within the Field's constraint.
        """

    def validate(value):
        """Validate that the given value is a valid field value.

        Returns nothing but raises an error if the value is invalid.
        It checks everything specific to a Field and also checks with
        the additional constraint.
        """

    def get(object):
        """Get the value of the field for the given object."""

    def query(object, default=None):
        """Query the value of the field for the given object.

        Return the default if the value hasn't been set.
        """

    def set(object, value):
        """Set the value of the field for the object.

        Raises a type error if the field is a read-only field.
        """


class IIterable(IField):
    """Fields with a value that can be iterated over.

    The value needs to support iteration; the implementation mechanism
    is not constrained.  (Either `__iter__()` or `__getitem__()` may be
    used.)
    """


class IContainer(IField):
    """Fields whose value allows an ``x in value`` check.

    The value needs to support the `in` operator, but is not constrained
    in how it does so (whether it defines `__contains__()` or
    `__getitem__()` is immaterial).
    """


class IOrderable(IField):
    """Field requiring its value to be orderable.

    The set of values needs to support a complete ordering; the
    implementation mechanism is not constrained.  Either `__cmp__()` or
    'rich comparison' methods may be used.
    """


class ILen(IField):
    """A Field requiring its value to have a length.

    The value needs to have a conventional __len__ method.
    """


class IMinMax(IOrderable):
    """Field requiring its value to be between min and max.

    This implies that the value needs to support the IOrderable
    interface.
    """

    min = Field(title=_("Start of the range"), required=False, default=None)

    max = Field(title=_("End of the range (including the value itself)"), required=False, default=None)


class IMinMaxLen(ILen):
    """Field requiring the length of its value to be within a range"""

    min_length = Int(
        title=_("Minimum length"),
        description=_(
            """
        Value after whitespace processing cannot have less than
        `min_length` characters (if a string type) or elements (if
        another sequence type).  If `min_length` is ``None``, there is
        no minimum.
        """
        ),
        required=False,
        min=0,  # needs to be a positive number
        default=0,
    )

    max_length = Int(
        title=_("Maximum length"),
        description=_(
            """
        Value after whitespace processing cannot have more than
        `max_length` characters (if a string type) or elements (if
        another sequence type).  If `max_length` is ``None``, there is
        no maximum."""
        ),
        required=False,
        min=0,  # needs to be a positive number
        default=None,
    )


class IInterfaceField(IField):
    """Fields with a value that is an interface (implementing
    zope.interface.Interface)."""


class IBool(IField):
    """Boolean Field."""

    default = Bool(
        title=_("Default Value"),
        description=_(
            """The field default value may be None or a legal
                        field value"""
        ),
    )


class IBytes(IMinMaxLen, IIterable, IField):
    """Field containing a byte string (like the python str).

    The value might be constrained to be within length limits.
    """


class IText(IMinMaxLen, IIterable, IField):
    """Field containing a unicode string."""


INativeString = IText


class IASCII(INativeString):
    """Field containing a 7-bit ASCII string.  No characters > DEL
    (chr(127)) are allowed.

    The value might be constrained to be within length limits.
    """


class IBytesLine(IBytes):
    """Field containing a byte string without newlines."""


class IASCIILine(IASCII):
    """Field containing a 7-bit ASCII string without newlines."""


class ISourceText(IText):
    """Field for the source text of an object."""


class ITextLine(IText):
    """Field containing a unicode string without newlines."""


INativeStringLine = ITextLine


class IPassword(ITextLine):
    "Field containing a unicode string without newlines that is a password."


class IInt(IMinMax, IField):
    """Field containing an Integer Value."""

    min = Int(title=_("Start of the range"), required=False, default=None)

    max = Int(title=_("End of the range (including the value itself)"), required=False, default=None)

    default = Int(
        title=_("Default Value"),
        description=_(
            """The field default value may be None or a legal
                        field value"""
        ),
    )


class IFloat(IMinMax, IField):
    """Field containing a Float."""


class IDecimal(IMinMax, IField):
    """Field containing a Decimal."""


class IDatetime(IMinMax, IField):
    """Field containing a DateTime."""


class IDate(IMinMax, IField):
    """Field containing a date."""


class ITimedelta(IMinMax, IField):
    """Field containing a timedelta."""


class ITime(IMinMax, IField):
    """Field containing a time."""


def _is_field(value):
    if not IField.providedBy(value):
        return False
    return True


def _fields(values):
    for value in values:
        if not _is_field(value):
            return False
    return True


class IURI(INativeStringLine):
    """A field containing an absolute URI"""


class IId(INativeStringLine):
    """A field containing a unique identifier

    A unique identifier is either an absolute URI or a dotted name.
    If it's a dotted name, it should have a module/package name as a
    prefix.
    """


class IDottedName(INativeStringLine):
    """Dotted name field.

    Values of DottedName fields must be Python-style dotted names.
    """

    min_dots = Int(title=_("Minimum number of dots"), required=True, min=0, default=0)

    max_dots = Int(
        title=_("Maximum number of dots (should not be less than min_dots)"), required=False, default=None
    )


class IChoice(IField):
    """Field whose value is contained in a predefined set

    Only one of ``values`` or ``vocabulary`` may be specified for a
    given choice.
    """

    vocabulary = Field(
        title=_("Vocabulary or source providing values"),
        description=_(
            "The ISource, IContextSourceBinder or IBaseVocabulary "
            "object that provides values for this field."
        ),
        required=False,
        default=None,
    )

    vocabularyName = TextLine(
        title=_("Vocabulary name"),
        description=_("Vocabulary name to lookup in the vocabulary registry"),
        required=False,
        default=None,
    )


# Collections:

# Abstract


class ICollection(IMinMaxLen, IIterable, IContainer):
    """Abstract interface containing a collection value.

    The Value must be iterable and may have a min_length/max_length.
    """

    value_type = Field(
        title=_("Value Type"),
        description=_("Field value items must conform to the given type, " "expressed via a Field."),
    )

    unique = Bool(
        title=_("Unique Members"),
        description=_("Specifies whether the members of the collection " "must be unique."),
        default=False,
    )


class ISequence(ICollection):
    """Abstract interface specifying that the value is ordered"""


class IUnorderedCollection(ICollection):
    """Abstract interface specifying that the value cannot be ordered"""


class IAbstractSet(IUnorderedCollection):
    """An unordered collection of unique values."""

    unique = Attribute("This ICollection interface attribute must be True")


class IAbstractBag(IUnorderedCollection):
    """An unordered collection of values, with no limitations on whether
    members are unique"""

    unique = Attribute("This ICollection interface attribute must be False")


# Concrete


class ITuple(ISequence):
    """Field containing a value that implements the API of a conventional
    Python tuple."""


class IList(ISequence):
    """Field containing a value that implements the API of a conventional
    Python list."""


class ISet(IAbstractSet):
    """Field containing a value that implements the API of a Python2.4+
    set."""


class IFrozenSet(IAbstractSet):
    """Field containing a value that implements the API of a conventional
    Python 2.4+ frozenset."""


# (end Collections)


class IObject(IField):
    """Field containing an Object value."""

    schema = Attribute("schema", _("The Interface that defines the Fields comprising the Object."))


class IDict(IMinMaxLen, IIterable, IContainer):
    """Field containing a conventional dict.

    The key_type and value_type fields allow specification of
    restrictions for keys and values contained in the dict.
    """

    key_type = Attribute("key_type", _("Field keys must conform to the given type, expressed via a Field."))

    value_type = Attribute(
        "value_type", _("Field values must conform to the given type, expressed " "via a Field.")
    )


class ITerm(Interface):
    """Object representing a single value in a vocabulary."""

    value = Attribute("value", "The value used to represent the vocabulary term in a field.")


class ITokenizedTerm(ITerm):
    """Object representing a single value in a tokenized vocabulary."""

    # Should be a ``guillotina.schema.ASCIILine``, but `ASCIILine` is not
    # a bootstrap field.
    token = Attribute(
        "token",
        """Token which can be used to represent the value on a stream.

        The value of this attribute must be a non-empty 7-bit string.
        Control characters are not allowed.
        """,
    )


class ITitledTokenizedTerm(ITokenizedTerm):
    """A tokenized term that includes a title."""

    title = TextLine(title=_("Title"))


class ISource(Interface):
    """A set of values from which to choose

    Sources represent sets of values.  They are used to specify the
    source for choice fields.

    Sources can be large (even infinite), in which case, they need to
    be queried to find out what their values are.
    """

    def __contains__(value):
        """Return whether the value is available in this source"""


class ISourceQueriables(Interface):
    """A collection of objects for querying sources"""

    def getQueriables():  # type: ignore
        """Return an iterable of objects that can be queried

        The returned objects should be two-tuples with:

        - A unicode id

          The id must uniquely identify the queriable object within
          the set of queriable objects.  Furthermore, in subsequent
          calls, the same id should be used for a given queriable
          object.

        - A queriable object

          This is an object for which there is a view provided for
          searching for items.
        """


class IContextSourceBinder(Interface):
    def __call__(context):
        """Return a context-bound instance that implements ISource."""


class IBaseVocabulary(ISource):
    """Representation of a vocabulary.

    At this most basic level, a vocabulary only needs to support a test
    for containment.  This can be implemented either by __contains__()
    or by sequence __getitem__() (the latter only being useful for
    vocabularies which are intrinsically ordered).
    """

    def getTerm(value):
        """Return the ITerm object for the term 'value'.

        If 'value' is not a valid term, this method raises LookupError.
        """


class IIterableSource(ISource):
    """Source which supports iteration over allowed values.

    The objects iteration provides must be values from the source.
    """

    def __iter__():  # type: ignore
        """Return an iterator which provides the values from the source."""

    def __len__():  # type: ignore
        """Return the number of valid values, or sys.maxint."""


# BBB vocabularies are pending deprecation, hopefully in 3.3


class IIterableVocabulary(Interface):
    """Vocabulary which supports iteration over allowed values.

    The objects iteration provides must conform to the ITerm interface.
    """

    def __iter__():  # type: ignore
        """Return an iterator which provides the terms from the vocabulary."""

    def __len__():  # type: ignore
        """Return the number of valid terms, or sys.maxint."""


class IVocabulary(IIterableVocabulary, IBaseVocabulary):
    """Vocabulary which is iterable."""


class IVocabularyTokenized(IVocabulary):
    """Vocabulary that provides support for tokenized representation.

    Terms returned from getTerm() and provided by iteration must conform
    to ITokenizedTerm.
    """

    def getTermByToken(token):
        """Return an ITokenizedTerm for the passed-in token.

        If `token` is not represented in the vocabulary, `LookupError`
        is raised.
        """


class ITreeVocabulary(IVocabularyTokenized, IEnumerableMapping):
    """A tokenized vocabulary with a tree-like structure.

    The tree is implemented as a dictionary, with keys being
    ITokenizedTerm terms and the values being similar dictionaries.
    Leaf values are empty dictionaries.
    """


class IVocabularyRegistry(Interface):
    """Registry that provides IBaseVocabulary objects for specific
    fields."""

    def get(object, name):
        """Return the vocabulary named 'name' for the content object
        'object'.

        When the vocabulary cannot be found, LookupError is raised.
        """


class IVocabularyFactory(Interface):
    """Can create vocabularies."""

    def __call__(context):
        """The context provides a location that the vocabulary can make
        use of."""


class IFieldEvent(Interface):

    field = Attribute("The field that has been changed")

    object = Attribute("The object containing the field")


class IFieldUpdatedEvent(IFieldEvent):
    """
    A field has been modified.

    Subscribers will get the old and the new value together with the
    field.
    """

    old_value = Attribute("The value of the field before modification")

    new_value = Attribute("The value of the field after modification")


class IJSONField(IField):
    """A text field that stores JSON."""

    json_schema = Attribute("json_schema", _("The JSON schema string serialization."))


class IUnionField(IField):
    """
    A field that can be one of multiple types.

    This is sort of to mirror mypy's union type hint.
    """
python
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# function: client
from hashlib import sha1

try:
    from ckuser.sqlhelper.MySQLHelper import MySQLHelp
    from ckuser.sqlhelper.RedisHelper import RedisHelp
    from ckuser.config import *
except ImportError:
    from sqlhelper.MySQLHelper import MySQLHelp
    from sqlhelper.RedisHelper import RedisHelp
    from config import *

conf = config()
mysql_ip = conf['mysql_ip']
mysql_database = conf['mysql_database']
mysql_user = conf['mysql_user']
mysql_passwd = conf['mysql_passwd']
redis_ip = conf['redis_ip']


def user_info():
    """Prompt for a username and password and return their SHA-1 digests."""
    user_name = input("Enter username: ")
    user_passwd = input("Enter password: ")
    s1 = sha1()
    s2 = sha1()
    s1.update(user_name.encode("utf-8"))
    s2.update(user_passwd.encode("utf-8"))
    user_name_pro = s1.hexdigest()
    user_passwd_pro = s2.hexdigest()
    return user_name_pro, user_passwd_pro


def check_mysql_name(user_name_temp):
    """Query the user table."""
    sql = 'select passwd,isdelete from userinfors where name=%s'
    params = [user_name_temp]
    helper = MySQLHelp(mysql_ip, mysql_database, mysql_user, mysql_passwd)
    result = helper.all(sql, params)
    return result


def check_redis_name(user_name_temp):
    """Look the user up in Redis."""
    try:
        r = RedisHelp(redis_ip)
        result = r.get(user_name_temp)
        return result.decode('utf-8')  # None or user_passwd_pro
    except Exception:
        return None


def save_to_redis(user_name_temp, user_passwd_temp):
    """Save the user credentials to Redis."""
    r = RedisHelp(redis_ip)
    r.set(user_name_temp, user_passwd_temp)


def user_insert(user_name_temp, user_passwd_temp):
    """Insert into the user table."""
    sql = 'insert into userinfors(name,passwd) values(%s,%s)'
    params = [user_name_temp, user_passwd_temp]
    helper = MySQLHelp(mysql_ip, mysql_database, mysql_user, mysql_passwd)
    helper.cud(sql, params)


def user_update(user_name_temp, user_passwd_temp):
    """Update the user table."""
    sql = 'update userinfors set passwd=%s where name=%s'
    params = [user_passwd_temp, user_name_temp]
    helper = MySQLHelp(mysql_ip, mysql_database, mysql_user, mysql_passwd)
    helper.cud(sql, params)


def update():
    """Update a user's information."""
    flag = login()
    if flag[0]:
        print("Now enter the new user information!")
        user_name_pro, user_passwd_pro = user_info()
        result_redis = check_redis_name(user_name_pro)
        if result_redis is not None and user_name_pro != flag[1]:
            print("That user already exists; please choose another username!")
        else:
            result_mysql = check_mysql_name(user_name_pro)
            if len(result_mysql) != 0 and user_name_pro != flag[1]:
                print("That username already exists; please choose another username!")
            else:
                user_update(user_name_pro, user_passwd_pro)
                save_to_redis(user_name_pro, user_passwd_pro)


def register():
    """Register a new user."""
    user_name_pro, user_passwd_pro = user_info()
    result_redis = check_redis_name(user_name_pro)
    if result_redis is not None:
        print("That user already exists; please choose another username!")
    else:
        result_mysql = check_mysql_name(user_name_pro)
        if (len(result_mysql) != 0) and (result_mysql[0][1] == b'\x00'):
            print("That user already exists; please choose another username!")
            save_to_redis(user_name_pro, user_passwd_pro)
        elif (len(result_mysql) != 0) and (result_mysql[0][1] == b'\x01'):
            print('That user has been deleted; please register a new username!')
        else:
            user_insert(user_name_pro, user_passwd_pro)
            save_to_redis(user_name_pro, user_passwd_pro)
            print("Congratulations, registration succeeded!")


def login():
    """Log a user in."""
    user_name_pro, user_passwd_pro = user_info()
    result_redis = check_redis_name(user_name_pro)
    s = [0, 0]
    if result_redis == user_passwd_pro:
        print('Login succeeded!')
        s[0] = True
    elif result_redis is None:
        result_mysql = check_mysql_name(user_name_pro)
        if not result_mysql:  # empty result set (the original compared against None)
            print("That user does not exist!")
            s[0] = False
        elif result_mysql[0][1] == b'\x01':
            print('That user has been deleted; please register a new username!')
            s[0] = False
        elif result_mysql[0][1] == b'\x00' and result_mysql[0][0] == user_passwd_pro:
            print('Login succeeded!')
            save_to_redis(user_name_pro, user_passwd_pro)
            s[0] = True
        else:
            print('Wrong password!')
            s[0] = False
    elif result_redis != user_passwd_pro:
        print('Wrong password!')
        s[0] = False
    s[1] = user_name_pro
    return s


def main():
    login()
    # register()
    # update()


if __name__ == '__main__':
    main()
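
# --- Hedged usage sketch (not part of the original file) ---
# An interactive menu dispatching to the functions above; main() only calls
# login(), so this loop is an illustrative extension, not from the source.
def menu():
    actions = {'1': login, '2': register, '3': update}
    while True:
        choice = input("1) login  2) register  3) update  q) quit: ").strip()
        if choice == 'q':
            break
        action = actions.get(choice)
        if action:
            action()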
python