Columns:
  id          stringlengths   1-8
  text        stringlengths   6-1.05M
  dataset_id  stringclasses   1 value
8065068
<reponame>hasindu2008/NA12878<gh_stars>100-1000
import h5py
import pandas as pd
import numpy as np


def smooth_pore(arr, cutoff=1500):
    m = int(np.mean(arr))
    arr[arr > cutoff] = m
    return arr


def export_read_file(channel, start_index, end_index, bulkfile, output_dir, remove_pore=False):
    """Generate a read FAST5 file from channel and coordinates in a bulk FAST5 file

    Parameters
    ----------
    channel : int
        channel number from bulk FAST5 file
    start_index : int
        start index for read (time in seconds * sample_frequency)
    end_index : int
        end index for read (time in seconds * sample_frequency)
    bulkfile : h5py.File
        bulk FAST5 file opened as an h5py object
    output_dir : str
        output directory, must include trailing slash
    remove_pore : bool
        remove pore-like signal (>1500) from pore trace

    Returns
    -------
    str
        filename of the exported read
    """
    out_filename = bulkfile["UniqueGlobalKey"]["context_tags"].attrs["filename"].decode('utf8')
    out_filename = '{fn}_bulkvis-read_{start}-{end}_ch_{ch}.fast5'.format(
        fn=out_filename,
        start=start_index,
        end=end_index,
        ch=channel
    )
    output_arg = "{dir}{fn}".format(
        dir=output_dir,
        fn=out_filename
    )
    readfile = h5py.File(output_arg, "w")
    read_id_str = "{ch}-{start}-{end}".format(
        ch=channel,
        start=start_index,
        end=end_index
    )
    version_num = 0.6
    ch_num = channel
    ch_str = "Channel_{ch}".format(ch=ch_num)
    ugk = readfile.create_group("UniqueGlobalKey")
    bulkfile.copy('UniqueGlobalKey/context_tags', ugk)
    bulkfile.copy('UniqueGlobalKey/tracking_id', ugk)
    bulkfile.copy("IntermediateData/{ch}/Meta".format(ch=ch_str), ugk)
    readfile["UniqueGlobalKey"]["channel_id"] = readfile["UniqueGlobalKey"]["Meta"]
    readfile["UniqueGlobalKey"]["channel_id"].attrs.create(
        'sampling_rate',
        readfile["UniqueGlobalKey"]["Meta"].attrs["sample_rate"],
        None,
        dtype='Float64'
    )
    del readfile["UniqueGlobalKey"]["Meta"]
    readfile["UniqueGlobalKey"]["channel_id"].attrs.create('channel_number', ch_num, None, dtype='<S4')
    remove_attrs = ["description", "elimit", "scaling_used", "smallest_event", "threshold", "window", "sample_rate"]
    for attr in remove_attrs:
        del readfile["UniqueGlobalKey"]["channel_id"].attrs[attr]
    int_data_path = bulkfile["IntermediateData"][ch_str]["Reads"]
    int_dict = {
        'read_start': int_data_path["read_start"],
        'median_before': int_data_path["median_before"],
        'current_well_id': int_data_path["current_well_id"]
    }
    df = pd.DataFrame(data=int_dict)
    df = df.where(df.read_start > start_index).dropna()
    read_number = 0
    attrs = {
        'duration': {'val': end_index - start_index, 'd': 'uint32'},
        'median_before': {'val': df.iloc[0].median_before, 'd': 'Float64'},
        'read_id': {'val': read_id_str, 'd': '<S38'},
        'read_number': {'val': read_number, 'd': 'uint16'},
        'start_mux': {'val': int(df.iloc[0].current_well_id), 'd': 'uint8'},
        'start_time': {'val': start_index, 'd': 'uint64'}
    }
    dataset = bulkfile["Raw"][ch_str]["Signal"][()]
    if remove_pore:
        dataset = smooth_pore(dataset[start_index:end_index])
    else:
        dataset = dataset[start_index:end_index]
    readfile.create_group('Raw/Reads/Read_{n}'.format(n=read_number))
    readfile.attrs.create('file_version', version_num, None, dtype='Float64')
    # add read_### attrs
    for k, v in attrs.items():
        readfile["Raw"]["Reads"]["Read_{n}".format(n=read_number)].attrs.create(k, v['val'], None, dtype=v['d'])
    ms = [18446744073709551615]
    readfile.create_dataset(
        'Raw/Reads/Read_{n}/Signal'.format(n=read_number),
        data=(dataset),
        maxshape=(ms),
        chunks=True,
        dtype='int16',
        compression="gzip",
        compression_opts=1
    )
    readfile.close()
    return out_filename
StarcoderdataPython
11334881
import numpy as np import pickle import matplotlib.pyplot as plt from scipy.interpolate import griddata def get_barycentric_coords(tensor): """ :param tensor: 3x3 anisotropic Reynolds stress tensor :return: x and y barycentric coordinates """ # Compute barycentric coordinates from the eigenvalues of a 3x3 matrix eigenvalues_RST = np.linalg.eigvals(tensor) eigenvalues_RST = np.flip(np.sort(eigenvalues_RST)) # Barycentric coordinates c1 = eigenvalues_RST[0] - eigenvalues_RST[1] c2 = 2. * (eigenvalues_RST[1] - eigenvalues_RST[2]) c3 = 3. * eigenvalues_RST[2] + 1. check_bary = c1 + c2 + c3 # should sum up to 1 # Define the corners xi1 = 1.2 eta1 = -np.sqrt(3.) / 2. xi2 = -0.8 eta2 = -np.sqrt(3.) / 2. xi3 = 0.2 eta3 = np.sqrt(3.) / 2. x_bary = c1 * xi1 + c2 * xi2 + c3 * xi3 y_bary = c1 * eta1 + c2 * eta2 + c3 * eta3 return x_bary, y_bary, eigenvalues_RST def get_barycentric_coords2(tensor): """ :param tensor: 3x3 anisotropic Reynolds stress tensor :return: x and y barycentric coordinates, new realizable tensor """ # Compute barycentric coordinates from the eigenvalues of a 3x3 matrix eigenvalues_RST_unsorted = np.linalg.eigvals(tensor) # Sort eigenvalues based on magnitude eig_sorted = np.flip(np.argsort(eigenvalues_RST_unsorted)) eigenvalues_RST = eigenvalues_RST_unsorted[eig_sorted] # Barycentric coordinates c1 = eigenvalues_RST[0] - eigenvalues_RST[1] c2 = 2. * (eigenvalues_RST[1] - eigenvalues_RST[2]) c3 = 3. * eigenvalues_RST[2] + 1. check_bary = c1 + c2 + c3 # should sum up to 1 # Define the corners xi1 = 1.2 eta1 = -np.sqrt(3.) / 2. xi2 = -0.8 eta2 = -np.sqrt(3.) / 2. xi3 = 0.2 eta3 = np.sqrt(3.) / 2. x_bary = c1 * xi1 + c2 * xi2 + c3 * xi3 y_bary = c1 * eta1 + c2 * eta2 + c3 * eta3 if y_bary < eta1: # print('\t apply realizability filter') x_new = x_bary + (xi3 - x_bary) / (eta3 - y_bary) * (eta1 - y_bary) y_new = eta1 # Solve linear system a beta = y a = np.array([[xi1, -xi1 + 2*xi2, -2*xi2+3*xi3], [eta1, -eta1 + 2*eta2, -2*eta2+3*eta3], [1., 1., 1.]]) y = np.array([[x_new - xi3], [y_new - eta3], [0.]]) bary_realizable = np.reshape(np.linalg.solve(a, y), [3]) c1 = bary_realizable[0] - bary_realizable[1] c2 = 2. * (bary_realizable[1] - bary_realizable[2]) c3 = 3. * bary_realizable[2] + 1. check_bary = c1 + c2 + c3 # should sum up to 1 # Define the corners xi1 = 1.2 eta1 = -np.sqrt(3.) / 2. xi2 = -0.8 eta2 = -np.sqrt(3.) / 2. xi3 = 0.2 eta3 = np.sqrt(3.) / 2. x_bary = c1 * xi1 + c2 * xi2 + c3 * xi3 y_bary = c1 * eta1 + c2 * eta2 + c3 * eta3 else: # Enforce general realizability -> sum eigenvalues = 0 # Solve linear system a beta = y a = np.array([[xi1, -xi1 + 2 * xi2, -2 * xi2 + 3 * xi3], [eta1, -eta1 + 2 * eta2, -2 * eta2 + 3 * eta3], [1., 1., 1.]]) y = np.array([[x_bary - xi3], [y_bary - eta3], [0.]]) bary_realizable = np.reshape(np.linalg.solve(a, y), [3]) # Compute new tensor with the realizable barycentric coordinates and eigenvalues eig_sorted_reverse = [] for i in eigenvalues_RST_unsorted: eig_sorted_reverse.append(np.where(eigenvalues_RST == i)[0][0]) tensor_eigvect = np.linalg.eig(tensor)[1] vectors = np.vstack([tensor_eigvect[0], tensor_eigvect[1], tensor_eigvect[2]]) bary_realizable_sorted = bary_realizable[eig_sorted_reverse] tensor_new = np.dot(vectors, np.dot(np.diag(bary_realizable_sorted), np.linalg.inv(vectors))) return x_bary, y_bary, tensor_new def get_barycentric_color(x_bary, y_bary): # Define the corners xi1 = 1.2 eta1 = -np.sqrt(3.) / 2. xi2 = -0.8 eta2 = -np.sqrt(3.) / 2. xi3 = 0.2 eta3 = np.sqrt(3.) / 2. 
# Set color range and colormap steps = 900 phi_range = np.linspace(0, -2. * np.pi, steps) norm = plt.Normalize() colors = plt.cm.hsv(norm(phi_range)) # Determine centroid of barycentric map in [x, y] centroid = [xi3, eta3 - (xi1 - xi2) * np.sqrt(3.) / 3.] # Determine polar coordinates of the input bary x and y radius = np.sqrt((x_bary - centroid[0])**2 + (y_bary - centroid[1])**2) delta_phi_1C = np.arctan2(eta1 - centroid[1], xi1 - centroid[0]) phi = np.arctan2(y_bary - centroid[1], x_bary - centroid[0]) # Correct for angles in top half of bary map if phi >= 0.: phi = - (2. * np.pi - phi) # set phi zero equal to the anlge of the 1C corner phi = phi - delta_phi_1C # Correct for angles between phi= 0 and the 1C corner if phi >= 0.: phi = - (2. * np.pi - phi) color_index = steps - np.searchsorted(np.flip(phi_range), phi, side="left") - 1 # Determine reference radius if -120./180. * np.pi < phi <= 0.: lhs = np.array([[(y_bary - centroid[1]) / (x_bary - centroid[0]), -1.], [(eta1 - eta2) / (xi1 - xi2), -1.]]) rhs = np.array([[-centroid[1] + centroid[0] * (y_bary - centroid[1]) / (x_bary - centroid[0])], [-eta2 + xi2 * (eta1 - eta2) / (xi1 - xi2)]]) coords_side = np.linalg.solve(lhs, rhs) max_radius = np.sqrt((coords_side[0] - centroid[0])**2 + (coords_side[1] - centroid[1])**2) elif -240./180. * np.pi < phi <= -120./180. * np.pi: lhs = np.array([[(y_bary - centroid[1]) / (x_bary - centroid[0]), -1.], [(eta3 - eta2) / (xi3 - xi2), -1.]]) rhs = np.array([[-centroid[1] + centroid[0] * (y_bary - centroid[1]) / (x_bary - centroid[0])], [-eta2 + xi2 * (eta3 - eta2) / (xi3 - xi2)]]) coords_side = np.linalg.solve(lhs, rhs) max_radius = np.sqrt((coords_side[0] - centroid[0]) ** 2 + (coords_side[1] - centroid[1]) ** 2) else: lhs = np.array([[(y_bary - centroid[1]) / (x_bary - centroid[0]), -1.], [(eta1 - eta3) / (xi1 - xi3), -1.]]) rhs = np.array([[-centroid[1] + centroid[0] * (y_bary - centroid[1]) / (x_bary - centroid[0])], [-eta3 + xi3 * (eta1 - eta3) / (xi1 - xi3)]]) coords_side = np.linalg.solve(lhs, rhs) max_radius = np.sqrt((coords_side[0] - centroid[0]) ** 2 + (coords_side[1] - centroid[1]) ** 2) # Select color bary_colors = colors[color_index, :] if radius / max_radius < 1.0: bary_colors[3] *= (radius / max_radius)**(1./3) else: max_radius = (eta3 - centroid[1]) problem = radius / (eta3 - centroid[1]) # print('Radius outside barycentric map') # Return colors as [R, B, G, alpha], because that is what matplotlib needs return bary_colors[[0, 2, 1, 3]] def area(x1, y1, x2, y2, x3, y3): return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0) def isInside(x1, y1, x2, y2, x3, y3, x, y): # Calculate area of triangle ABC A = area(x1, y1, x2, y2, x3, y3) # Calculate area of triangle PBC A1 = area(x, y, x2, y2, x3, y3) # Calculate area of triangle PAC A2 = area(x1, y1, x, y, x3, y3) # Calculate area of triangle PAB A3 = area(x1, y1, x2, y2, x, y) # Check if sum of A1, A2 and A3 # is same as A if (A == A1 + A2 + A3): return True else: return False # Plot a sample of the barycentric map plot_map = True if plot_map: # Sample barycentric map # Define the corners xi1 = 1.2 eta1 = -np.sqrt(3.) / 2. xi2 = -0.8 eta2 = -np.sqrt(3.) / 2. xi3 = 0.2 eta3 = np.sqrt(3.) / 2. 
x_sample = np.random.uniform(xi2, xi1, 10000) y_sample = np.random.uniform(eta1, eta3, 10000) xy_bary = [] for i in range(1000): check_inside = isInside(xi2, eta2, xi3, eta3, xi1, eta1, x_sample[i], y_sample[i]) if check_inside: xy_bary.append([x_sample[i], y_sample[i]]) # add the corners xy_bary.append([xi1, eta1]) xy_bary.append([xi2, eta2]) xy_bary.append([xi3, eta3]) res = [] for i in range(len(xy_bary)): color_i = get_barycentric_color(xy_bary[i][0], xy_bary[i][1]) res.append(color_i) res = np.array(res) plt.figure(dpi=150) points = np.fliplr(np.array(xy_bary)) points[:, 0] += 0.8 - (eta3 - 0.8) points[:, 1] += eta3 points /= 2. grid_x, grid_y = np.mgrid[0:1:1000j, 0:1:1000j] grid_z2 = griddata(points, res, (grid_x, grid_y), method='cubic') grid_z2 = np.nan_to_num(grid_z2) plt.imshow(grid_z2, extent=(0, 1, 0, 1), origin='lower') plt.show() else: # Define training features and responses storage_filepath = 'J:/ALM_N_H_ParTurb/Slices/Result/22000.0918025/' propertyName = 'uuPrime2' sliceName = 'alongWindRotorOne' y_test = pickle.load(open(storage_filepath + propertyName + '_' + sliceName + '_tensors.p', 'rb')) y_test = np.reshape(y_test, [y_test.shape[0]*y_test.shape[1], 9]) x = pickle.load(open(storage_filepath + propertyName + '_' + sliceName + '_x.p', 'rb')) y = pickle.load(open(storage_filepath + propertyName + '_' + sliceName + '_y.p', 'rb')) z = pickle.load(open(storage_filepath + propertyName + '_' + sliceName + '_z.p', 'rb')) # meshRANS = pickle.load(open(storage_filepath + '/meshRANS.p', 'rb')) # # Get y, z coordinates # meshRANS = meshRANS[0:2] meshRANS = np.array([x, z]) # Storage for color data res = [] # Determine if to ensure realizability in barycentric map or not ensure_realizability = False # Loop over all data in the mesh # Check if data is there, otherwise set color to white for i in range(y_test.shape[0]): if all(np.isfinite(y_test[i, :])): # get barycentric coordinates with if ensure_realizability: x_bary, y_bary, _ = get_barycentric_coords2(np.reshape(y_test[i, :], [3, 3])) else: x_bary, y_bary, evalues = get_barycentric_coords(np.reshape(y_test[i, :], [3, 3])) # determine the associated color color_i = get_barycentric_color(x_bary, y_bary) else: color_i = np.array([0, 0, 0, 0]) # Store the color res.append(color_i) # Revert color list to np.array res = np.array(res) # Plot the result fig1 = plt.figure() # Stack mash to points (x, y) points = np.hstack([np.reshape(meshRANS[1], [y_test.shape[0], 1]), np.reshape(meshRANS[0], [y_test.shape[0], 1])]) # Create grid used for the final iamge grid_x, grid_y = np.mgrid[0:1:3000j, 0:1:1000j] # Resize image to fit the points of the flow meshgrid grid_x *= (meshRANS[1].max() - meshRANS[1].min()) grid_y *= (meshRANS[0].max() - meshRANS[0].min()) grid_x += meshRANS[1].min() grid_y += meshRANS[0].min() # Interpolate data to image grid_z2 = griddata(points, res, (grid_x, grid_y), method='cubic') # Resolve any problems with nan values grid_z2 = np.nan_to_num(grid_z2) # Plot final image ax = fig1.add_subplot(1, 1, 1) ax.imshow(grid_z2, extent=(meshRANS[0].min(), meshRANS[0].max(), meshRANS[1].min(), meshRANS[1].max()), origin='lower') plt.savefig('J:/ALM_N_H_ParTurb/Slices/Result/savefig1.png', dpi=600) plt.show() print('Done')
StarcoderdataPython
11299191
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]='3'
import warnings
warnings.filterwarnings('ignore')
import AlexNetCompleted
import AlexNetForUsers

rightModelPath = 'step4/modelInfo/AlexNet'
userModelPath = 'step4/userModelInfo/AlexNet'

# print(os.path.exists(rightModelPath))
# print(os.path.exists(userModelPath))
# print(os.path.getsize(rightModelPath))
# print(os.path.getsize(userModelPath))

try:
    # isRight = IsEqual(rightModelPath, userModelPath)
    # print(isRight)
    if os.path.getsize(rightModelPath)==os.path.getsize(userModelPath):
        print('恭喜你通过本关测试!模型结构正确,你已经掌握了AlexNet的结构!',end='')
    else:
        print('模型结构有误!未能通过本关测试!')
except:
    print('模型结构文件保存有误!未能通过本关测试')
StarcoderdataPython
3455408
import feedparser
import re
from dataclasses import dataclass
from datetime import datetime
from decimal import Decimal
from moneyed import Currency
from typing import List


@dataclass
class CurrencyRate:
    source_currency: Currency
    target_currency: Currency
    rate: Decimal
    uploaded: datetime


class ECBCurrencyRate:
    pattern = re.compile('\d+\.\d+')

    def __init__(self, source_url, source_currency, target_currency):
        self.source_url = source_url
        self.source_currency = source_currency
        self.target_currency = target_currency

    def get_currency_rates(self) -> List[CurrencyRate]:
        data = feedparser.parse(self.source_url)
        return [
            CurrencyRate(
                source_currency=self.source_currency,
                target_currency=self.target_currency,
                rate=self.parse_rate(entry.cb_exchangerate),
                uploaded=datetime.fromisoformat(entry.updated),
            )
            for entry in data.entries
        ]

    def parse_rate(self, rate):
        # rate from ECB is a concat of rate and currency e.g. 1278.95\nEUR
        return re.match(self.pattern, rate).group()
StarcoderdataPython
1788899
<gh_stars>1-10
import sys

chars = "ACGT"

## check neighbors
def neighbors(pattern, d):
    assert(d <= len(pattern))
    if d == 0:
        return [pattern]
    r2 = neighbors(pattern[1:], d-1)
    r = [c + r3 for r3 in r2 for c in chars if c != pattern[0]]
    if (d < len(pattern)):
        r2 = neighbors(pattern[1:], d)
        r += [pattern[0] + r3 for r3 in r2]
    return r

pattern = sys.argv[1]
d = int(sys.argv[2])

print(neighbors(pattern, d), len(neighbors(pattern, d)))
StarcoderdataPython
6570914
#! /usr/bin/env python from tornado import ioloop from tornado import web from jinja2 import Environment, FileSystemLoader import os, argparse from lsst.sims.maf.viz import MafTracking, dbController import lsst.sims.maf.db as db import json class RunSelectHandler(web.RequestHandler): def get(self): selectTempl = env.get_template("runselect.html") if 'runId' in self.request.arguments: runId = int(self.request.arguments['runId'][0]) else: # Set runID to a negative number, to default to first run. runId = startRunId self.write(selectTempl.render(runlist=runlist, runId=runId, jsPath=jsPath)) class MetricSelectHandler(web.RequestHandler): def get(self): selectTempl = env.get_template("metricselect.html") runId = int(self.request.arguments['runId'][0]) self.write(selectTempl.render(runlist=runlist, runId=runId)) class MetricResultsPageHandler(web.RequestHandler): def get(self): resultsTempl = env.get_template("results.html") runId = int(self.request.arguments['runId'][0]) if 'metricId' in self.request.arguments: metricIdList = self.request.arguments['metricId'] else: metricIdList = [] if 'Group_subgroup' in self.request.arguments: groupList = self.request.arguments['Group_subgroup'] else: groupList = [] self.write(resultsTempl.render(metricIdList=metricIdList, groupList=groupList, runId=runId, runlist=runlist)) class DataHandler(web.RequestHandler): def get(self): runId = int(self.request.arguments['runId'][0]) metricId = int(self.request.arguments['metricId'][0]) if 'datatype' in self.request.arguments: datatype = self.request.arguments['datatype'][0].lower() else: datatype = 'npz' run = runlist.getRun(runId) metric = run.metricIdsToMetrics([metricId]) if datatype == 'npz': npz = run.getNpz(metric) if npz is None: self.write('No npz file available.') else: self.redirect(npz) elif datatype == 'json': jsn = run.getJson(metric) if jsn is None: self.write('No JSON file available.') else: self.write(jsn) else: self.write('Data type "%s" not understood.' %(datatype)) class ConfigPageHandler(web.RequestHandler): def get(self): configTempl = env.get_template("configs.html") runId = int(self.request.arguments['runId'][0]) self.write(configTempl.render(runlist=runlist, runId=runId)) class StatPageHandler(web.RequestHandler): def get(self): statTempl = env.get_template("stats.html") runId = int(self.request.arguments['runId'][0]) self.write(statTempl.render(runlist=runlist, runId=runId)) class AllMetricResultsPageHandler(web.RequestHandler): def get(self): """Load up the files and display """ allresultsTempl = env.get_template("allmetricresults.html") runId = int(self.request.arguments['runId'][0]) self.write(allresultsTempl.render(runlist=runlist, runId=runId)) class MultiColorPageHandler(web.RequestHandler): def get(self): """Display sky maps. 
""" multiColorTempl = env.get_template("multicolor.html") runId = int(self.request.arguments['runId'][0]) self.write(multiColorTempl.render(runlist=runlist, runId=runId)) class showMaf(web.RequestHandler): def get(self): template = env.get_template("showMaf.html") self.write(template.render()) class showRun(web.RequestHandler): def get(self, id): template = env.get_template("showRun.html") self.write(template.render(runId=int(id))) class SearchMetrics(web.RequestHandler): def get(self): template = env.get_template("search.html") self.write(template.render()) class SearchHandler(web.RequestHandler): """return metrics in json format""" def initialize(self, trackingDbAddress): self.controller = dbController.ShowMafDBController(trackingDbAddress) def get(self): list_type = self.get_argument('list_type') if list_type == 'metrics': results = self.controller.get_all_metrics() if list_type == 'sim_data': results = self.controller.get_all_sim_data() if list_type == 'slicer': results = self.controller.get_all_slicer() self.write(json.dumps(results)) def post(self): keywords = self.get_argument('keywords') results = self.controller.search_metrics(json.loads(keywords)) self.write(json.dumps(results)) class RunListHandler(web.RequestHandler): """return all the runs in json format""" def initialize(self, trackingDbAddress): self.runlist = MafTracking(trackingDbAddress) def get(self): runs = [dict(zip(self.runlist.runs.dtype.names,x)) for x in self.runlist.runs ] for run in runs: run['mafRunId'] = int(run['mafRunId']) self.write(json.dumps(runs)) class RunHandler(web.RequestHandler): """return metrics of a run in a tree-structured way in json format""" def initialize(self, trackingDbAddress): self.runlist = MafTracking(trackingDbAddress) def get(self, id): mafRun = self.runlist.getRun(int(id)) mafRunInfo = self.runlist.getRunInfo(int(id)) run = dict() runInfo = [dict(zip(mafRunInfo.dtype.names,x)) for x in mafRunInfo ][0] metrics = [] # TODO: make this recursive so that the groups go to more/less than two levels groups = mafRun.groups.keys() for g in groups: members = [] for sg in mafRun.groups[g]: subsetMembers = [] subsetMetrics = mafRun.metricsInSubgroup(g, sg) for metric in subsetMetrics: metricInfo = mafRun.metricInfo(metric) caption = mafRun.captionForMetric(metric) metricInfo['metricId'] = int(metric[0]) plotInfo = mafRun.plotsForMetric(metric) plotdict = mafRun.plotDict(plotInfo) plots = [] for plottype in plotdict: plots.append({ 'plotType': plottype, 'plotFile': plotdict[plottype]['plotFile'][0], 'thumbFile': plotdict[plottype]['thumbFile'][0] }) metricInfo['plots'] = plots metricInfo['caption'] = caption subsetMembers.append(json.loads(json.dumps(metricInfo))) members.append({"groupName": sg, "members": subsetMembers}) metrics.append({"groupName": g, "members": members}) run['runInfo'] = runInfo run['metrics'] = metrics self.write(json.dumps(run, indent=3)) def make_app(trackingDbAddress): """The tornado global configuration """ application = web.Application([ ("/", RunSelectHandler), web.url(r"/runList", RunListHandler, dict(trackingDbAddress=trackingDbAddress), name="runList"), web.url(r"/run/([0-9]*)", RunHandler, dict(trackingDbAddress=trackingDbAddress), name="run"), web.url(r"/search", SearchHandler, dict(trackingDbAddress=trackingDbAddress), name="search"), ("/showMaf", showMaf), ("/searchMetrics", SearchMetrics), web.url(r"/showRun/([0-9]*)", showRun), (r"/maf_cadence/(.*)", web.StaticFileHandler, {'path': mafDbDir}), ("/metricSelect", MetricSelectHandler), ("/metricResults", 
MetricResultsPageHandler), ("/getData", DataHandler), ("/configParams", ConfigPageHandler), ("/summaryStats", StatPageHandler), ("/allMetricResults", AllMetricResultsPageHandler), ("/multiColor", MultiColorPageHandler), (r"/(favicon.ico)", web.StaticFileHandler, {'path':faviconPath}), # (r"/(sorttable.js)", web.StaticFileHandler, {'path':jsPath}), (r"/fonts/(.*)", web.StaticFileHandler, {'path':os.path.join(mafDir, 'lsst/sims/maf/viz/statics/css/fonts')}), (r"/*/(.*)", web.StaticFileHandler, {'path':staticpath}), ], debug=True) return application if __name__ == "__main__": parser = argparse.ArgumentParser(description="Python script to display MAF output in a web browser."+ " After launching, point your browser to 'http://localhost:8989/'") defaultdb = os.path.join(os.getcwd(), 'trackingDb_sqlite.db') defaultdb = 'sqlite:///' + defaultdb parser.add_argument("-t", "--trackingDb", type=str, default=defaultdb, help="Tracking database dbAddress.") parser.add_argument("-d", "--mafDir", type=str, default=None, help="Add this directory to the trackingDb and open immediately.") parser.add_argument("-c", "--mafComment", type=str, default=None, help="Add a comment to the trackingDB describing the MAF analysis of this directory (paired with mafDir argument).") parser.add_argument("-p", "--port", type=int, default=8989, help="Port for connecting to showMaf.") args = parser.parse_args() # Check tracking DB is sqlite (and add as convenience if forgotten). trackingDbAddress = args.trackingDb if not trackingDbAddress.startswith('sqlite:///'): trackingDbAddress = 'sqlite:///' + trackingDbAddress print 'Using tracking database at %s' %(trackingDbAddress) global startRunId startRunId = -666 # If given a directory argument: if args.mafDir is not None: mafDir = os.path.realpath(args.mafDir) if not os.path.isdir(mafDir): print 'There is no directory containing MAF outputs at %s.' %(mafDir) print 'Just opening using tracking db at %s.' %(trackingDbAddress) # Open tracking database to add a run. trackingDb = db.TrackingDb(trackingDbAddress=trackingDbAddress) # Set opsim comment and name from the config files from the run. opsimComment = '' opsimRun = '' opsimDate = '' mafDate = '' if os.path.isfile(os.path.join(mafDir, 'configSummary.txt')): file = open(os.path.join(mafDir, 'configSummary.txt')) for line in file: tmp = line.split() if tmp[0].startswith('RunName'): opsimRun = ' '.join(tmp[1:]) if tmp[0].startswith('RunComment'): opsimComment = ' '.join(tmp[1:]) if tmp[0].startswith('MAFVersion'): mafDate = tmp[-1] if tmp[0].startswith('OpsimVersion'): opsimDate = tmp[-2] # Let's go ahead and make the formats match opsimDate = opsimDate.split('-') opsimDate = opsimDate[1]+'/'+opsimDate[2]+'/'+opsimDate[0][2:] # Give some feedback to the user about what we're doing. print 'Adding to tracking database at %s:' %(trackingDbAddress) print ' MafDir = %s' %(mafDir) print ' MafComment = %s' %(args.mafComment) print ' OpsimRun = %s' %(opsimRun) print ' OpsimComment = %s' %(opsimComment) print ' OpsimDate = %s' %(opsimDate) print ' MafDate = %s' %(mafDate) # Add the run. startRunId = trackingDb.addRun(opsimRun, opsimComment, args.mafComment, mafDir, opsimDate, mafDate) print ' Used runID %d' %(startRunId) trackingDb.close() # Open tracking database and start visualization. global runlist runlist = MafTracking(trackingDbAddress) if startRunId < 0: startRunId = runlist.runs[0]['mafRunId'] # Set up path to template and favicon paths, and load templates. 
mafDir = ""#os.getenv('SIMS_MAF_DIR') templateDir = os.path.join(mafDir, 'lsst/sims/maf/viz/templates/' ) global faviconPath faviconPath = os.path.join(mafDir, 'lsst/sims/maf/viz/') global jsPath jsPath = os.path.join(mafDir, 'lsst/sims/maf/viz/statics/js') env = Environment(loader=FileSystemLoader(templateDir)) # Add 'zip' to jinja templates. env.globals.update(zip=zip) global staticpath staticpath = os.path.join(mafDir, 'lsst/sims/maf/viz/statics/') global mafDbDir mafDbDir = os.path.join(mafDir, 'maf_cadence/') # Start up tornado app. application = make_app(trackingDbAddress) application.listen(args.port) print 'Tornado Starting: \nPoint your web browser to http://localhost:%d/ \nCtrl-C to stop' %(args.port) ioloop.IOLoop.instance().start()
StarcoderdataPython
11359657
TOP10_DATA_TITLE = {
    "jumps" : "Number of Hyperspace Jumps",
    "ly" : "Light Years Travelled",
    "bought" : "Cargo Bought",
    "sold" : "Cargo Sold",
    "points" : "Mission Points Earned",
    "bounty" : "Bountys Handed in",
    "bonds" : "Combat Bonds Handed in",
    "explodata" : "Exploration Data",
    "passengers" : "Passengers Transported",
    "objects" : "Objects Scanned",
}

TOP10_TIME_TITLE = {
    "today" : "Today",
    "week" : "This Week",
    "ever" : "In All Time",
}

TOP10_DATA_UNITS = {
    "jumps" : "Jumps",
    "ly" : "Ly",
    "bought" : "Units",
    "sold" : "Units",
    "points" : "Points",
    "bounty" : "Cr",
    "bonds" : "Cr",
    "explodata" : "Cr",
    "passengers" : "People",
    "objects" : "Objects",
}

HAPPINESS = {
    "$faction_happinessband1;" : "Elated",
    "$faction_happinessband2;" : "Happy",
    "$faction_happinessband3;" : "Discontented",
    "$faction_happinessband4;" : "Unhappy",
    "$faction_happinessband5;" : "Despondant"
}

PRISON_FACTIONS = ["Independent Detention Foundation", "Imperial Detainment Company",
                   "Federal Internment Corporation", "Alliance Incarceration Concern"]
PRISON_FAC_ID = ["75876", "75878", "75875", "75877"]

IGNORE_SYSTEMS = ["AHHHH"]

INTERVALS = (
    ('weeks', 604800),   # 60 * 60 * 24 * 7
    ('days', 86400),     # 60 * 60 * 24
    ('hours', 3600),     # 60 * 60
    ('minutes', 60),
    ('seconds', 1),
)

ADMINS = ["306477274452983829", "177189147843231744"]  # ME, NINJ,

EMOJIS = {
    "low" : ":sob:",
    "ok" : ":smile:",
    "high" : ":sweat:",
}
StarcoderdataPython
5007825
<filename>PortScanner.py<gh_stars>1-10
import threading,socket,argparse
from sys import exit
from queue import Queue

lock = threading.Lock()
q = Queue()

parser=argparse.ArgumentParser()
group=parser.add_mutually_exclusive_group()
group.add_argument("-i", "--ip", help="Target's ip", action="store")
group.add_argument("-d", "--domain", help="Target's domain name", action="store")
parser.add_argument("-p", "--port", help="Target ports' number interval", action="store", nargs="*", type=int)
parser.add_argument("-t", "--thread", help="How many threads do you want to use?", action="store", default=4, type=int)
args = parser.parse_args()
# --------------------------------------
if args.ip:
    target = args.ip
if args.domain:
    target = socket.gethostbyname(args.domain)
if args.port:
    min_port = args.port[0]
    max_port = args.port[1]
if args.thread:
    number_of_threads = args.thread


def main():
    range_to_scan(min_port, max_port)
    create_thread(target,number_of_threads)
    q.join()


def scan(target,port):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    con = s.connect_ex((target, port))
    try:
        if (con==0):
            with lock:
                print('[+] Port--> {}'.format(port))
        s.close()
    except:
        pass


def range_to_scan(min_port=1, max_port=1000):
    if (min_port <= 0):
        print("Minimum port number must be larger than 0")
        exit(1)
    for worker in range(min_port, max_port):
        q.put(worker)


def create_thread(target,number_of_threads=4):
    for x in range(number_of_threads):
        t = threading.Thread(target=threader,args=(target,),daemon=True)
        t.start()


def threader(target):
    while True:
        worker = q.get()
        scan(target,worker)
        q.task_done()


if __name__=="__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("Interrupted..")
        exit(1)
StarcoderdataPython
66010
#!/usr/bin/env python

from __future__ import with_statement
from geode import Prop,PropManager,cache
from geode.value import Worker
import sys


def worker_test_factory(props):
    x = props.get('x')
    y = props.add('y',5)
    return cache(lambda:x()*y())


def remote(conn):
    inputs = conn.inputs
    x = inputs.get('x')
    assert x()==7
    n = Prop('n',-1)
    done = Prop('done',False)
    conn.add_output('n',n)
    conn.add_output('done',done)
    for i in xrange(10):
        n.set(i)
    done.set(True)


def test_worker():
    command_file = __file__
    if command_file.endswith('.pyc'):
        command_file=command_file[:-3]+'py'
    for command in None,[command_file,'--worker']:
        props = PropManager()
        x = props.add('x',3)
        props.add('y',5)
        with Worker.Worker(debug=0,command=command) as worker:
            worker.add_props(props)
            xy = worker.create('xy',worker_test_factory)
            assert xy() is None
            worker.pull('xy')
            worker.process(timeout=None,count=1)
            assert xy()==3*5
            x.set(7)
            worker.process(timeout=None,count=1)
            assert xy()==None
            worker.pull('xy')
            worker.process(timeout=None,count=1)
            assert xy()==7*5
            # Test remote function execution
            worker.run(remote)
            n = worker.wait_for_output('n')
            done = worker.wait_for_output('done')
            seen = []
            while not done():
                worker.process(timeout=None,count=1)
                seen.append(n())
            assert seen==range(10)+[9]


if __name__=='__main__':
    if len(sys.argv)==3 and sys.argv[1]=='--worker':
        Worker.worker_standalone_main(sys.argv[2])
    else:
        test_worker()
StarcoderdataPython
6435994
########################################################################
#
# Copyright (c) 2017, STEREOLABS.
#
# All rights reserved.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################

"""
    Read SVO sample to read the video and the information of the camera. It can pick a frame of the svo
    and save it as a JPEG or PNG file. Depth map and Point Cloud can also be saved into files.
"""
import sys
import pyzed.sl as sl
import cv2


def main():
    init = sl.InitParameters()
    init.camera_resolution = sl.RESOLUTION.HD720
    init.depth_mode = sl.DEPTH_MODE.PERFORMANCE

    if (len(sys.argv) > 1) :
        ip = sys.argv[1]
        init.set_from_stream(ip)
    else :
        print('Usage : python3 streaming_receiver.py ip')
        exit(1)

    cam = sl.Camera()
    status = cam.open(init)
    if status != sl.ERROR_CODE.SUCCESS:
        print(repr(status))
        exit(1)

    runtime = sl.RuntimeParameters()
    mat = sl.Mat()

    key = ''
    print("  Quit : CTRL+C\n")
    while key != 113:
        err = cam.grab(runtime)
        if (err == sl.ERROR_CODE.SUCCESS) :
            cam.retrieve_image(mat, sl.VIEW.LEFT)
            cv2.imshow("ZED", mat.get_data())
            key = cv2.waitKey(1)
        else :
            key = cv2.waitKey(1)

    cam.close()


if __name__ == "__main__":
    main()
StarcoderdataPython
8056189
<reponame>AmanMishra148/python-repo
print("Hello")
print("Python")
StarcoderdataPython
3361051
#!/usr/bin/env python3

# Quantopian, Inc. licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.

import argparse
import os
import shutil
import subprocess
import sys

from penguindome import (
    top_dir,
    set_gpg,
    release_files_iter,
    signatures_dir,
    verify_signature,
)
from penguindome.server import get_logger, sign_file

os.chdir(top_dir)
log = get_logger('sign')


def parse_args():
    parser = argparse.ArgumentParser(description='Generate digital signatures '
                                     'for client files')
    parser.add_argument('--full', action='store_true', help='Regenerate all '
                        'signatures rather than only invalid ones')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    old_signatures = set()

    if args.full:
        log.info('Renerating all signatures')
        shutil.rmtree(signatures_dir, ignore_errors=True)
    elif os.path.exists(signatures_dir):
        for dirpath, dirnames, filenames in os.walk(signatures_dir):
            for f in filenames:
                old_signatures.add(os.path.join(dirpath, f))

    for file in release_files_iter():
        set_gpg('client')
        signature = verify_signature(file)
        if signature:
            log.debug('Preserving valid signature for {}', file)
        else:
            set_gpg('server')
            log.info('Signing {}', file)
            signature = sign_file(file)
        old_signatures.discard(signature)

    for file in old_signatures:
        log.info('Removing obsolete signature {}', file)
        os.unlink(file)

    try:
        subprocess.check_output(
            ('python', os.path.join('client', 'verify.py')),
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        sys.exit('Verify failed, try running bin/sign again. Output:\n{}'.
                 format(e.output.decode('utf8')))


if __name__ == '__main__':
    main()
StarcoderdataPython
9763264
<filename>ss.py
import os
from flask import Flask, request, render_template, redirect, url_for, g
from flask_pymongo import PyMongo
import flask_pymongo
from flask_babel import Babel
from storyzer import storyze, format_story
from bson.objectid import ObjectId

app = Flask(__name__)
app.debug = True
babel = Babel(app)

app.config['MONGO_URI'] = os.environ.get('MONGOLAB_URI')
app.config['GSE_API_KEY'] = os.environ.get('GSE_API_KEY')
app.config['GSE_CX_ID'] = os.environ.get('GSE_CX_ID')

mongo = PyMongo(app)


@app.before_request
def before():
    if request.view_args and 'lang_code' in request.view_args:
        g.current_lang = request.view_args['lang_code']
        request.view_args.pop('lang_code')


@babel.localeselector
def get_locale():
    return g.get('current_lang', 'en')


@app.route('/')
def root():
    return redirect(url_for('home', lang_code='pt_br'))


@app.route('/<lang_code>', methods=['GET', 'POST'])
def home():
    if request.method == 'POST':
        # TODO: sanitize lines
        st = storyze(request.form['title'], request.form['text'], request.form['author'])
        story_id = mongo.db.stories.insert(st)
        return redirect(get_locale() + '/story/' + str(story_id))
    stories = mongo.db.stories.find().sort("_id", flask_pymongo.DESCENDING)[:15]
    return render_template('home.html', stories=stories)


@app.route('/<lang_code>/story/<story_id>')
def story(story_id):
    st = mongo.db.stories.find_one({"_id": ObjectId(story_id)})
    st = format_story(st)
    return render_template('story.html', story=st)


@app.route('/<lang_code>/about')
def about():
    return render_template('about.html')


if __name__ == "__main__":
    app.run()
StarcoderdataPython
79435
import unittest

import numpy as np
import logging

from dsbox.ml.neural_networks.keras_factory.text_models import LSTMFactory
from dsbox.ml.neural_networks.processing.workflow import TextNeuralNetPipeline, ImageNeuralNetPipeline

logging.getLogger("tensorflow").setLevel(logging.WARNING)

np.random.seed(42)


class TestPipeline(unittest.TestCase):
    def test_fit_predict_text_nn_pipeline_should_return_some_result(self):
        # given
        x_train = np.array(['this is really really awesome !',
                            'it is so awesome !',
                            'that sucks']
                           )
        y_train = np.array([1, 1, 0])

        # when
        model = TextNeuralNetPipeline(factory_class=LSTMFactory, num_labels=2)
        model.fit(x_train, y_train, verbose=0)

        x_test = np.array(['it is really awesome !'])
        y_pred = model.predict(x_test)

        # then
        self.assertIsNotNone(y_pred)

    def test_fit_predict_proba_text_nn_pipeline_should_return_some_result(self):
        # given
        x_train = np.array(['this is really really awesome !',
                            'it is so awesome !',
                            'that sucks']
                           )
        y_train = np.array([1, 1, 0])

        # when
        model = TextNeuralNetPipeline(factory_class=LSTMFactory, num_labels=2)
        model.fit(x_train, y_train, verbose=0)

        x_test = np.array(['it is really awesome !'])
        y_pred = model.predict_proba(x_test)[0]

        # then
        self.assertIsNotNone(y_pred)

    def test_fit_image_nn_workflow_should_set_params_automatically(self):
        # given
        workflow = ImageNeuralNetPipeline(weights="imagenet")

        # when
        workflow.fit()

        # then
        self.assertTupleEqual((299, 299), workflow.img_size_)
        self.assertEqual("block14_sepconv2_act", workflow.last_conv_layer_name_)
        self.assertListEqual(["avg_pool", "predictions"], workflow.classifier_layer_names_)
StarcoderdataPython
3254963
<gh_stars>0 #!/usr/bin/env python import sys,nuri,os,numpy import matplotlib.pyplot as plt import matplotlib.dates as md from datetime import datetime,timedelta def check24hrs(date): """ This operation will display the active periods for which data are available from every sensors. Parameters ---------- date : str Year and month to display activity from. The format shoud be YYYY-MM. """ # Check if date is given if date==None: print('date missing...') quit() # List all the months dates = numpy.empty((0,5)) y0 = int(date.split('-')[0]) m0 = int(date.split('-')[1]) d0 = datetime(y0,m0,1) y1 = y0 if m0<12 else y0+1 m1 = m0+1 if m0<12 else 1 d1 = datetime(y1,m1,1)-timedelta(hours=1) dt = timedelta(hours=1) dates = numpy.arange(d0,d1,dt) # Download metadata from Google Drive sys.stderr.write('Retrieve information from Google Drive...') os.system('skicka ls -r /MagneticFieldData/%s/%s/ > data'%(y0,m0)) data = numpy.loadtxt('data',dtype=str,delimiter='\n') print >>sys.stderr,' done!' # List file path for each date and each station sys.stderr.write('Select active hours for each station...') st0,st1,st2,st3,st4 = [],[],[],[],[] for d in dates: year = d.astype(object).year month = d.astype(object).month day = d.astype(object).day hour = d.astype(object).hour path = 'MagneticFieldData/%i/%i/%i/%i/'%(year,month,day,hour) fname = '%i-%i-%i_%i-xx.zip'%(year,month,day,hour) st0.append(path+'NURI-station/' +fname) st1.append(path+'NURI-station-01/'+fname) st2.append(path+'NURI-station-02/'+fname) st3.append(path+'NURI-station-03/'+fname) st4.append(path+'NURI-station-04/'+fname) st0 = numpy.array([1 if path in data else 0 for path in st0]) st1 = numpy.array([1 if path in data else 0 for path in st1]) st2 = numpy.array([1 if path in data else 0 for path in st2]) st3 = numpy.array([1 if path in data else 0 for path in st3]) st4 = numpy.array([1 if path in data else 0 for path in st4]) print >>sys.stderr,' done!' # Write down information in text file print 'Save information in ASCII file...' o = open('%i-%02i.dat'%(y0,m0),'w') for d in dates: year = d.astype(object).year month = d.astype(object).month day = d.astype(object).day hour = d.astype(object).hour path = 'MagneticFieldData/%i/%i/%i/%i/'%(year,month,day,hour) fname = '%i-%i-%i_%i-xx.zip'%(year,month,day,hour) o.write('%i-%02i-%02i_%02i'%(year,month,day,hour)) o.write(' NURI-station ') if path+'NURI-station/' +fname in data else o.write(' - ') o.write(' NURI-station-01') if path+'NURI-station-01/'+fname in data else o.write(' - ') o.write(' NURI-station-02') if path+'NURI-station-01/'+fname in data else o.write(' - ') o.write(' NURI-station-03') if path+'NURI-station-01/'+fname in data else o.write(' - ') o.write(' NURI-station-04') if path+'NURI-station-01/'+fname in data else o.write(' - ') o.write('\n') o.close() dates = [d.astype(object) for d in dates] plt.rc('font', size=2, family='serif') plt.rc('axes', labelsize=10, linewidth=0.2) plt.rc('legend', fontsize=2, handlelength=10) plt.rc('xtick', labelsize=7) plt.rc('ytick', labelsize=7) plt.rc('lines', lw=0.2, mew=0.2) plt.rc('grid', linewidth=0.2) fig = plt.figure(figsize=(10,6)) plt.subplots_adjust(left=0.07, right=0.95, bottom=0.1, top=0.96, hspace=0.2, wspace=0) print 'Plot active time for station 1...' ax1 = fig.add_subplot(511) ax1.bar(dates,st1,width=0.01,edgecolor='none',color='green') ax1.tick_params(direction='in') ax1.set_ylabel('Station 1') ax1.xaxis_date() plt.yticks([]) ax1.grid() print 'Plot active time for station 2...' 
ax = fig.add_subplot(512,sharex=ax1,sharey=ax1) ax.bar(dates,st2,width=0.01,edgecolor='none',color='green') ax.tick_params(direction='in') ax.set_ylabel('Station 2') ax.xaxis_date() plt.yticks([]) ax.grid() print 'Plot active time for station 3...' ax = fig.add_subplot(513,sharex=ax1,sharey=ax1) ax.bar(dates,st3,width=0.01,edgecolor='none',color='green') ax.tick_params(direction='in') ax.set_ylabel('Station 3') ax.xaxis_date() plt.yticks([]) ax.grid() print 'Plot active time for station 4...' ax = fig.add_subplot(514,sharex=ax1,sharey=ax1) ax.bar(dates,st4,width=0.01,edgecolor='none',color='green') ax.tick_params(direction='in') ax.set_ylabel('Station 4') ax.xaxis_date() plt.yticks([]) ax.grid() print 'Plot active time for station 0...' ax = fig.add_subplot(515,sharex=ax1,sharey=ax1) ax.bar(dates,st0,width=0.01,edgecolor='none',color='green') ax.tick_params(direction='in') ax.set_ylabel('Station 0') ax.xaxis_date() plt.yticks([]) ax.grid() ax.set_xlabel(r'Hourly activity in %s %i (UTC)'%(d0.strftime("%B"),y0)) ax1.xaxis.set_major_formatter(md.DateFormatter('%d')) ax1.xaxis.set_major_locator(md.DayLocator()) ax1.set_xlim(d0,d1) ax1.set_ylim(0,1) plt.savefig('%i-%02i.pdf'%(y0,m0),dpi=80)
StarcoderdataPython
5081430
class Message(object):
    def __init__(self, processor, *args):
        self.content = args
        self.processor = processor
        self.keywords = (processor.logger._ident, processor.name)

    def strcontent(self):
        return " ".join(map(str, self.content))

    def strprefix(self):
        return '[%s] ' % ":".join(map(str, self.keywords))

    def __str__(self):
        return self.strprefix() + self.strcontent()


class Processor(object):
    def __init__(self, logger, name, consume):
        self.logger = logger
        self.name = name
        self.consume = consume

    def __call__(self, *args):
        try:
            consume = self.logger._override
        except AttributeError:
            consume = self.consume
        if consume is not None:
            msg = Message(self, *args)
            consume(msg)


class Logger(object):
    _key2logger = {}

    def __init__(self, ident):
        self._ident = ident
        self._key2logger[ident] = self
        self._keywords = ()

    def set_sub(self, **kwargs):
        for name, value in kwargs.items():
            self._setsub(name, value)

    def ensure_sub(self, **kwargs):
        for name, value in kwargs.items():
            if not hasattr(self, name):
                self._setsub(name, value)

    def set_override(self, consumer):
        self._override = lambda msg: consumer(msg)

    def del_override(self):
        try:
            del self._override
        except AttributeError:
            pass

    def _setsub(self, name, dest):
        assert "_" not in name
        setattr(self, name, Processor(self, name, dest))


def get(ident="global", **kwargs):
    """ return the Logger with id 'ident', instantiating if appropriate """
    try:
        log = Logger._key2logger[ident]
    except KeyError:
        log = Logger(ident)
    log.ensure_sub(**kwargs)
    return log
StarcoderdataPython
8076955
<filename>byterun/pyobj.py<gh_stars>10-100 """Implementations of Python fundamental objects for Byterun.""" # TODO(ampere): Add doc strings and remove this. # pylint: disable=missing-docstring import collections import inspect import types import six PY3, PY2 = six.PY3, not six.PY3 def make_cell(value): # Thanks to <NAME> for help with this bit of twistiness. # Construct an actual cell object by creating a closure right here, # and grabbing the cell object out of the function we create. fn = (lambda x: lambda: x)(value) if PY3: return fn.__closure__[0] else: return fn.func_closure[0] class Function(object): __slots__ = [ 'func_code', 'func_name', 'func_defaults', 'func_globals', 'func_locals', 'func_dict', 'func_closure', '__name__', '__dict__', '__doc__', '_vm', '_func', ] CO_OPTIMIZED = 0x0001 CO_NEWLOCALS = 0x0002 CO_VARARGS = 0x0004 CO_VARKEYWORDS = 0x0008 CO_NESTED = 0x0010 CO_GENERATOR = 0x0020 CO_NOFREE = 0x0040 CO_FUTURE_DIVISION = 0x2000 CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 CO_FUTURE_WITH_STATEMENT = 0x8000 CO_FUTURE_PRINT_FUNCTION = 0x10000 CO_FUTURE_UNICODE_LITERALS = 0x20000 def __init__(self, name, code, globs, defaults, closure, vm): self._vm = vm self.func_code = code self.func_name = self.__name__ = name or code.co_name self.func_defaults = tuple(defaults) self.func_globals = globs self.func_locals = self._vm.frame.f_locals self.__dict__ = {} self.func_closure = closure self.__doc__ = code.co_consts[0] if code.co_consts else None # Sometimes, we need a real Python function. This is for that. kw = { 'argdefs': self.func_defaults, } if closure: kw['closure'] = tuple(make_cell(0) for _ in closure) self._func = types.FunctionType(code, globs, **kw) def __repr__(self): # pragma: no cover return '<Function %s at 0x%08x>' % ( self.func_name, id(self) ) def __get__(self, instance, owner): if instance is not None: return Method(instance, owner, self) if PY2: return Method(None, owner, self) else: return self def __call__(self, *args, **kwargs): if PY2 and self.func_name in ['<setcomp>', '<dictcomp>', '<genexpr>']: # D'oh! http://bugs.python.org/issue19611 Py2 doesn't know how to # inspect set comprehensions, dict comprehensions, or generator # expressions properly. They are always functions of one argument, # so just do the right thing. assert len(args) == 1 and not kwargs, 'Surprising comprehension!' callargs = {'.0': args[0]} else: callargs = inspect.getcallargs(self._func, *args, **kwargs) frame = self._vm.make_frame( self.func_code, callargs, self.func_globals, self.func_locals ) if self.func_code.co_flags & self.CO_GENERATOR: gen = Generator(frame, self._vm) frame.generator = gen retval = gen else: retval = self._vm.run_frame(frame) return retval class Class(object): """ The VM level mirror of python class type objects. """ def __init__(self, name, bases, methods, vm): self._vm = vm self.__name__ = name self.__bases__ = bases self.__mro__ = self._compute_mro(self) self.locals = dict(methods) self.locals['__name__'] = self.__name__ self.locals['__mro__'] = self.__mro__ self.locals['__bases__'] = self.__bases__ @classmethod def mro_merge(cls, seqs): """ Merge a sequence of MROs into a single resulting MRO. This code is copied from the following URL with print statments removed. 
https://www.python.org/download/releases/2.3/mro/ """ res = [] while True: nonemptyseqs = [seq for seq in seqs if seq] if not nonemptyseqs: return res for seq in nonemptyseqs: # find merge candidates among seq heads cand = seq[0] nothead = [s for s in nonemptyseqs if cand in s[1:]] if nothead: cand = None # reject candidate else: break if not cand: raise TypeError("Illegal inheritance.") res.append(cand) for seq in nonemptyseqs: # remove candidate if seq[0] == cand: del seq[0] @classmethod def _compute_mro(cls, c): """ Compute the class precedence list (mro) according to C3. This code is copied from the following URL with print statments removed. https://www.python.org/download/releases/2.3/mro/ """ return tuple(cls.mro_merge([[c]] + [list(base.__mro__) for base in c.__bases__] + [list(c.__bases__)])) def __call__(self, *args, **kw): return self._vm.make_instance(self, args, kw) def __repr__(self): # pragma: no cover return '<Class %s at 0x%08x>' % (self.__name__, id(self)) def resolve_attr(self, name): """ Find an attribute in self and return it raw. This does not handle properties or method wrapping. """ for base in self.__mro__: # The following code does a double lookup on the dict, however # measurements show that this is faster than either a special # sentinel value or catching KeyError. # Handle both VM classes and python host environment classes. if isinstance(base, Class): if name in base.locals: return base.locals[name] else: if name in base.__dict__: # Avoid using getattr so we can handle method wrapping return base.__dict__[name] raise AttributeError( "%r class has no attribute %r" % (self.__name__, name) ) def __getattr__(self, name): val = self.resolve_attr(name) # Check if we have a descriptor get = getattr(val, '__get__', None) if get: return get(None, self) # Not a descriptor, return the value. return val class Object(object): def __init__(self, _class, args, kw): # pylint: disable=protected-access self._vm = _class._vm self._class = _class self.locals = {} if '__init__' in _class.locals: _class.locals['__init__'](self, *args, **kw) def __repr__(self): # pragma: no cover return '<%s Instance at 0x%08x>' % (self._class.__name__, id(self)) def __getattr__(self, name): if name in self.locals: val = self.locals[name] else: try: val = self._class.resolve_attr(name) except AttributeError: raise AttributeError( "%r object has no attribute %r" % (self._class.__name__, name) ) # Check if we have a descriptor get = getattr(val, '__get__', None) if get: return get(self, self._class) # Not a descriptor, return the value. return val # TODO(ampere): Does this need a __setattr__ and __delattr__ implementation? class Method(object): def __init__(self, obj, _class, func): self.im_self = obj self.im_class = _class self.im_func = func def __repr__(self): # pragma: no cover name = "%s.%s" % (self.im_class.__name__, self.im_func.func_name) if self.im_self is not None: return '<Bound Method %s of %s>' % (name, self.im_self) else: return '<Unbound Method %s>' % (name,) def __call__(self, *args, **kwargs): if self.im_self is not None: return self.im_func(self.im_self, *args, **kwargs) else: return self.im_func(*args, **kwargs) class Cell(object): """A fake cell for closures. Closures keep names in scope by storing them not in a frame, but in a separate object called a cell. Frames share references to cells, and the LOAD_DEREF and STORE_DEREF opcodes get and set the value from cells. This class acts as a cell, though it has to jump through two hoops to make the simulation complete: 1. 
In order to create actual FunctionType functions, we have to have actual cell objects, which are difficult to make. See the twisty double-lambda in __init__. 2. Actual cell objects can't be modified, so to implement STORE_DEREF, we store a one-element list in our cell, and then use [0] as the actual value. """ def __init__(self, value): self.contents = value def get(self): return self.contents def set(self, value): self.contents = value Block = collections.namedtuple("Block", "type, handler, level") class Frame(object): """ An interpreter frame. This contains the local value and block stacks and the associated code and pointer. The most complex usage is with generators in which a frame is stored and then repeatedly reactivated. Other than that frames are created executed and then discarded. Attributes: f_code: The code object this frame is executing. f_globals: The globals dict used for global name resolution. f_locals: Similar for locals. f_builtins: Similar for builtins. f_back: The frame above self on the stack. f_lineno: The first line number of the code object. f_lasti: The instruction pointer. Despite its name (which matches actual python frames) this points to the next instruction that will be executed. block_stack: A stack of blocks used to manage exceptions, loops, and "with"s. data_stack: The value stack that is used for instruction operands. generator: None or a Generator object if this frame is a generator frame. """ def __init__(self, f_code, f_globals, f_locals, f_back): self.f_code = f_code self.f_globals = f_globals self.f_locals = f_locals self.f_back = f_back if f_back: self.f_builtins = f_back.f_builtins else: self.f_builtins = f_locals['__builtins__'] if hasattr(self.f_builtins, '__dict__'): self.f_builtins = self.f_builtins.__dict__ self.f_lineno = f_code.co_firstlineno self.f_lasti = 0 self.cells = {} if f_code.co_cellvars: if not f_back.cells: f_back.cells = {} for var in f_code.co_cellvars: # Make a cell for the variable in our locals, or None. cell = Cell(self.f_locals.get(var)) f_back.cells[var] = self.cells[var] = cell if f_code.co_freevars: if not self.cells: self.cells = {} for var in f_code.co_freevars: assert self.cells is not None assert f_back.cells, "f_back.cells: %r" % (f_back.cells,) self.cells[var] = f_back.cells[var] # The stack holding exception and generator handling information self.block_stack = [] # The stack holding input and output of bytecode instructions self.data_stack = [] self.generator = None def push(self, *vals): """Push values onto the value stack.""" self.data_stack.extend(vals) def __repr__(self): # pragma: no cover return '<Frame at 0x%08x: %r @ %d>' % ( id(self), self.f_code.co_filename, self.f_lineno ) def line_number(self): """Get the current line number the frame is executing.""" # We don't keep f_lineno up to date, so calculate it based on the # instruction address and the line number table. lnotab = self.f_code.co_lnotab byte_increments = six.iterbytes(lnotab[0::2]) line_increments = six.iterbytes(lnotab[1::2]) byte_num = 0 line_num = self.f_code.co_firstlineno for byte_incr, line_incr in zip(byte_increments, line_increments): byte_num += byte_incr if byte_num > self.f_lasti: break line_num += line_incr return line_num class Generator(object): def __init__(self, g_frame, vm): self.gi_frame = g_frame self.vm = vm self.first = True self.finished = False def __iter__(self): return self def next(self): if self.finished: raise StopIteration # Ordinary iteration is like sending None into a generator. 
# Push the value onto the frame stack. if not self.first: self.gi_frame.push(None) self.first = False # To get the next value from an iterator, push its frame onto the # stack, and let it run. val = self.vm.resume_frame(self.gi_frame) if self.finished: raise StopIteration return val __next__ = next
StarcoderdataPython
9621601
""" This file sets default constants that are used throughout the package. Objects set as defaults are set in classes (so that we don't have to import anything here). Mostly I did this so I could easily reuse defaults and change them to match my data structure. Don't look at me like that. """ """Default Environmental Variable Lookup""" # This is a dict, keyed by the class setattr variable name, of tuples (env name, coercion function, default value) SBATCH_VARS = dict(output_dir=('RUNDIR', str, None), input_dir=('DATADIR', str, None), rank=('SLURM_PROCID', int, 0), cores=('SLURM_NTASKS_PER_NODE', int, 1), tasks=('SLURM_NTASKS', int, 1), node=('SLURM_NODEID', int, 0), num_nodes=('SLURM_JOB_NUM_NODES', int, 1)) SBATCH_VARS_FOR_KVS = ["rank", "cores", "tasks", "node", "num_nodes"] SBATCH_VARS_FOR_WORKFLOW = ["output_dir", "input_dir"] """Default Data File Settings""" DEFAULT_PD_INPUT_SETTINGS = dict(sep="\t") DEFAULT_EXPRESSION_FILE = "expression.tsv" DEFAULT_TFNAMES_FILE = "tf_names.tsv" DEFAULT_METADATA_FILE = "meta_data.tsv" DEFAULT_PRIORS_FILE = "gold_standard.tsv" DEFAULT_GOLDSTANDARD_FILE = "gold_standard.tsv" """Default TFAWorkflow Parameters""" DEFAULT_DELTMIN = 0 DEFAULT_DELTMAX = 120 DEFAULT_TAU = 45 DEFAULT_GS_FILTER_METHOD = 'keep_all_gold_standard' """Defaults For Regression""" # Default number of predictors to include in the model DEFAULT_nS = 10 # Default weight for priors & Non-priors # If prior_weight is the same as no_prior_weight: # Priors will be included in the pp matrix before the number of predictors is reduced to nS # They won't get special treatment in the model though DEFAULT_prior_weight = 1 DEFAULT_no_prior_weight = 1 # Throw away the priors which have a CLR that is 0 before the number of predictors is reduced by BIC DEFAULT_filter_priors_for_clr = False
StarcoderdataPython
12847996
import sys import time import threading import platform import subprocess import os try: if platform.system() == 'Windows': import win32console # TODO: we should win32console anyway so we could just omit colorama import colorama colorama.init() except ModuleNotFoundError: print("Could not init terminal features.") sys.stdout.flush() pass def get_serial_number_str(device): if hasattr(device, 'serial_number'): return format(device.serial_number, 'x').upper() else: return "[unknown serial number]" ## Threading utils ## class Event(): """ Alternative to threading.Event(), enhanced by the subscribe() function that the original fails to provide. @param Trigger: if supplied, the newly created event will be triggered as soon as the trigger event becomes set """ def __init__(self, trigger=None): self._evt = threading.Event() self._subscribers = [] self._mutex = threading.Lock() if not trigger is None: trigger.subscribe(lambda: self.set()) def is_set(self): return self._evt.is_set() def set(self): """ Sets the event and invokes all subscribers if the event was not already set """ self._mutex.acquire() try: if not self._evt.is_set(): self._evt.set() for s in self._subscribers: s() finally: self._mutex.release() def subscribe(self, handler): """ Invokes the specified handler exactly once as soon as the specified event is set. If the event is already set, the handler is invoked immediately. Returns a function that can be invoked to unsubscribe. """ if handler is None: raise TypeError self._mutex.acquire() try: self._subscribers.append(handler) if self._evt.is_set(): handler() finally: self._mutex.release() return handler def unsubscribe(self, handler): self._mutex.acquire() try: self._subscribers.pop(self._subscribers.index(handler)) finally: self._mutex.release() def wait(self, timeout=None): if not self._evt.wait(timeout=timeout): raise TimeoutError() def trigger_after(self, timeout): """ Triggers the event after the specified timeout. This function returns immediately. """ def delayed_trigger(): if not self.wait(timeout=timeout): self.set() threading.Thread(target=delayed_trigger, daemon=True).start() def wait_any(timeout=None, *events): """ Blocks until any of the specified events are triggered. 
Returns the index of the event that was triggerd or raises a TimeoutError Param timeout: A timeout in seconds """ or_event = threading.Event() subscriptions = [] for event in events: subscriptions.append((event, event.subscribe(lambda: or_event.set()))) or_event.wait(timeout=timeout) for event, sub in subscriptions: event.unsubscribe(sub) for i in range(len(events)): if events[i].is_set(): return i raise TimeoutError() ## Log utils ## class Logger(): """ Logs messages to stdout """ COLOR_DEFAULT = 0 COLOR_GREEN = 1 COLOR_CYAN = 2 COLOR_YELLOW = 3 COLOR_RED = 4 _VT100Colors = { COLOR_GREEN: '\x1b[92;1m', COLOR_CYAN: '\x1b[96;1m', COLOR_YELLOW: '\x1b[93;1m', COLOR_RED: '\x1b[91;1m', COLOR_DEFAULT: '\x1b[0m' } _Win32Colors = { COLOR_GREEN: 0x0A, COLOR_CYAN: 0x0B, COLOR_YELLOW: 0x0E, COLOR_RED: 0x0C, COLOR_DEFAULT: 0x07 } def __init__(self, verbose=True): self._prefix = '' self._skip_bottom_line = False # If true, messages are printed one line above the cursor self._verbose = verbose self._print_lock = threading.Lock() if platform.system() == 'Windows': self._stdout_buf = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE) def indent(self, prefix=' '): indented_logger = Logger() indented_logger._prefix = self._prefix + prefix return indented_logger def print_on_second_last_line(self, text, color): """ Prints a text on the second last line. This can be used to print a message above the command prompt. If the command prompt spans multiple lines there will be glitches. If the printed text spans multiple lines there will also be glitches (though this could be fixed). """ if platform.system() == 'Windows': # Windows <10 doesn't understand VT100 escape codes and the colorama # also doesn't support the specific escape codes we need so we use the # native Win32 API. info = self._stdout_buf.GetConsoleScreenBufferInfo() cursor_pos = info['CursorPosition'] scroll_rect=win32console.PySMALL_RECTType( Left=0, Top=1, Right=info['Window'].Right, Bottom=cursor_pos.Y-1) scroll_dest = win32console.PyCOORDType(scroll_rect.Left, scroll_rect.Top-1) self._stdout_buf.ScrollConsoleScreenBuffer( scroll_rect, scroll_rect, scroll_dest, # clipping rect is same as scroll rect u' ', Logger._Win32Colors[color]) # fill with empty cells with the desired color attributes line_start = win32console.PyCOORDType(0, cursor_pos.Y-1) self._stdout_buf.WriteConsoleOutputCharacter(text, line_start) else: # Assume we're in a terminal that interprets VT100 escape codes. 
# TODO: test on macOS # Escape character sequence: # ESC 7: store cursor position # ESC 1A: move cursor up by one # ESC 1S: scroll entire viewport by one # ESC 1L: insert 1 line at cursor position # (print text) # ESC 8: restore old cursor position self._print_lock.acquire() sys.stdout.write('\x1b7\x1b[1A\x1b[1S\x1b[1L') sys.stdout.write(Logger._VT100Colors[color] + text + Logger._VT100Colors[Logger.COLOR_DEFAULT]) sys.stdout.write('\x1b8') sys.stdout.flush() self._print_lock.release() def print_colored(self, text, color): if self._skip_bottom_line: self.print_on_second_last_line(text, color) else: # On Windows, colorama does the job of interpreting the VT100 escape sequences self._print_lock.acquire() sys.stdout.write(Logger._VT100Colors[color] + text + Logger._VT100Colors[Logger.COLOR_DEFAULT] + '\n') sys.stdout.flush() self._print_lock.release() def debug(self, text): if self._verbose: self.print_colored(self._prefix + text, Logger.COLOR_DEFAULT) def success(self, text): self.print_colored(self._prefix + text, Logger.COLOR_GREEN) def info(self, text): self.print_colored(self._prefix + text, Logger.COLOR_DEFAULT) def notify(self, text): self.print_colored(self._prefix + text, Logger.COLOR_CYAN) def warn(self, text): self.print_colored(self._prefix + text, Logger.COLOR_YELLOW) def error(self, text): # TODO: write to stderr self.print_colored(self._prefix + text, Logger.COLOR_RED)
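

# --- Hedged usage sketch (added for clarity; not part of the original module).
# It exercises the Event class defined above: unlike threading.Event, handlers
# registered with subscribe() run when the event is set, or immediately if the
# event is already set.
if __name__ == "__main__":
    evt = Event()
    evt.subscribe(lambda: print("fired"))   # runs once evt.set() is called
    evt.set()                               # -> prints "fired"
    evt.subscribe(lambda: print("late"))    # already set -> runs immediately

    logger = Logger()
    logger.success("event demo finished")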
StarcoderdataPython
1997937
<gh_stars>0 import logging import time import uuid from pydoc import locate from minifi.core.InputPort import InputPort from minifi.core.DockerTestCluster import DockerTestCluster from minifi.core.DockerTestDirectoryBindings import DockerTestDirectoryBindings from minifi.validators.EmptyFilesOutPutValidator import EmptyFilesOutPutValidator from minifi.validators.NoFileOutPutValidator import NoFileOutPutValidator from minifi.validators.SingleFileOutputValidator import SingleFileOutputValidator from minifi.validators.MultiFileOutputValidator import MultiFileOutputValidator from minifi.validators.SingleOrMultiFileOutputValidator import SingleOrMultiFileOutputValidator from minifi.validators.NoContentCheckFileNumberValidator import NoContentCheckFileNumberValidator from minifi.validators.NumFileRangeValidator import NumFileRangeValidator from minifi.validators.SingleJSONFileOutputValidator import SingleJSONFileOutputValidator class MiNiFi_integration_test(): def __init__(self, image_store): self.test_id = str(uuid.uuid4()) self.cluster = DockerTestCluster(image_store) self.connectable_nodes = [] # Remote process groups are not connectables self.remote_process_groups = [] self.file_system_observer = None self.docker_directory_bindings = DockerTestDirectoryBindings() self.docker_directory_bindings.create_new_data_directories(self.test_id) self.cluster.set_directory_bindings(self.docker_directory_bindings.get_directory_bindings(self.test_id), self.docker_directory_bindings.get_data_directories(self.test_id)) def __del__(self): self.cleanup() def cleanup(self): self.cluster.cleanup() def docker_path_to_local_path(self, docker_path): return self.docker_directory_bindings.docker_path_to_local_path(self.test_id, docker_path) def acquire_container(self, name, engine='minifi-cpp'): return self.cluster.acquire_container(name, engine) def wait_for_container_startup_to_finish(self, container_name): startup_success = self.cluster.wait_for_startup_log(container_name, 120) if not startup_success: logging.error("Cluster startup failed for %s", container_name) self.cluster.log_app_output() return startup_success def start_kafka_broker(self): self.cluster.acquire_container('kafka-broker', 'kafka-broker') self.cluster.deploy('zookeeper') self.cluster.deploy('kafka-broker') assert self.wait_for_container_startup_to_finish('kafka-broker') def start(self): logging.info("MiNiFi_integration_test start") self.cluster.deploy_flow() for container_name in self.cluster.containers: assert self.wait_for_container_startup_to_finish(container_name) def add_node(self, processor): if processor.get_name() in (elem.get_name() for elem in self.connectable_nodes): raise Exception("Trying to register processor with an already registered name: \"%s\"" % processor.get_name()) self.connectable_nodes.append(processor) def get_or_create_node_by_name(self, node_name): node = self.get_node_by_name(node_name) if node is None: if node_name == "RemoteProcessGroup": raise Exception("Trying to register RemoteProcessGroup without an input port or address.") node = locate("minifi.processors." + node_name + "." 
+ node_name)() node.set_name(node_name) self.add_node(node) return node def get_node_by_name(self, name): for node in self.connectable_nodes: if name == node.get_name(): return node raise Exception("Trying to fetch unknown node: \"%s\"" % name) def add_remote_process_group(self, remote_process_group): if remote_process_group.get_name() in (elem.get_name() for elem in self.remote_process_groups): raise Exception("Trying to register remote_process_group with an already registered name: \"%s\"" % remote_process_group.get_name()) self.remote_process_groups.append(remote_process_group) def get_remote_process_group_by_name(self, name): for node in self.remote_process_groups: if name == node.get_name(): return node raise Exception("Trying to fetch unknow node: \"%s\"" % name) @staticmethod def generate_input_port_for_remote_process_group(remote_process_group, name): input_port_node = InputPort(name, remote_process_group) # Generate an MD5 hash unique to the remote process group id input_port_node.set_uuid(uuid.uuid3(remote_process_group.get_uuid(), "input_port")) return input_port_node def add_test_data(self, path, test_data, file_name=str(uuid.uuid4())): self.docker_directory_bindings.put_file_to_docker_path(self.test_id, path, file_name, test_data.encode('utf-8')) def put_test_resource(self, file_name, contents): self.docker_directory_bindings.put_test_resource(self.test_id, file_name, contents) def rm_out_child(self): self.docker_directory_bindings.rm_out_child(self.test_id) def add_file_system_observer(self, file_system_observer): self.file_system_observer = file_system_observer def check_for_no_files_generated(self, timeout_seconds): output_validator = NoFileOutPutValidator() output_validator.set_output_dir(self.file_system_observer.get_output_dir()) self.check_output(timeout_seconds, output_validator, 1) def check_for_single_file_with_content_generated(self, content, timeout_seconds): output_validator = SingleFileOutputValidator(content) output_validator.set_output_dir(self.file_system_observer.get_output_dir()) self.check_output(timeout_seconds, output_validator, 1) def check_for_single_json_file_with_content_generated(self, content, timeout_seconds): output_validator = SingleJSONFileOutputValidator(content) output_validator.set_output_dir(self.file_system_observer.get_output_dir()) self.check_output(timeout_seconds, output_validator, 1) def check_for_multiple_files_generated(self, file_count, timeout_seconds, expected_content=[]): output_validator = MultiFileOutputValidator(file_count, expected_content) output_validator.set_output_dir(self.file_system_observer.get_output_dir()) self.check_output(timeout_seconds, output_validator, file_count) def check_for_at_least_one_file_with_content_generated(self, content, timeout_seconds): output_validator = SingleOrMultiFileOutputValidator(content) output_validator.set_output_dir(self.file_system_observer.get_output_dir()) self.check_output(timeout_seconds, output_validator, 1) def check_for_num_files_generated(self, num_flowfiles, timeout_seconds): output_validator = NoContentCheckFileNumberValidator(num_flowfiles) output_validator.set_output_dir(self.file_system_observer.get_output_dir()) self.check_output(timeout_seconds, output_validator, max(1, num_flowfiles)) def check_for_num_file_range_generated(self, min_files, max_files, timeout_seconds): output_validator = NumFileRangeValidator(min_files, max_files) output_validator.set_output_dir(self.file_system_observer.get_output_dir()) self.check_output_force_wait(timeout_seconds, output_validator) 
def check_for_an_empty_file_generated(self, timeout_seconds): output_validator = EmptyFilesOutPutValidator() output_validator.set_output_dir(self.file_system_observer.get_output_dir()) self.check_output(timeout_seconds, output_validator, 1) def check_output_force_wait(self, timeout_seconds, output_validator): time.sleep(timeout_seconds) self.validate(output_validator) def check_output(self, timeout_seconds, output_validator, max_files): self.file_system_observer.wait_for_output(timeout_seconds, max_files) self.validate(output_validator) def validate(self, validator): self.cluster.log_app_output() assert not self.cluster.segfault_happened() assert validator.validate() def check_s3_server_object_data(self, s3_container_name, object_data): assert self.cluster.check_s3_server_object_data(s3_container_name, object_data) def check_s3_server_object_metadata(self, s3_container_name, content_type): assert self.cluster.check_s3_server_object_metadata(s3_container_name, content_type) def check_empty_s3_bucket(self, s3_container_name): assert self.cluster.is_s3_bucket_empty(s3_container_name) def check_http_proxy_access(self, http_proxy_container_name, url): assert self.cluster.check_http_proxy_access(http_proxy_container_name, url) def check_azure_storage_server_data(self, azure_container_name, object_data): assert self.cluster.check_azure_storage_server_data(azure_container_name, object_data) def wait_for_kafka_consumer_to_be_registered(self, kafka_container_name): assert self.cluster.wait_for_kafka_consumer_to_be_registered(kafka_container_name) def check_minifi_log_contents(self, line, timeout_seconds=60): for container in self.cluster.containers.values(): if container.get_engine() == "minifi-cpp": line_found = self.cluster.wait_for_app_logs(container.get_name(), line, timeout_seconds) if line_found: return assert False def check_query_results(self, postgresql_container_name, query, number_of_rows, timeout_seconds): assert self.cluster.check_query_results(postgresql_container_name, query, number_of_rows, timeout_seconds)
StarcoderdataPython
6440726
<filename>test/test_retinanet.py
import unittest
import torch
import numpy as np
from mitorch.models import *


class TestRetinaNet(unittest.TestCase):
    def test_mobilenetv2(self):
        self._test_model(MobileNetV2, 320)

    def test_mobilenetv3(self):
        self._test_model(MobileNetV3, 320)

    def test_mobilenetv3small(self):
        self._test_model(MobileNetV3Small, 320)

    def _test_model(self, model_class, input_size):
        model = RetinaNet(FeaturePyramidNetwork(model_class()), 3)
        model.eval()
        inputs = torch.tensor(np.random.rand(1, 3, input_size, input_size), dtype=torch.float32)
        outputs = model(inputs)
        self.assertIsNotNone(outputs)

        predictions = model.predictor(outputs)
        loss = model.loss(outputs, [[]])
        self.assertIsNotNone(predictions)
        self.assertIsNotNone(loss)


if __name__ == '__main__':
    unittest.main()
StarcoderdataPython
4949444
<gh_stars>1-10
import ghalton
import numpy as np

from src.autoks.distance.sampling.sampler import Sampler
from src.autoks.distance.sampling.scramble import scramble_array


def generate_halton(n: int, d: int):
    sequencer = ghalton.Halton(d)
    return sequencer.get(n)


def generate_generalized_halton(n: int, d: int):
    sequencer = ghalton.GeneralizedHalton(ghalton.EA_PERMS[:d])
    return sequencer.get(n)


def halton_sample(n_samples: int, n_dims: int, scramble: bool = True):
    samples = np.asarray(generate_halton(n_samples, n_dims))

    # Scramble the sample array in place
    if scramble:
        scramble_array(samples)

    return samples


def generalized_halton_sample(n_samples: int, n_dims: int):
    max_dims = 100
    if n_dims > max_dims:
        raise ValueError(
            f'{ghalton.GeneralizedHalton.__name__} supports up to {max_dims} spatial dimensions.')

    # Generate samples
    samples = generate_generalized_halton(n_samples, n_dims)
    return samples


class HaltonSampler(Sampler):
    """Halton sequence sampler."""

    def sample(self, n_points: int, n_dims: int) -> np.ndarray:
        return np.asarray(halton_sample(n_points, n_dims))


class GeneralizedHaltonSampler(Sampler):
    """Generalized Halton sequence sampler"""

    def sample(self, n_points: int, n_dims: int) -> np.ndarray:
        return np.asarray(generalized_halton_sample(n_points, n_dims))
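

# --- Hedged usage sketch (added for clarity; not part of the original module):
# drawing a small 2-D low-discrepancy point set with the samplers defined above.
# Assumes the Sampler base class takes no constructor arguments.
if __name__ == "__main__":
    pts = HaltonSampler().sample(n_points=8, n_dims=2)
    print(pts.shape)    # (8, 2); coordinates lie in the unit square
    gpts = GeneralizedHaltonSampler().sample(n_points=8, n_dims=2)
    print(gpts.shape)   # (8, 2)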
StarcoderdataPython
3294314
from __future__ import division from __future__ import print_function import datetime import json import logging import os import pickle import time import numpy as np import optimizers import torch from config import parser from models.base_models import NCModel, LPModel from utils.data_utils import load_data from utils.train_utils import get_dir_name, format_metrics import torch.cuda.profiler as profiler def test(args): np.random.seed(args.seed) torch.manual_seed(args.seed) if int(args.double_precision): torch.set_default_dtype(torch.float64) if int(args.cuda) >= 0: torch.cuda.manual_seed(args.seed) args.device = 'cuda:' + str(args.cuda) if int(args.cuda) >= 0 else 'cpu' args.patience = args.epochs if not args.patience else int(args.patience) logging.getLogger().setLevel(logging.INFO) if args.save: if not args.save_dir: dt = datetime.datetime.now() date = f"{dt.year}_{dt.month}_{dt.day}" models_dir = os.path.join(os.environ['LOG_DIR'], args.task, date) save_dir = get_dir_name(models_dir) else: save_dir = args.save_dir logging.basicConfig(level=logging.INFO, handlers=[ logging.FileHandler(os.path.join(save_dir, 'log.txt')), logging.StreamHandler() ]) logging.info(f'Using: {args.device}') logging.info("Using seed {}.".format(args.seed)) # Load data data = load_data(args, os.path.join(os.environ['DATAPATH'], args.dataset)) args.n_nodes, args.feat_dim = data['features'].shape if args.task == 'nc': Model = NCModel args.n_classes = int(data['labels'].max() + 1) logging.info(f'Num classes: {args.n_classes}') else: args.nb_false_edges = len(data['train_edges_false']) args.nb_edges = len(data['train_edges']) if args.task == 'lp': Model = LPModel else: Model = RECModel # No validation for reconstruction task args.eval_freq = args.epochs + 1 if not args.lr_reduce_freq: args.lr_reduce_freq = args.epochs # Model and optimizer model = Model(args) checkpoint_path="hgcn_chkpt/model.pth" model.load_state_dict(torch.load(checkpoint_path)) logging.info(str(model)) optimizer = getattr(optimizers, args.optimizer)(params=model.parameters(), lr=args.lr, weight_decay=args.weight_decay) lr_scheduler = torch.optim.lr_scheduler.StepLR( optimizer, step_size=int(args.lr_reduce_freq), gamma=float(args.gamma) ) tot_params = sum([np.prod(p.size()) for p in model.parameters()]) logging.info(f"Total number of parameters: {tot_params}") if args.cuda is not None and int(args.cuda) >= 0 : os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda) model = model.to(args.device) for x, val in data.items(): if torch.is_tensor(data[x]): data[x] = data[x].to(args.device) if len(args.time_file) == 0: model.eval() # set evaluation mode embeddings = model.encode(data['features'], data['adj_train_norm']) val_metrics = model.compute_metrics(embeddings, data, 'val') else: n_warmup = 50 n_sample = 50 model.eval() # set evaluation mode print("=== Running Warmup Passes") for i in range(0,n_warmup): embeddings = model.encode(data['features'], data['adj_train_norm']) val_metrics = model.compute_metrics(embeddings, data, 'val') print("=== Collecting Runtime over ", str(n_sample), " Passes") tic = time.perf_counter() for i in range(0,n_sample): embeddings = model.encode(data['features'], data['adj_train_norm']) val_metrics = model.compute_metrics(embeddings, data, 'val') toc = time.perf_counter() avg_runtime = float(toc - tic)/n_sample print("average runtime = ", avg_runtime) # write runtime to file f = open(args.time_file, "w") f.write(str(avg_runtime)+"\n") f.close() if __name__ == '__main__': parser.add_argument('--time_file', type=str, 
default='', help='timing output file') args = parser.parse_args() profiler.start() test(args) profiler.stop()
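

# --- Hedged sketch (added for clarity; not part of the original script): the
# warm-up-then-average timing pattern used inside test() above, factored into a
# standalone helper. The helper name is hypothetical.
def time_callable(fn, n_warmup=50, n_sample=50):
    for _ in range(n_warmup):          # warm up caches / lazy CUDA kernels
        fn()
    tic = time.perf_counter()
    for _ in range(n_sample):
        fn()
    toc = time.perf_counter()
    return (toc - tic) / n_sample      # average seconds per call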
StarcoderdataPython
3310925
# -*- coding: utf-8 -*- # Copyright (c) 2020 Nekokatt # Copyright (c) 2021 davfsa # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import builtins import operator import mock import pytest from hikari.internal import enums class TestEnum: @mock.patch.object(enums, "_Enum", new=NotImplemented) def test_init_first_enum_type_populates_Enum(self): class Enum(metaclass=enums._EnumMeta): pass assert enums._Enum is Enum @mock.patch.object(enums, "_Enum", new=NotImplemented) def test_init_first_enum_type_with_wrong_name_and_no_bases_raises_TypeError(self): with pytest.raises(TypeError): class Potato(metaclass=enums._EnumMeta): pass assert enums._Enum is NotImplemented def test_init_second_enum_type_with_no_bases_does_not_change_Enum_attribute_and_raises_TypeError(self): expect = enums._Enum with pytest.raises(TypeError): class Enum(metaclass=enums._EnumMeta): pass assert enums._Enum is expect @pytest.mark.parametrize( ("args", "kwargs"), [([str], {"metaclass": enums._EnumMeta}), ([enums.Enum], {"metaclass": enums._EnumMeta}), ([enums.Enum], {})], ) def test_init_enum_type_with_one_base_is_TypeError(self, args, kwargs): with pytest.raises(TypeError): class Enum(*args, **kwargs): pass @pytest.mark.parametrize( ("args", "kwargs"), [ ([enums.Enum, str], {"metaclass": enums._EnumMeta}), ([enums.Enum, str], {}), ], ) def test_init_enum_type_with_bases_in_wrong_order_is_TypeError(self, args, kwargs): with pytest.raises(TypeError): class Enum(*args, **kwargs): pass def test_init_with_more_than_2_types(self): with pytest.raises(TypeError): class Enum(enums.Enum, str, int): pass def test_init_with_less_than_2_types(self): with pytest.raises(TypeError): class Enum(enums.Enum): pass def test_init_enum_type_default_docstring_set(self): class Enum(str, enums.Enum): pass assert Enum.__doc__ == "An enumeration." 
def test_init_enum_type_disallows_objects_that_are_not_instances_of_the_first_base(self): with pytest.raises(TypeError): class Enum(str, enums.Enum): foo = 1 def test_init_enum_type_allows_any_object_if_it_has_a_dunder_name(self): class Enum(str, enums.Enum): __foo__ = 1 __bar = 2 assert Enum is not None def test_init_enum_type_allows_any_object_if_it_has_a_sunder_name(self): class Enum(str, enums.Enum): _foo_ = 1 _bar = 2 assert Enum is not None def test_init_enum_type_allows_methods(self): class Enum(int, enums.Enum): def foo(self): return "foo" assert Enum.foo(12) == "foo" def test_init_enum_type_allows_classmethods(self): class Enum(int, enums.Enum): @classmethod def foo(cls): assert cls is Enum return "foo" assert Enum.foo() == "foo" def test_init_enum_type_allows_staticmethods(self): class Enum(int, enums.Enum): @staticmethod def foo(): return "foo" assert Enum.foo() == "foo" def test_init_enum_type_allows_descriptors(self): class Enum(int, enums.Enum): @property def foo(self): return "foo" assert isinstance(Enum.foo, property) def test_init_enum_type_maps_names_in_members(self): class Enum(int, enums.Enum): foo = 9 bar = 18 baz = 27 @staticmethod def sm(): pass @classmethod def cm(cls): pass def m(self): pass @property def p(self): pass __dunder__ = "aaa" _sunder_ = "bbb" __priv = "ccc" _prot = "ddd" assert Enum.__members__ == {"foo": 9, "bar": 18, "baz": 27} def test_init_with_invalid_name(self): with pytest.raises(TypeError): class Enum(int, enums.Enum): mro = 420 def test_init_with_unhashable_value(self): with mock.patch.object(builtins, "hash", side_effect=TypeError): with pytest.raises(TypeError): class Enum(int, enums.Enum): test = 420 def test_init_with_duplicate(self): with pytest.raises(TypeError): class Enum(int, enums.Enum): test = 123 test = 321 def test_call_when_member(self): class Enum(int, enums.Enum): foo = 9 bar = 18 baz = 27 returned = Enum(9) assert returned == Enum.foo assert type(returned) == Enum def test_call_when_not_member(self): class Enum(int, enums.Enum): foo = 9 bar = 18 baz = 27 returned = Enum(69) assert returned == 69 assert type(returned) != Enum def test_getitem(self): class Enum(int, enums.Enum): foo = 9 bar = 18 baz = 27 returned = Enum["foo"] assert returned == Enum.foo assert type(returned) == Enum def test_contains(self): class Enum(int, enums.Enum): foo = 9 bar = 18 baz = 27 assert 9 in Enum assert 100 not in Enum def test_name(self): class Enum(int, enums.Enum): foo = 9 bar = 18 baz = 27 assert Enum.foo.name == "foo" def test_iter(self): class Enum(int, enums.Enum): foo = 9 bar = 18 baz = 27 a = [] for i in Enum: a.append(i) assert a == [Enum.foo, Enum.bar, Enum.baz] def test_repr(self): class Enum(int, enums.Enum): foo = 9 bar = 18 baz = 27 assert repr(Enum) == "<enum Enum>" assert repr(Enum.foo) == "<Enum.foo: 9>" def test_str(self): class Enum(int, enums.Enum): foo = 9 bar = 18 baz = 27 assert str(Enum) == "<enum Enum>" assert str(Enum.foo) == "foo" def test_can_overwrite_method(self): class TestEnum1(str, enums.Enum): FOO = "foo" def __str__(self) -> str: return "Ok" assert str(TestEnum1.FOO) == "Ok" @pytest.mark.parametrize(("type_", "value"), [(int, 42), (str, "ok"), (bytes, b"no"), (float, 4.56), (complex, 3j)]) def test_inherits_type_dunder_method_behaviour(self, type_, value): class TestEnum(type_, enums.Enum): BAR = value result = type_(TestEnum.BAR) assert type(result) is type_ assert result == value def test_allows_overriding_methods(self): class TestEnum(int, enums.Enum): BAR = 2222 def __int__(self): return 53 assert 
int(TestEnum.BAR) == 53 class TestIntFlag: @mock.patch.object(enums, "_Flag", new=NotImplemented) def test_init_first_flag_type_populates_Flag(self): class Flag(metaclass=enums._FlagMeta): a = 1 assert enums._Flag is Flag @mock.patch.object(enums, "_Flag", new=NotImplemented) def test_init_first_flag_type_with_wrong_name_and_no_bases_raises_TypeError(self): with pytest.raises(TypeError): class Potato(metaclass=enums._FlagMeta): a = 1 assert enums._Flag is NotImplemented def test_init_second_flag_type_with_no_bases_does_not_change_Flag_attribute_and_raises_TypeError(self): expect = enums._Flag with pytest.raises(TypeError): class Flag(metaclass=enums._FlagMeta): a = 1 assert enums._Flag is expect def test_init_flag_type_default_docstring_set(self): class Flag(enums.Flag): a = 1 assert Flag.__doc__ == "An enumeration." def test_init_flag_type_disallows_objects_that_are_not_instances_int(self): with pytest.raises(TypeError): class Flag(enums.Flag): a = 1 foo = "hi" def test_init_flag_type_disallows_other_bases(self): with pytest.raises(TypeError): class Flag(float, enums.Flag): a = 1 def test_init_flag_type_allows_any_object_if_it_has_a_dunder_name(self): class Flag(enums.Flag): __foo__ = 1 __bar = 2 a = 3 assert Flag is not None def test_init_flag_type_allows_any_object_if_it_has_a_sunder_name(self): class Flag(enums.Flag): _foo_ = 1 _bar = 2 a = 3 assert Flag is not None def test_init_flag_type_allows_methods(self): class Flag(enums.Flag): A = 0x1 def foo(self): return "foo" assert Flag().foo() == "foo" def test_init_flag_type_allows_classmethods(self): class Flag(enums.Flag): A = 0x1 @classmethod def foo(cls): assert cls is Flag return "foo" assert Flag.foo() == "foo" def test_init_flag_type_allows_staticmethods(self): class Flag(enums.Flag): A = 0x1 @staticmethod def foo(): return "foo" assert Flag.foo() == "foo" def test_init_flag_type_allows_descriptors(self): class Flag(enums.Flag): A = 0x1 @property def foo(self): return "foo" assert isinstance(Flag.foo, property) def test_name_to_member_map(self): class Flag(enums.Flag): foo = 9 bar = 18 baz = 27 @staticmethod def sm(): pass @classmethod def cm(cls): pass def m(self): pass @property def p(self): pass assert Flag._name_to_member_map_["foo"].__class__ is Flag assert Flag._name_to_member_map_["foo"] is Flag.foo assert Flag._name_to_member_map_["bar"].__class__ is Flag assert Flag._name_to_member_map_["bar"] is Flag.bar assert Flag._name_to_member_map_["baz"].__class__ is Flag assert Flag._name_to_member_map_["baz"] is Flag.baz assert len(Flag._name_to_member_map_) == 3 def test_value_to_member_map(self): class Flag(enums.Flag): foo = 9 bar = 18 baz = 27 @staticmethod def sm(): pass @classmethod def cm(cls): pass def m(self): pass @property def p(self): pass assert Flag._value_to_member_map_[9].__class__ is Flag assert Flag._value_to_member_map_[9] is Flag.foo assert Flag._value_to_member_map_[18].__class__ is Flag assert Flag._value_to_member_map_[18] is Flag.bar assert Flag._value_to_member_map_[27].__class__ is Flag assert Flag._value_to_member_map_[27] is Flag.baz def test_member_names(self): class Flag(enums.Flag): foo = 9 bar = 18 baz = 27 @staticmethod def sm(): pass @classmethod def cm(cls): pass def m(self): pass @property def p(self): pass assert Flag._member_names_ == ["foo", "bar", "baz"] def test_members(self): class Flag(enums.Flag): foo = 9 bar = 18 baz = 27 @staticmethod def sm(): pass @classmethod def cm(cls): pass def m(self): pass @property def p(self): pass assert Flag.__members__["foo"].__class__ is int assert 
Flag.__members__["foo"] == 9 assert Flag.__members__["bar"].__class__ is int assert Flag.__members__["bar"] == 18 assert Flag.__members__["baz"].__class__ is int assert Flag.__members__["baz"] == 27 assert len(Flag.__members__) == 3 def test_call_on_existing_value(self): class Flag(enums.Flag): foo = 9 bar = 18 baz = 27 assert Flag(9) is Flag.foo assert Flag(Flag.foo) is Flag.foo def test_call_on_composite_value(self): class Flag(enums.Flag): foo = 1 bar = 2 baz = 4 assert Flag(3) is Flag.foo | Flag.bar assert Flag(Flag.foo | Flag.bar) is Flag.foo | Flag.bar def test_call_on_named_composite_value(self): class Flag(enums.Flag): foo = 1 bar = 2 baz = 3 assert Flag(3) is Flag.baz assert Flag(Flag.foo | Flag.bar) is Flag.baz def test_call_on_invalid_value(self): class Flag(enums.Flag): foo = 1 bar = 2 baz = 3 assert Flag(4) == 4 def test_cache(self): class Flag(enums.Flag): foo = 1 bar = 2 baz = 4 assert Flag._temp_members_ == {} # Cache something. Remember the dict is evaluated before the equality # so this will populate the cache. assert Flag._temp_members_ == {3: Flag.foo | Flag.bar} assert Flag._temp_members_ == {3: Flag.foo | Flag.bar, 7: Flag.foo | Flag.bar | Flag.baz} # Shouldn't mutate for existing items. assert Flag._temp_members_ == {3: Flag.foo | Flag.bar, 7: Flag.foo | Flag.bar | Flag.baz} assert Flag._temp_members_ == {3: Flag.foo | Flag.bar, 7: Flag.foo | Flag.bar | Flag.baz} def test_cache_when_temp_values_over_MAX_CACHED_MEMBERS(self): class MockDict: def __getitem__(self, key): raise KeyError def __len__(self): return enums._MAX_CACHED_MEMBERS + 1 def __setitem__(self, k, v): pass popitem = mock.Mock() class Flag(enums.Flag): foo = 1 bar = 2 baz = 3 Flag._temp_members_ = MockDict() Flag(4) Flag._temp_members_.popitem.assert_called_once_with() def test_bitwise_name(self): class Flag(enums.Flag): foo = 1 bar = 2 baz = 4 assert Flag.foo.name == "foo" def test_combined_bitwise_name(self): class Flag(enums.Flag): foo = 1 bar = 2 baz = 4 assert (Flag.foo | Flag.bar).name == "foo|bar" def test_combined_known_bitwise_name(self): class Flag(enums.Flag): foo = 1 bar = 2 baz = 3 assert (Flag.foo | Flag.bar).name == "baz" def test_combined_partially_known_name(self): class Flag(enums.Flag): doo = 1 laa = 2 dee = 3 # This is fine because it is not an identity or exact value. assert (Flag.laa | 4 | Flag.doo).name == "doo|laa|0x4" def test_combined_partially_known_combined_bitwise_name(self): class Flag(enums.Flag): foo = 1 bar = 2 baz = 3 # This is fine because it is not an identity or exact value. 
assert (Flag.baz | 4).name == "foo|bar|0x4" def test_unknown_name(self): class Flag(enums.Flag): foo = 1 bar = 2 baz = 3 assert Flag(4).name == "UNKNOWN 0x4" def test_value(self): class Flag(enums.Flag): foo = 1 bar = 2 baz = 3 assert Flag.foo.value.__class__ is int assert Flag.foo.value == 1 assert Flag.bar.value.__class__ is int assert Flag.bar.value == 2 assert Flag.baz.value.__class__ is int assert Flag.baz.value == 3 def test_is_instance_of_declaring_type(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 QUXX = QUX | BORK assert isinstance(TestFlag.BORK, TestFlag) assert isinstance(TestFlag.BORK, int) assert isinstance(TestFlag.QUXX, TestFlag) assert isinstance(TestFlag.QUXX, int) def test_and(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 QUXX = QUX | BORK assert TestFlag.QUXX & TestFlag.QUX == TestFlag.QUX assert TestFlag.QUXX & TestFlag.QUX == 0x8 assert TestFlag.QUXX & 0x8 == 0x8 assert isinstance(TestFlag.QUXX & TestFlag.QUX, TestFlag) assert isinstance(TestFlag.QUXX & TestFlag.QUX, int) def test_rand(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 QUXX = QUX | BORK assert 0x8 & TestFlag.QUXX == TestFlag.QUX assert 0x8 & TestFlag.QUXX == 0x8 assert isinstance(0x8 & TestFlag.QUXX, TestFlag) assert isinstance(0x8 & TestFlag.QUXX, int) def test_all_positive_case(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 val = TestFlag.BAZ | TestFlag.BORK assert val.all(TestFlag.FOO) assert val.all(TestFlag.FOO, TestFlag.BAR, TestFlag.BAZ, TestFlag.BORK) def test_all_negative_case(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 QUXX = 0x10 val = TestFlag.BAZ | TestFlag.BORK assert not val.all(TestFlag.QUX) assert not val.all(TestFlag.BAZ, TestFlag.QUX, TestFlag.QUXX) def test_any_positive_case(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 val = TestFlag.BAZ | TestFlag.BORK assert val.any(TestFlag.FOO) assert val.any(TestFlag.BAR) assert val.any(TestFlag.BAZ) assert val.any(TestFlag.BORK) # All present assert val.any(TestFlag.FOO, TestFlag.BAR, TestFlag.BAZ, TestFlag.BORK) # One present, one not assert val.any( TestFlag.FOO, TestFlag.QUX, ) def test_any_negative_case(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 QUXX = 0x10 val = TestFlag.BAZ | TestFlag.BORK assert not val.any(TestFlag.QUX) def test_bool(self): class TestFlag(enums.Flag): BLEH = 0x0 FOO = 0x1 BAR = 0x2 assert not TestFlag.BLEH assert TestFlag.FOO assert TestFlag.BAR def test_contains(self): class TestFlag(enums.Flag): BLEH = 0x1 FOO = 0x2 BAR = 0x4 BAZ = 0x8 f = TestFlag.FOO | TestFlag.BLEH | TestFlag.BAZ assert TestFlag.FOO in f assert TestFlag.BLEH in f assert TestFlag.BAZ in f assert TestFlag.BAR not in f def test_difference(self): class TestFlag(enums.Flag): A = 0x1 B = 0x2 C = 0x4 D = 0x8 E = 0x10 F = 0x20 G = 0x40 H = 0x80 a = TestFlag.A | TestFlag.B | TestFlag.D | TestFlag.F | TestFlag.G b = TestFlag.A | TestFlag.B | TestFlag.E c = 0x13 expect_asubb = TestFlag.D | TestFlag.F | TestFlag.G expect_bsuba = TestFlag.E expect_asubc = 0x68 assert a.difference(b) == expect_asubb assert b.difference(a) == expect_bsuba assert a.difference(c) == expect_asubc assert isinstance(a.difference(b), int) assert isinstance(a.difference(b), TestFlag) assert isinstance(a.difference(c), int) assert isinstance(a.difference(c), TestFlag) def test_index(self): class 
TestFlag(enums.Flag): OK = 0x5 FOO = 0x312 BAT = 0x3123 assert operator.index(TestFlag.OK) == 0x5 assert operator.index(TestFlag.FOO) == 0x312 assert operator.index(TestFlag.BAT) == 0x3123 def test_int(self): class TestFlag(enums.Flag): BLEH = 0x0 FOO = 0x1 BAR = 0x2 assert int(TestFlag.BLEH) == 0x0 assert int(TestFlag.FOO) == 0x1 assert int(TestFlag.BAR) == 0x2 assert type(int(TestFlag.BAR)) is int def test_intersection(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 QUXX = QUX | BORK assert TestFlag.QUXX.intersection(TestFlag.QUX) == TestFlag.QUX assert TestFlag.QUXX.intersection(TestFlag.QUX) == 0x8 assert TestFlag.QUXX.intersection(0x8) == 0x8 assert isinstance(TestFlag.QUXX.intersection(TestFlag.QUX), TestFlag) assert isinstance(TestFlag.QUXX.intersection(TestFlag.QUX), int) def test_invert(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 assert TestFlag.BAR.invert() == TestFlag.FOO | TestFlag.BAZ def test_invert_op(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 assert ~TestFlag.BAR == TestFlag.FOO | TestFlag.BAZ def test_is_disjoint(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 assert (TestFlag.FOO | TestFlag.BAR).is_disjoint(TestFlag.BAZ | TestFlag.BORK) assert not (TestFlag.FOO | TestFlag.BAR).is_disjoint(TestFlag.BAR | TestFlag.BORK) assert (TestFlag.FOO | TestFlag.BAR).is_disjoint(0xC) assert not (TestFlag.FOO | TestFlag.BAR).is_disjoint(0xA) def test_isdisjoint(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 assert (TestFlag.FOO | TestFlag.BAR).isdisjoint(TestFlag.BAZ | TestFlag.BORK) assert not (TestFlag.FOO | TestFlag.BAR).isdisjoint(TestFlag.BAR | TestFlag.BORK) assert (TestFlag.FOO | TestFlag.BAR).isdisjoint(0xC) assert not (TestFlag.FOO | TestFlag.BAR).isdisjoint(0xA) def test_is_subset(self): class TestFlag(enums.Flag): BLEH = 0x1 FOO = 0x2 BAR = 0x4 BAZ = 0x8 BORK = 0x10 f = TestFlag.FOO | TestFlag.BLEH | TestFlag.BAZ assert f.is_subset(TestFlag.FOO) assert f.is_subset(TestFlag.BLEH) assert f.is_subset(TestFlag.BAZ) assert not f.is_subset(TestFlag.BAR) assert f.is_subset(0x2) assert f.is_subset(0x1) assert f.is_subset(0x8) assert not f.is_subset(0x4) assert f.is_subset(TestFlag.FOO | TestFlag.BLEH) assert f.is_subset(0x3) assert f.is_subset(TestFlag.FOO | TestFlag.BLEH) assert not f.is_subset(TestFlag.BAR | TestFlag.BORK) assert not f.is_subset(0x14) def test_issubset(self): class TestFlag(enums.Flag): BLEH = 0x1 FOO = 0x2 BAR = 0x4 BAZ = 0x8 BORK = 0x10 f = TestFlag.FOO | TestFlag.BLEH | TestFlag.BAZ assert f.issubset(TestFlag.FOO) assert f.issubset(TestFlag.BLEH) assert f.issubset(TestFlag.BAZ) assert not f.issubset(TestFlag.BAR) assert f.issubset(0x2) assert f.issubset(0x1) assert f.issubset(0x8) assert not f.issubset(0x4) assert f.issubset(TestFlag.FOO | TestFlag.BLEH) assert f.issubset(0x3) assert not f.issubset(TestFlag.BAR | TestFlag.BORK) assert not f.issubset(0x14) def test_is_superset(self): class TestFlag(enums.Flag): BLEH = 0x1 FOO = 0x2 BAR = 0x4 BAZ = 0x8 BORK = 0x10 QUX = 0x10 f = TestFlag.FOO | TestFlag.BLEH | TestFlag.BAZ assert f.is_superset(TestFlag.BLEH | TestFlag.FOO | TestFlag.BAR | TestFlag.BAZ | TestFlag.BORK) assert f.is_superset(0x1F) assert not f.is_superset(TestFlag.QUX) assert not f.is_superset(0x20) def test_issuperset(self): class TestFlag(enums.Flag): BLEH = 0x1 FOO = 0x2 BAR = 0x4 BAZ = 0x8 BORK = 0x10 QUX = 0x10 f = TestFlag.FOO | TestFlag.BLEH | TestFlag.BAZ assert 
f.issuperset(TestFlag.BLEH | TestFlag.FOO | TestFlag.BAR | TestFlag.BAZ | TestFlag.BORK) assert f.issuperset(0x1F) assert not f.issuperset(TestFlag.QUX) assert not f.issuperset(0x20) def test_iter(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 val_iter = iter(TestFlag) assert next(val_iter) == TestFlag.FOO assert next(val_iter) == TestFlag.BAR assert next(val_iter) == TestFlag.BAZ assert next(val_iter) == TestFlag.BORK assert next(val_iter) == TestFlag.QUX with pytest.raises(StopIteration): next(val_iter) def test_flag_iter(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 val = TestFlag.BAZ | TestFlag.BORK val_iter = iter(val) assert next(val_iter) == TestFlag.BAR assert next(val_iter) == TestFlag.BORK assert next(val_iter) == TestFlag.FOO with pytest.raises(StopIteration): next(val_iter) def test_len(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 val0 = TestFlag(0) val1 = TestFlag.FOO val2 = TestFlag.FOO | TestFlag.BORK val3 = TestFlag.FOO | TestFlag.BAR | TestFlag.BORK val3_comb = TestFlag.BAZ | TestFlag.BORK assert len(val0) == 0 assert len(val1) == 1 assert len(val2) == 2 assert len(val3) == 3 assert len(val3_comb) == 3 def test_or(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 assert isinstance(TestFlag.FOO | TestFlag.BAR, int) assert isinstance(TestFlag.FOO | TestFlag.BAR, TestFlag) assert isinstance(TestFlag.FOO | 0x2, int) assert isinstance(TestFlag.FOO | 0x2, TestFlag) assert TestFlag.FOO | TestFlag.BAR == 0x3 assert TestFlag.FOO | TestFlag.BAR == TestFlag(0x3) assert TestFlag.FOO | 0x2 == 0x3 assert TestFlag.FOO | 0x2 == TestFlag(0x3) def test_ror(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 assert isinstance(0x2 | TestFlag.FOO, int) assert isinstance(0x2 | TestFlag.FOO, TestFlag) assert 0x2 | TestFlag.FOO == 0x3 assert 0x2 | TestFlag.FOO == TestFlag(0x3) def test_none_positive_case(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 QUXX = 0x10 val = TestFlag.BAZ | TestFlag.BORK assert val.none(TestFlag.QUX) def test_none_negative_case(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 val = TestFlag.BAZ | TestFlag.BORK assert not val.none(TestFlag.FOO) assert not val.none(TestFlag.BAR) assert not val.none(TestFlag.BAZ) assert not val.none(TestFlag.BORK) # All present assert not val.none(TestFlag.FOO, TestFlag.BAR, TestFlag.BAZ, TestFlag.BORK) # One present, one not assert not val.none( TestFlag.FOO, TestFlag.QUX, ) def test_split(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 val = TestFlag.BAZ | TestFlag.BORK # Baz is a combined field technically, so we don't expect it to be output here assert val.split() == [TestFlag.BAR, TestFlag.BORK, TestFlag.FOO] def test_str_operator(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x3 BORK = 0x4 QUX = 0x8 val = TestFlag.BAZ | TestFlag.BORK assert str(val) == "FOO|BAR|BORK" def test_symmetric_difference(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 a = TestFlag.FOO | TestFlag.BAR | TestFlag.BAZ b = TestFlag.BAZ | TestFlag.BORK | TestFlag.QUX assert isinstance(a.symmetric_difference(b), int) assert isinstance(a.symmetric_difference(b), TestFlag) assert isinstance(a.symmetric_difference(0x1C), int) assert isinstance(a.symmetric_difference(0x1C), TestFlag) assert a.symmetric_difference(b) 
== b.symmetric_difference(a) assert a.symmetric_difference(a) == 0 assert b.symmetric_difference(b) == 0 assert a.symmetric_difference(b) == TestFlag.FOO | TestFlag.BAR | TestFlag.BORK | TestFlag.QUX assert a.symmetric_difference(b) == 0x1B def test_sub(self): class TestFlag(enums.Flag): A = 0x1 B = 0x2 C = 0x4 D = 0x8 E = 0x10 F = 0x20 G = 0x40 H = 0x80 a = TestFlag.A | TestFlag.B | TestFlag.D | TestFlag.F | TestFlag.G b = TestFlag.A | TestFlag.B | TestFlag.E c = 0x13 expect_asubb = TestFlag.D | TestFlag.F | TestFlag.G expect_bsuba = TestFlag.E expect_asubc = 0x68 assert a - b == expect_asubb assert b - a == expect_bsuba assert a - c == expect_asubc assert isinstance(a - b, int) assert isinstance(a - b, TestFlag) assert isinstance(a - c, int) assert isinstance(a - c, TestFlag) def test_rsub(self): class TestFlag(enums.Flag): A = 0x1 B = 0x2 C = 0x4 D = 0x8 E = 0x10 F = 0x20 G = 0x40 H = 0x80 a = TestFlag.A | TestFlag.B | TestFlag.D | TestFlag.F | TestFlag.G c = 0x13 expect_csuba = 0x10 assert c - a == expect_csuba assert isinstance(c - a, int) assert isinstance(c - a, TestFlag) def test_union(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 assert isinstance(TestFlag.FOO.union(TestFlag.BAR), int) assert isinstance(TestFlag.FOO.union(TestFlag.BAR), TestFlag) assert isinstance(TestFlag.FOO.union(TestFlag.BAR).union(TestFlag.BAZ), int) assert isinstance(TestFlag.FOO.union(TestFlag.BAR).union(TestFlag.BAZ), TestFlag) assert isinstance(TestFlag.FOO.union(0x2).union(TestFlag.BAZ), int) assert isinstance(TestFlag.FOO.union(0x2).union(TestFlag.BAZ), TestFlag) assert isinstance(TestFlag.FOO.union(0x2), int) assert isinstance(TestFlag.FOO.union(0x2), TestFlag) assert TestFlag.FOO.union(TestFlag.BAR) == 0x3 assert TestFlag.FOO.union(TestFlag.BAR) == TestFlag(0x3) assert TestFlag.FOO.union(0x2) == 0x3 assert TestFlag.FOO.union(0x2) == TestFlag(0x3) def test_xor(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 a = TestFlag.FOO | TestFlag.BAR | TestFlag.BAZ b = TestFlag.BAZ | TestFlag.BORK | TestFlag.QUX assert isinstance(a ^ b, int) assert isinstance(a ^ b, TestFlag) assert isinstance(a ^ 0x1C, int) assert isinstance(a ^ 0x1C, TestFlag) assert a ^ b == b ^ a assert a ^ a == 0 assert b ^ b == 0 assert a ^ b == TestFlag.FOO | TestFlag.BAR | TestFlag.BORK | TestFlag.QUX assert a ^ b == 0x1B assert a ^ 0x1C == TestFlag.FOO | TestFlag.BAR | TestFlag.BORK | TestFlag.QUX assert a ^ 0x1C == 0x1B def test_rxor(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 a = TestFlag.FOO | TestFlag.BAR | TestFlag.BAZ assert isinstance(0x1C ^ a, int) assert isinstance(0x1C ^ a, TestFlag) assert 0x1C ^ a == TestFlag.FOO | TestFlag.BAR | TestFlag.BORK | TestFlag.QUX assert 0x1C ^ a == 0x1B def test_getitem(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 returned = TestFlag["FOO"] assert returned == TestFlag.FOO assert type(returned) == TestFlag def test_repr(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 assert repr(TestFlag) == "<enum TestFlag>" assert repr(TestFlag.FOO) == "<TestFlag.FOO: 1>" def test_allows_overriding_methods(self): class TestFlag(enums.Flag): FOO = 0x1 BAR = 0x2 BAZ = 0x4 BORK = 0x8 QUX = 0x10 def __int__(self): return 855555 assert int(TestFlag.FOO | TestFlag.BAR) == 855555
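

# --- Hedged sketch (added for clarity; not part of the original test module):
# plain usage of the enums.Enum / enums.Flag behaviour the tests above exercise.
if __name__ == "__main__":
    class Color(int, enums.Enum):
        RED = 1
        GREEN = 2

    assert Color(1) == Color.RED           # lookup by value
    assert Color["GREEN"] == Color.GREEN   # lookup by name
    assert Color.RED.name == "RED"

    class Perm(enums.Flag):
        READ = 0x1
        WRITE = 0x2

    combo = Perm.READ | Perm.WRITE
    assert Perm.READ in combo
    assert (combo & Perm.WRITE) == Perm.WRITE
    print("enum/flag demo passed")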
StarcoderdataPython
6641574
<filename>S4/S4 Library/simulation/traits/trait_commands.py import sims4 from server_commands.argument_helpers import OptionalTargetParam, get_optional_target, TunableInstanceParam, RequiredTargetParam from traits.trait_type import TraitType @sims4.commands.Command('traits.show_traits', command_type=sims4.commands.CommandType.Automation) def show_traits(opt_sim:OptionalTargetParam=None, _connection=None): sim = get_optional_target(opt_sim, _connection) if sim is not None: trait_tracker = sim.sim_info.trait_tracker if trait_tracker is None: sims4.commands.output("Sim {} doesn't have trait tracker".format(sim), _connection) return sims4.commands.output('Sim {} has {} traits equipped, {} slots left'.format(sim, len(trait_tracker), trait_tracker.empty_slot_number), _connection) sims4.commands.automation_output('TraitsList; Status:Begin', _connection) for trait in trait_tracker.equipped_traits: s = 'Equipped: {}'.format(trait.__name__) sims4.commands.output(s, _connection) sims4.commands.automation_output('TraitsList; Status:Data, Trait:{}'.format(trait.__name__), _connection) sims4.commands.automation_output('TraitsList; Status:End', _connection) @sims4.commands.Command('traits.equip_trait', command_type=sims4.commands.CommandType.Live) def equip_trait(trait_type:TunableInstanceParam(sims4.resources.Types.TRAIT), opt_sim:OptionalTargetParam=None, _connection=None): sim = get_optional_target(opt_sim, _connection) if sim is not None: sim.sim_info.add_trait(trait_type) return True return False @sims4.commands.Command('traits.remove_trait', command_type=sims4.commands.CommandType.Live) def remove_trait(trait_type:TunableInstanceParam(sims4.resources.Types.TRAIT), opt_sim:OptionalTargetParam=None, _connection=None): sim = get_optional_target(opt_sim, _connection) if sim is not None: sim.sim_info.remove_trait(trait_type) return True return False @sims4.commands.Command('traits.clear_traits', command_type=sims4.commands.CommandType.Automation) def clear_traits(opt_sim:OptionalTargetParam=None, _connection=None): sim = get_optional_target(opt_sim, _connection) if sim is not None: trait_tracker = sim.sim_info.trait_tracker if trait_tracker is None: sims4.commands.output("Sim {} doesn't have trait tracker".format(sim), _connection) return False else: trait_tracker.clear_traits() return True return False @sims4.commands.Command('traits.show_inherited_traits') def show_inherited_traits(sim_a:RequiredTargetParam=None, sim_b:OptionalTargetParam=None, _connection=None): sim_a = sim_a.get_target() sim_b = get_optional_target(sim_b, _connection) output = sims4.commands.Output(_connection) if sim_a is None or sim_b is None: output('Must specify two valid Sims.') return False output('Potential inherited traits between {} and {}:'.format(sim_a, sim_b)) for (index, inherited_trait_entries) in enumerate(sim_a.trait_tracker.get_inherited_traits(sim_b)): output('Entry {}:'.format(index)) total_weight = sum(entry[0] for entry in inherited_trait_entries) for inherited_trait_entry in inherited_trait_entries: output(' {:24} {:.2%}'.format(inherited_trait_entry[1].__name__, inherited_trait_entry[0]/total_weight if total_weight else 0)) output('End') return True @sims4.commands.Command('traits.show_traits_of_type') def show_traits_of_type(trait_type:TraitType, sim:OptionalTargetParam=None, _connection=None): sim = get_optional_target(sim, _connection) output = sims4.commands.Output(_connection) if sim is None: output('No valid Sim found. 
Try specifying a SimID as the second argument.') return trait_tracker = sim.sim_info.trait_tracker if trait_tracker is None: output("Sim {} doesn't have trait tracker".format(sim)) return traits = trait_tracker.get_traits_of_type(trait_type) if len(traits) == 0: output('Sim {} has no traits of type {}.'.format(sim, trait_type)) return for trait in traits: output(trait.__name__)
StarcoderdataPython
6611675
<filename>Mr.Lin/0002/0002.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: 30987
# @Date:   2015-01-12 17:22:35
# @Last Modified by:   30987
# @Last Modified time: 2015-01-12 22:33:53

# Problem 0002: Save the 200 activation codes (or coupons) generated in
# Problem 0001 to a MySQL relational database.

import uuid
import MySQLdb


def creat_code(number=200):
    result = []
    while True:
        tem = str(uuid.uuid1()).replace('-', '')
        if tem not in result:
            result.append(tem)
        if len(result) == number:
            break
    return result


def coont_db(result):
    num = len(result)
    db = MySQLdb.connect('localhost', 'root', '', 'python', charset='utf8')
    cur = db.cursor()
    try:
        print "OK"
        for i in xrange(num):
            cur.execute('insert into code (code_num) values("%s")' % (result[i]))
        db.commit()
    except:
        print "Database connection error!"
        db.rollback()
    db.close()


if __name__ == '__main__':
    coont_db(creat_code())
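
# --- Hedged note (added for clarity; not part of the original script): the
# INSERT above assumes a table roughly like the following already exists in the
# `python` database. The exact schema is an assumption; the codes are 32-char
# hex strings (uuid1 with dashes stripped).
#
#   CREATE TABLE code (
#       id INT AUTO_INCREMENT PRIMARY KEY,
#       code_num CHAR(32) NOT NULL
#   );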
StarcoderdataPython
6537562
<filename>backend.py<gh_stars>0
from enum import Enum

import tui
import console
import gui


class GuiBackendType(Enum):
    NONE = 0
    TUI = 1
    GUI = 2
    CONSOLE = 3

    @staticmethod
    def from_str(string: str) -> "GuiBackendType":
        if string == "tui":
            return GuiBackendType.TUI
        elif string == "gui":
            return GuiBackendType.GUI
        elif string == "con":
            return GuiBackendType.CONSOLE
        return GuiBackendType.NONE


def run_backend(type: GuiBackendType):
    if type == GuiBackendType.TUI:
        return tui.tui_run()
    elif type == GuiBackendType.CONSOLE:
        return console.console_run()
    elif type == GuiBackendType.GUI:
        return gui.gui_run()
    else:
        return 0
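

# --- Hedged usage sketch (added for clarity; not part of the original module):
# mapping a command-line style string onto a backend and running it.
if __name__ == "__main__":
    backend = GuiBackendType.from_str("con")   # -> GuiBackendType.CONSOLE
    run_backend(backend)                       # dispatches to console.console_run()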
StarcoderdataPython
4950474
<filename>simple_rl/agents/LinearQLearningAgentClass.py ''' QLearningAgentClass.py: Class for a basic QLearningAgent ''' # Python imports. import random import numpy import os import math import time from collections import defaultdict from sklearn.linear_model import SGDRegressor from sklearn.multioutput import MultiOutputRegressor import sklearn.pipeline import sklearn.preprocessing from sklearn.kernel_approximation import RBFSampler import pickle # Other imports. from simple_rl.agents.AgentClass import Agent class LinearQLearningAgent(Agent): ''' Implementation for a Q Learning Agent ''' def __init__(self, actions, state_dim, encode_size=None, name="Linear-Q", alpha=0.1, gamma=0.99, epsilon=0.1, random=False, explore="uniform", anneal=False, custom_q_init=None, default_q=0, save_dir="../models/linear-q/", save=False, load=False, learn=True, learn_dynamics=False, load_dynamics=False, learn_encoder=False, load_encoder=False, baseline_encoder=False, save_interval=None, checkpoint=None): ''' Args: actions (list): Contains strings denoting the actions. name (str): Denotes the name of the agent. alpha (float): Learning rate. gamma (float): Discount factor. epsilon (float): Exploration term. explore (str): One of {softmax, uniform}. Denotes explore policy. custom_q_init (defaultdict{state, defaultdict{action, float}}): a dictionary of dictionaries storing the initial q-values. Can be used for potential shaping (Wiewiora, 2003) default_q (float): the default value to initialize every entry in the q-table with [by default, set to 0.0] ''' name_ext = "-" + explore if explore != "uniform" else "" Agent.__init__(self, name=name + name_ext, actions=actions, gamma=gamma) # Set/initialize parameters and other relevant classwide data self.alpha, self.alpha_init = alpha, alpha self.epsilon, self.epsilon_init = epsilon, epsilon self.step_number = 0 self.anneal = anneal self.default_q = default_q # 0 # 1 / (1 - self.gamma) self.explore = explore self.custom_q_init = custom_q_init self.random = random self.state_dim = state_dim self.encode_size = encode_size self.save_dir = save_dir self.save = save self.learn = learn self.learn_dynamics = learn_dynamics self.learn_encoder = learn_encoder self.normalize = True self.baseline_encoder = baseline_encoder self.save_interval = save_interval # Q Function: # if self.custom_q_init: # self.q_func = self.custom_q_init # else: # self.q_func = defaultdict(lambda: defaultdict(lambda: self.default_q)) if load: self.q_models = pickle.load(open(save_dir+"models.pkl", 'rb')) print("loading") else: self.q_models = {} sz = self.encode_size if self.encode_size else self.state_dim for action in self.actions: model = SGDRegressor(learning_rate="constant") model.partial_fit([numpy.zeros(sz)], [0]) self.q_models[action] = model if load_dynamics: self.dynamics_regressors = pickle.load(open(save_dir+"dynamics.pkl", 'rb')) print("loading dynamics") elif self.learn_dynamics: self.dynamics_regressors = {} for action in self.actions: self.dynamics_regressors[action]=MultiOutputRegressor(SGDRegressor(random_state=0)) else: self.dynamics_regressors = None if load_encoder: if checkpoint: self.encoder = pickle.load(open(save_dir+checkpoint, 'rb')) print("loading encoder", checkpoint) else: self.encoder = pickle.load(open(save_dir+"encoder.pkl", 'rb')) print("loading encoder") elif self.learn_encoder: self.encoder = MultiOutputRegressor(SGDRegressor(random_state=0)) self.encoder.partial_fit([numpy.zeros(state_dim)], [numpy.zeros(encode_size)]) elif self.baseline_encoder: 
self.create_base_encoder() else: self.encoder = None def create_base_encoder(self): observation_examples = numpy.random.rand(10000, self.state_dim).astype('float32') * math.sqrt(self.encode_size) # print(observation_examples) # self.scaler = sklearn.preprocessing.StandardScaler() # self.scaler.fit(observation_examples) self.featurizer = RBFSampler(gamma=1.0, n_components=int(self.encode_size)) self.featurizer.fit(observation_examples) # self.featurizer = sklearn.pipeline.FeatureUnion([ # ("rbf2", RBFSampler(gamma=2.0, n_components=int(self.encode_size//2))), # ("rbf3", RBFSampler(gamma=1.0, n_components=int(self.encode_size-self.encode_size//2))), # ]) # self.featurizer.fit(self.scaler.transform(observation_examples)) def test_dynamics(self): sz = self.encode_size if self.encode_size else self.state_dim for i in range(sz): x = numpy.zeros(sz) x[i] = 1 print("\nx", x) for action in self.actions: print("action", action) predict = self.dynamics_regressors[action].predict([x])[0] print("predict", predict) def get_parameters(self): ''' Returns: (dict) key=param_name (str) --> val=param_val (object). ''' param_dict = defaultdict(int) param_dict["alpha"] = self.alpha param_dict["gamma"] = self.gamma param_dict["epsilon"] = self.epsilon_init param_dict["anneal"] = self.anneal param_dict["explore"] = self.explore return param_dict # -------------------------------- # ---- CENTRAL ACTION METHODS ---- # -------------------------------- def act(self, state, reward, learning=True, verbose=False): ''' Args: state (State) reward (float) Returns: (str) Summary: The central method called during each time step. Retrieves the action according to the current policy and performs updates given (s=self.prev_state, a=self.prev_action, r=reward, s'=state) ''' # print("state", state) if learning and self.learn: self.update(self.prev_state, self.prev_action, reward, state) if learning and self.learn_dynamics: self.update_dynamics(self.prev_state, self.prev_action, reward, state) if learning and self.learn_encoder: self.update_encoder(self.prev_state, self.prev_action, reward, state) if verbose and self.prev_state and self.prev_action: print("state", self.prev_state.encode(), "action", self.prev_action) if self.baseline_encoder: print("encode", self._get_encode(state)) elif self.encoder: encode = self.encoder.predict([self.prev_state.encode()])[0] print("encode", encode) predict = self.dynamics_regressors[self.prev_action].predict([encode])[0] print("next", self.encoder.predict([state.encode()])[0], "predict", predict) elif self.dynamics_regressors: predict = self.dynamics_regressors[self.prev_action].predict([self.prev_state.encode()])[0] print("next", state, "predict", predict) if self.random: action = numpy.random.choice(self.actions) elif self.explore == "softmax": # Softmax exploration action = self.soft_max_policy(state) else: # Uniform exploration action = self.epsilon_greedy_q_policy(state) self.prev_state = state self.prev_action = action self.step_number += 1 # Anneal params. 
if learning and self.anneal: self._anneal() if self.save_interval and self.step_number % self.save_interval == 0: self._save_checkpoint() return action def _save_checkpoint(self): if self.learn_encoder: path = os.path.join(self.save_dir, "checkpoints") if not os.path.isdir(path): os.makedirs(path) pickle.dump(self.encoder, open(path + "/encoder_{}.pkl".format(self.step_number), 'wb')) def _get_encode(self, state): if self.baseline_encoder: encode = self.featurizer.transform([state])[0] # scaled = self.scaler.transform([state]) # encode = self.featurizer.transform(scaled)[0] elif self.encoder: encode = self.encoder.predict([state.encode()])[0] else: encode = state.encode() return encode def epsilon_greedy_q_policy(self, state): ''' Args: state (State) Returns: (str): action. ''' # Policy: Epsilon of the time explore, otherwise, greedyQ. if numpy.random.random() > self.epsilon: # Exploit. action = self.get_max_q_action(state) else: # Explore action = numpy.random.choice(self.actions) return action def soft_max_policy(self, state): ''' Args: state (State): Contains relevant state information. Returns: (str): action. ''' return numpy.random.choice(self.actions, 1, p=self.get_action_distr(state))[0] # --------------------------------- # ---- Q VALUES AND PARAMETERS ---- # --------------------------------- def update_dynamics(self, state, action, reward, next_state): if not action or not state: return self.dynamics_regressors[action].partial_fit([state.encode()], [next_state.encode()]) # predict = self.dynamics_regressors[action].predict([state.encode()])[0] # loss = numpy.linalg.norm(predict-next_state.encode()) def update_encoder(self, state, action, reward, next_state): if not action or not state: return cur_feature = self.encoder.predict([state.encode()])[0] predict_next = self.dynamics_regressors[action].predict([cur_feature])[0] if self.normalize: predict_norm = numpy.linalg.norm(predict_next) if predict_norm: predict_next /= predict_norm self.encoder.partial_fit([next_state.encode()], [predict_next]) # encoded_next = self.encoder.predict([next_state.encode()])[0] # loss = numpy.linalg.norm(predict_next-encoded_next) # print("loss", loss) def update(self, state, action, reward, next_state): ''' Args: state (State) action (str) reward (float) next_state (State) Summary: Updates the internal Q Function according to the Bellman Equation. (Classic Q Learning update) ''' # If this is the first state, just return. if state is None: self.prev_state = next_state return # Update the Q Function. max_q_curr_state = self.get_max_q_value(next_state) y = reward + self.gamma*max_q_curr_state # print("state id", state.id, "y", y) # prev_q_val = self.get_q_value(state, action) # print("prev q", prev_q_val) encode = self._get_encode(state) self.q_models[action].partial_fit([encode], [y]) # q_val = self.get_q_value(state, action) # print("curr q", q_val) # self.q_func[state][action] = (1 - self.alpha) * prev_q_val + self.alpha * (reward + self.gamma*max_q_curr_state) def _anneal(self): # Taken from "Note on learning rate schedules for stochastic optimization, by <NAME> Moody (Yale)": self.alpha = self.alpha_init / (1.0 + (self.step_number / 1000.0)*(self.episode_number + 1) / 2000.0 ) self.epsilon = self.epsilon_init / (1.0 + (self.step_number / 1000.0)*(self.episode_number + 1) / 2000.0 ) def _compute_max_qval_action_pair(self, state): ''' Args: state (State) Returns: (tuple) --> (float, str): where the float is the Qval, str is the action. 
''' # Grab random initial action in case all equal best_action = random.choice(self.actions) max_q_val = float("-inf") shuffled_action_list = self.actions[:] random.shuffle(shuffled_action_list) # Find best action (action w/ current max predicted Q value) for action in shuffled_action_list: q_s_a = self.get_q_value(state, action) if q_s_a > max_q_val: max_q_val = q_s_a best_action = action return max_q_val, best_action def get_max_q_action(self, state): ''' Args: state (State) Returns: (str): denoting the action with the max q value in the given @state. ''' return self._compute_max_qval_action_pair(state)[1] def get_max_q_value(self, state): ''' Args: state (State) Returns: (float): denoting the max q value in the given @state. ''' return self._compute_max_qval_action_pair(state)[0] def get_value(self, state): ''' Args: state (State) Returns: (float) ''' return self.get_max_q_value(state) def get_q_value(self, state, action): ''' Args: state (State) action (str) Returns: (float): denoting the q value of the (@state, @action) pair. ''' # state_encoding = numpy.zeros(self.state_dim) # state_encoding[state.id] = 1 encode = self._get_encode(state) return self.q_models[action].predict([encode])[0] # return self.q_func[state][action] def get_action_distr(self, state, beta=0.2): ''' Args: state (State) beta (float): Softmax temperature parameter. Returns: (list of floats): The i-th float corresponds to the probability mass associated with the i-th action (indexing into self.actions) ''' all_q_vals = [] for i, action in enumerate(self.actions): all_q_vals.append(self.get_q_value(state, action)) # Softmax distribution. total = sum([numpy.exp(beta * qv) for qv in all_q_vals]) softmax = [numpy.exp(beta * qv) / total for qv in all_q_vals] return softmax def reset(self): self.step_number = 0 self.episode_number = 0 if self.save: pickle.dump(self.q_models, open(self.save_dir+"models.pkl", 'wb')) if self.learn_dynamics: pickle.dump(self.dynamics_regressors, open(self.save_dir+"dynamics.pkl", 'wb')) if self.learn_encoder: pickle.dump(self.encoder, open(self.save_dir+"encoder.pkl", 'wb')) self.q_models = {} sz = self.encode_size if self.encode_size else self.state_dim for action in self.actions: model = SGDRegressor(learning_rate="constant") model.partial_fit([numpy.zeros(sz)], [0]) self.q_models[action] = model # if self.custom_q_init: # self.q_func = self.custom_q_init # else: # self.q_func = defaultdict(lambda : defaultdict(lambda: self.default_q)) Agent.reset(self) def end_of_episode(self): ''' Summary: Resets the agents prior pointers. ''' if self.anneal: self._anneal() Agent.end_of_episode(self) def print_v_func(self): ''' Summary: Prints the V function. ''' for state in self.q_func.keys(): print(state, self.get_value(state)) def print_q_func(self): ''' Summary: Prints the Q function. ''' if len(self.q_func) == 0: print("Q Func empty!") else: for state, actiond in self.q_func.items(): print(state) for action, q_val in actiond.items(): print(" ", action, q_val)
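# Illustrative usage sketch for LinearQLearningAgent above. It assumes simple_rl's
# Agent base class initializes prev_state/prev_action/episode_number, and that
# environment states expose an encode() method returning a 1-D numpy array of
# length state_dim; the ChainState class and the tiny chain environment are
# hypothetical stand-ins, not part of simple_rl.
import numpy as np


class ChainState:
    """Minimal state with a one-hot encode(), as the agent's _get_encode expects."""
    def __init__(self, idx, n):
        self.idx, self.n = idx, n

    def encode(self):
        vec = np.zeros(self.n)
        vec[self.idx] = 1.0
        return vec


def demo_linear_q(n_states=5, episodes=20, steps_per_episode=20):
    agent = LinearQLearningAgent(actions=["left", "right"], state_dim=n_states)
    for _ in range(episodes):
        idx, reward = 0, 0.0
        state = ChainState(idx, n_states)
        for _ in range(steps_per_episode):
            action = agent.act(state, reward)  # also learns from the previous transition
            idx = min(idx + 1, n_states - 1) if action == "right" else max(idx - 1, 0)
            reward = 1.0 if idx == n_states - 1 else 0.0
            state = ChainState(idx, n_states)
        agent.end_of_episode()
    return agent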
from app import db class User(db.Model): id = db.Column(db.Integer, primary_key=True) mcuser = db.Column(db.String(16), index=True, unique=True) mcemail = db.Column(db.String(120), index=True, unique=True) applicant_age = db.Column(db.SmallInteger(), default=15) applicant_skills = db.Column(db.String(), index=True, unique=False) applicant_ip = db.Column(db.String(120), index=True, unique=True) fishbanned = db.Column(db.Boolean(), index=True, unique=False) def __init__(self, mcuser=None, mcemail=None, applicant_age=None, applicant_skills=None, applicant_ip=None, fishbanned=None): self.mcuser = mcuser self.mcemail = mcemail self.applicant_age = applicant_age self.applicant_skills = applicant_skills self.applicant_ip = applicant_ip self.fishbanned = fishbanned def __repr__(self): return '<User %r>' % (self.mcuser) class UserApplication(db.Model): id = db.Column(db.Integer, primary_key=True) body = db.Column(db.String(140)) timestamp = db.Column(db.DateTime) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) def __init__(self, body=None, timestamp=None, user_id=None): self.body = body self.timestamp = timestamp self.user_id = user_id def __repr__(self): return '<Post %r>' % (self.body)
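# Illustrative sketch of how these models might be used. It assumes the `app`
# package also exposes the Flask application object as `app` (only `db` is shown
# above), so treat the import and the seed data below as hypothetical.
from datetime import datetime

from app import app, db


def seed_example_user():
    with app.app_context():
        db.create_all()
        user = User(mcuser="example_player", mcemail="player@example.com",
                    applicant_age=16, applicant_skills="redstone, building",
                    applicant_ip="203.0.113.7", fishbanned=False)
        db.session.add(user)
        db.session.flush()  # assigns user.id before the commit
        db.session.add(UserApplication(body="I would like to join the server.",
                                       timestamp=datetime.utcnow(),
                                       user_id=user.id))
        db.session.commit()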
# Exercise uctypes PTR descriptors by pointing a struct at a small bytes object.
import sys
import uctypes

if sys.byteorder != "little":
    print("SKIP")
    sys.exit()

desc = {
    "ptr": (uctypes.PTR | 0, uctypes.UINT8),
    "ptr16": (uctypes.PTR | 0, uctypes.UINT16),
    "ptr2": (uctypes.PTR | 0, {"b": uctypes.UINT8 | 0}),
}

data = b"01"  # buffer whose address the pointer fields dereference

addr = uctypes.addressof(data)
buf = addr.to_bytes(4, "little")  # pack the address little-endian, matching the check above

S = uctypes.struct(desc, uctypes.addressof(buf), uctypes.NATIVE)

print(S.ptr[0])
assert S.ptr[0] == ord("0")
print(S.ptr[1])
assert S.ptr[1] == ord("1")
print(hex(S.ptr16[0]))
assert hex(S.ptr16[0]) == "0x3130"
print(S.ptr2[0].b, S.ptr2[1].b)
assert (S.ptr2[0].b, S.ptr2[1].b) == (48, 49)
# Brute-force search for short arithmetic expressions whose value approximates `find`.
from math import sqrt
from fn import to_expr, is_correct_expr

#find = 3.14159265
find = 1.414213
#find = 7
acc = 0.005
expr_len = (3, 10)

nms = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
#nms = ['2']
mfn = ['+', '-', '*', '/']
fnc = ['sqrt(']
brc = ['(', ')']
use_brc = True

# Candidate symbol set; brc*use_brc appends the brackets only when use_brc is True.
fll = nms + mfn + fnc + brc*use_brc

if __name__ == "__main__":
    ress = []
    fr, to = len(fll)**expr_len[0], len(fll)**expr_len[1]

    for i in range(fr, to):
        expr = to_expr(i, fll)
        try:
            if not is_correct_expr(expr):
                continue
            res = eval(expr)
            if abs(find-res) <= acc:
                ress.append([expr, res])
                print(i, '/', to-fr, ress)
        except Exception:
            # Many generated strings are not valid expressions or divide by zero.
            pass

        if i % 3000 == 0:
            print(i-fr, '/', to-fr, ress)

    print('search depth:', to-fr, f'from {fr}, to {to}')
    print(sorted(ress, key=lambda x: abs(find-x[1])))
    print('count of results:', len(ress))
<reponame>unmonoqueteclea/pygame-search-algorithms-visualizer<filename>util.py # -*- coding: utf-8 -*- ''' title :util.py description :Utilities for different parts of the project author :<NAME> (unmonoqueteclea) date :20160623 notes : python_version :2.7.6 ''' from __future__ import print_function import collections,heapq ################## CLASSES ######################################### class Queue: '''Queue class It is a data structure used by the search algorithm to decide the order in which to process the locations. Itś just a wrapper around the built-in collections.deque class. Attrs: elements = It's a deque ''' def __init__(self): '''It creates a new empty deque''' self.elements = collections.deque() def empty(self): '''Returns True if the queue is empty and False otherwise''' return len(self.elements) == 0 def put(self, x): '''Adds a new element to the queue''' self.elements.append(x) def get(self): '''Gets a new element from the left of the queue''' return self.elements.popleft() class PriorityQueue: ''' Here’s a reasonably fast priority queue that uses binary heaps, but does not support reprioritize. To get the right ordering, we’ll use tuples (priority, item). When an element is inserted that is already in the queue, we’ll have a duplicate; Attrs: elements: (List) The different elements of the Queue ''' def __init__(self): self.elements = [] def empty(self): return len(self.elements) == 0 def put(self, item, priority): heapq.heappush(self.elements, (priority, item)) def get(self): return heapq.heappop(self.elements)[1] #################################### FUNCTIONS ##################################3 def getInfoMessage(state): '''Returns the info message corresponding to an specific state :param state: (int) Represents an execution state (STATE_NONE,STATE_STARTING_POINT,STATE_GOAL_POINT) :return: (String) Info message ''' if(state==STATE_NONE): return" Press 'w' and click on the grid to add or remove a wall" elif(state == STATE_STARTING_POINT): #Waiting user to add starting point in the grid return "Click anywhere on the grid to add a starting point" elif (state == STATE_GOAL_POINT): #Waiting user to add goal point in the grid return "Click anywhere on the grid to add a goal point." def getSecondaryInfoMessage(state): '''Returns the info message corresponding to an specific state :param state: (int) Represents an execution state (STATE_NONE,STATE_STARTING_POINT,STATE_GOAL_POINT) :return: (String) Info message ''' if (state == STATE_NONE): return " Click on the grid to change the cost of a location. Click Play button to start animation." elif (state == STATE_STARTING_POINT): # Waiting user to add starting point in the grid return "" elif (state == STATE_GOAL_POINT): # Waiting user to add goal point in the grid return "" def getColorTerrain(cost): if(cost <= 2): return (14,112,45) elif(2<cost<=4): return (87,233,119) elif(4<cost<=7): return (243,255,67) elif(cost>7): return (99,84,3) ################## CONSTANTS ################################### WIDTH = 640 HEIGHT = 480 BOTTOM_BAR_HEIGHT = 210 COLOR_START_POINT=(255,0,0) COLOR_GOAL_POINT=(112,14,107) COLOR_WALL=(50,50,50) COLOR_WHITE=(255,255,255) STATE_NONE=0 STATE_STARTING_POINT=1 STATE_GOAL_POINT=2 BUTTON_PLAY=0 BUTTON_START_POINT=1 BUTTON_GOAL_POINT=2 BUTTON_RESET=3 BUTTON_BFS = 4 BUTTON_DIJKSTRA=5 BUTTON_ASTAR=6 ALG_NAMES=["Breadth First Search Algorithm","Dijkstra Search Algorithm"]
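# Illustrative sketch: how the search code is expected to use the two frontier
# structures defined above (no assumptions beyond this module).
def _frontier_demo():
    frontier = Queue()
    frontier.put((0, 0))
    frontier.put((0, 1))
    first = frontier.get()        # FIFO order -> (0, 0)

    pq = PriorityQueue()
    pq.put((2, 3), priority=7)
    pq.put((1, 1), priority=2)
    best = pq.get()               # lowest priority comes out first -> (1, 1)
    return first, best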
# Code/EYESensors/microphone.py
import pyaudio
import wave


class audio:
    def __init__(self, RECORD_TIME):
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 44100
        self.CHUNK = 512
        self.RECORD_SECONDS = RECORD_TIME
        self.WAVE_OUTPUT_FILENAME = "file.wav"
        self.audio = pyaudio.PyAudio()
        self.frames = []

    def run(self):
        # Start recording from the default input device.
        stream = self.audio.open(format=self.FORMAT, channels=self.CHANNELS,
                                 rate=self.RATE, input=True,
                                 frames_per_buffer=self.CHUNK)
        print("recording...")
        for i in range(0, int(self.RATE / self.CHUNK * self.RECORD_SECONDS)):
            data = stream.read(self.CHUNK)
            self.frames.append(data)
        print("finished recording")

        # Stop recording.
        stream.stop_stream()
        stream.close()

        # Save the file before terminating PyAudio, since the sample width is
        # queried from the still-open PyAudio instance.
        waveFile = wave.open(self.WAVE_OUTPUT_FILENAME, 'wb')
        waveFile.setnchannels(self.CHANNELS)
        waveFile.setsampwidth(self.audio.get_sample_size(self.FORMAT))
        waveFile.setframerate(self.RATE)
        waveFile.writeframes(b''.join(self.frames))
        waveFile.close()

        self.audio.terminate()
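# Illustrative sketch: record five seconds from the default input device and
# write it to file.wav. Assumes a working microphone and the PyAudio/PortAudio
# stack; the duration is arbitrary.
if __name__ == "__main__":
    recorder = audio(RECORD_TIME=5)
    recorder.run()
    print("saved", recorder.WAVE_OUTPUT_FILENAME)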
# dmilos/IceRay package __init__ (exports are currently commented out).
#__all__ = [ 'core' ]
#print( '<' + __name__ + 'name=\'' + __file__ + '\'/>' )
#!/usr/bin/env python3 from checkeddeco import checked @checked class Movie: title: str year: int box_office: float if __name__ == '__main__': # No static type checker can understand this... movie = Movie(title='The Godfather', year=1972, box_office=137) # type: ignore print(movie.title) print(movie) try: # remove the "type: ignore" comment to see Mypy correctly spot the error movie.year = 'MCMLXXII' # type: ignore except TypeError as e: print(e) try: # Again, no static type checker can understand this... blockbuster = Movie(title='Avatar', year=2009, box_office='billions') # type: ignore except TypeError as e: print(e)
# Python/Tests/TestData/Grammar/AwaitStmtIllegal.py
# Grammar test data: every `await` below is deliberately illegal (used outside an
# async function, and at class scope).
await None

def quox():
    await fob

class quox:
    await fob
VALID_RUNNERS = ("manual", "monkey", "grodd", "grodd2")
""" Helper methods """ class Helper(object): @classmethod def parse_definition_string(cls, definition): ''' Parse the definition string and return the list of dependent classes. :type definition: str :param definition: string with a list of dependent classes :return: a list of class strings ''' class_list_raw = definition.split('|') class_list = [cls.strip_extra(class_def) for class_def in class_list_raw] return class_list @classmethod def strip_extra(cls, one_def): """ Remove extra characters around the class name. :type one_def: str """ no_var = one_def.replace('@var', '') no_star = no_var.replace('*', '') class_def = no_star.lstrip().rstrip() return class_def @classmethod def get_var_name(cls, class_name): """ Return the variable name based on the class name The name is the same with the first letter turned into local case. :type class_name: str """ var_name = class_name[0].lower() + class_name[1:] return var_name
# -*- coding: utf-8 -*- """ API interfaces. License: BSD (c) 2008 ::: www.CodeResort.com - BV Network AS (<EMAIL>) """ from trac.core import Interface class IBlogChangeListener(Interface): """Extension point interface for components that should get notified about creation, change or deletion of blog posts + adds and deletes of blog comments.""" def blog_post_changed(postname, version): """Called when a new blog post 'postname' with 'version' is added . version==1 denotes a new post, version>1 is a new version on existing post.""" def blog_post_deleted(postname, version, fields): """Called when a blog post is deleted: version==0 means all versions (or last remaining) version is deleted. Any version>0 denotes a specific version only. Fields is a dict with the pre-existing values of the blog post. If all (or last) the dict will contain the 'current' version contents.""" def blog_comment_added(postname, number): """Called when Blog comment number N on post 'postname' is added.""" def blog_comment_deleted(postname, number, fields): """Called when blog post comment 'number' is deleted. number==0 denotes all comments is deleted and fields will be empty. (usually follows a delete of the blog post). number>0 denotes a specific comment is deleted, and fields will contain the values of the fields as they existed pre-delete.""" class IBlogManipulator(Interface): """Extension point interface for components that need to manipulate the content of blog posts and comments before insertion. Unlike change listeners, a manipulator can reject changes being committed to the database. """ def validate_blog_post(req, postname, version, fields): """Validate blog post fields before they are to be inserted as new version. version==1 denotes a new post, version>1 is a new version on existing post. Fields is a dict of the fields needed for insert by model.BlogPost. Must return a list of `(field, message)` tuples, one for each problem detected. `field` can be `None` to indicate an overall problem with the post. Therefore, a return value of `[]` means everything is OK.""" def validate_blog_comment(req, postname, fields): """Validate new blog fields before comment gets added to 'postname' Fields is a dict of the fields needed for insert by model.BlogComment. Must return a list of `(field, message)` tuples, one for each problem detected. `field` can be `None` to indicate an overall problem with the comment. Therefore, a return value of `[]` means everything is OK."""
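# Illustrative sketch: a minimal component implementing IBlogChangeListener. It
# assumes the usual Trac extension-point pattern (trac.core.Component plus
# implements) and only logs the notifications; the class name is made up.
from trac.core import Component, implements


class BlogChangeLogger(Component):
    implements(IBlogChangeListener)

    def blog_post_changed(self, postname, version):
        self.log.info("blog post %s changed to version %s", postname, version)

    def blog_post_deleted(self, postname, version, fields):
        self.log.info("blog post %s version %s deleted", postname, version)

    def blog_comment_added(self, postname, number):
        self.log.info("comment %s added to post %s", number, postname)

    def blog_comment_deleted(self, postname, number, fields):
        self.log.info("comment %s deleted from post %s", number, postname)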
<filename>Defs/ex109b/dinheiro/__init__.py ''' def aumentar(n=0, taxa=0): soma = n + (n*taxa)/100 return soma def diminuir(n=0, taxa=0): dim = n - (n*taxa)/100 return dim def real(n=0, simbolo='R$'): return f'\033[31m{simbolo}{n:0.2f}\033[m'.replace('.',',') ''' def aumento(n=0, taxa=0, formato=False): soma = n + (n * taxa)/100 return f'{soma:0.0f}' if formato is False else real(soma) def diminuir(n, taxa, formato=False): dim = n - (n * taxa)/100 return f'{dim:0.0f}' if formato is False else real(dim) def formatando(msg): tamanho = len(msg) + 4 print('='*tamanho) print(' ', msg) print('='*tamanho) def real(n=0, simbolo='R$'): return f'{simbolo}{n:0.2f}'.replace('.',',')
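# Illustrative sketch: note that aumento() and diminuir() return formatted strings
# (rounded to whole units, or currency-formatted when formato=True), not numbers.
if __name__ == "__main__":
    print(aumento(100, 10))                 # '110'
    print(aumento(100, 10, formato=True))   # 'R$110,00'
    print(diminuir(100, 10, formato=True))  # 'R$90,00'
    print(real(13.5))                       # 'R$13,50'
    formatando("Resumo de preços")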
import time from typing import List, Dict import matplotlib.pyplot as plt import seaborn as sns from scipy import stats import board_reader import expirement_gui.one_dim_control as one_dim import expirement_gui.tk_plots as tk_plots import feature_extraction channels = {"o1": 1, "c3": 2, "fp2": 3, "fp1": 4, "c4": 5, "cz": 6, "fz": 7, "o2": 8} PRE_EXPERIMENT_AVG_TIME_S = 5 SAMP_RATE = 250 BAND_FEATURE_LOW_FREQ = 10 BAND_FEATURE_HIGH_FREQ = 12 TRIAL_LENGTH_S = 10 NUM_TRIALS = 20 def get_psd_feature( board: board_reader.BoardReader, psd_extractor: feature_extraction.PSDFeatureExtractor, data_len_s: float, channel_id: str = "c3", ): data = board.get_board_data(int(data_len_s * board.get_sampling_rate())) c3 = data[channels[channel_id]] psd_extractor.process_data(c3) return psd_extractor.get_band_power(BAND_FEATURE_LOW_FREQ, BAND_FEATURE_HIGH_FREQ) def chart_bands( primary_feature, psd_extractor: feature_extraction.PSDFeatureExtractor, band_power_chart: tk_plots.BandPowerChart, ): standard_bands = [(0.5, 4), (4, 7), (8, 15), (16, 31)] values = [primary_feature] # start with our primary feature and append others on for band in standard_bands: start_freq, end_freq = band values.append(psd_extractor.get_band_power(start_freq, end_freq)) band_power_chart.bar(values) def pre_experiment( board: board_reader.BoardReader, psd_extractor: feature_extraction.PSDFeatureExtractor, band_power_chart, psd_chart: tk_plots.PSDPlot, ) -> float: """ :return: average band power for pre-experiment phase """ print( f"Taking PSD baseline before start, will take {PRE_EXPERIMENT_AVG_TIME_S} seconds" ) time.sleep(PRE_EXPERIMENT_AVG_TIME_S) # let the board reader collect data band_power_feature = get_psd_feature( board, psd_extractor, PRE_EXPERIMENT_AVG_TIME_S ) chart_bands(band_power_feature, psd_extractor, band_power_chart) psd_chart.plot_psd(psd_extractor.psd) return band_power_feature def run_single_trial( board: board_reader.BoardReader, psd_extractor: feature_extraction.PSDFeatureExtractor, band_power_chart: tk_plots.BandPowerChart, psd_chart: tk_plots.PSDPlot, one_dim_experiment: one_dim.OneDimensionControlExperiment, band_power_avg: float, ) -> List[float]: band_power_values = [] print("Starting experiment") time_start = time.time() while ( time.time() - time_start < TRIAL_LENGTH_S and not one_dim_experiment.target_reached ): time_remaining = int(time_start + TRIAL_LENGTH_S - time.time()) one_dim_experiment.write_status_text( f"Trial in progress... 
{time_remaining} seconds remaining" ) time.sleep(0.1) # let another tenth of a second worth of data accrue band_power_feature = get_psd_feature(board, psd_extractor, data_len_s=3) print( f"Band power {BAND_FEATURE_LOW_FREQ}-{BAND_FEATURE_HIGH_FREQ}Hz for last {3} seconds: {band_power_feature} - compared against average {band_power_avg}" ) band_power_values.append(band_power_feature) chart_bands(band_power_feature, psd_extractor, band_power_chart) psd_chart.plot_psd(psd_extractor.psd) velocity = 0 if band_power_feature < 1.2: velocity = -150 if band_power_feature > 1.8: velocity = 50 if band_power_feature > 2.1: velocity = 200 if band_power_feature > 4: velocity = 400 # if band_power_feature > band_power_avg: # one_dim_experiment.cursor.set_velocity(150) # down # else: # one_dim_experiment.cursor.set_velocity(-150) # up one_dim_experiment.cursor.set_velocity(velocity) one_dim_experiment.update() print(f"Target reached: {one_dim_experiment.target_reached}") if not one_dim_experiment.target_reached: one_dim_experiment.notify_target_not_reached() one_dim_experiment.update() one_dim_experiment.cursor.set_velocity(0) return band_power_values def main(): band_power_values_all_trials: Dict[ one_dim.OneDimensionControlExperiment.TargetPos, List[float] ] = { one_dim.OneDimensionControlExperiment.TargetPos.TOP: [], one_dim.OneDimensionControlExperiment.TargetPos.BOTTOM: [], } one_dim_experiment = one_dim.OneDimensionControlExperiment(num_trials=NUM_TRIALS) band_power_chart = tk_plots.BandPowerChart( one_dim_experiment.plots_canvas, y_min=0, y_max=10, band_labels=[ f"{BAND_FEATURE_LOW_FREQ}-{BAND_FEATURE_HIGH_FREQ} Hz", "Delta", "Theta", "Alpha", "Beta", ], ) psd_chart = tk_plots.PSDPlot( one_dim_experiment.plots_canvas, highlight_region=(BAND_FEATURE_LOW_FREQ, BAND_FEATURE_HIGH_FREQ), ) board = board_reader.BoardReader() # defaults to Cyton board_reader.FileWriter(board) psd_feature_extractor = feature_extraction.PSDFeatureExtractor( board.get_sampling_rate() ) with board: one_dim_experiment.write_status_text("5 second PSD averaging") # average = pre_experiment( # board, psd_feature_extractor, band_power_chart, psd_chart # ) time.sleep(3) average = 1 print(f"Average band power 10-12Hz = {average}") for i in range(0, NUM_TRIALS): band_power_values = run_single_trial( board, psd_feature_extractor, band_power_chart, psd_chart, one_dim_experiment, average, ) band_power_values_all_trials[one_dim_experiment.target_position].extend( band_power_values ) print("Waiting 3 seconds before next trial") time.sleep(3) print("Resetting GUI") if i != NUM_TRIALS - 1: # don't reset at end of experiment one_dim_experiment.reset() print("Experiment complete") print( f"Final results:\n" f"\tTop hit: {one_dim_experiment.top_hit}" f"\t\tBottom hit - {one_dim_experiment.bottom_hit}\n" f"\tNum top - {NUM_TRIALS / 2}" f"\t\tNum bottom - {NUM_TRIALS / 2}" ) plt.close("all") for pos in [ one_dim.OneDimensionControlExperiment.TargetPos.TOP, one_dim.OneDimensionControlExperiment.TargetPos.BOTTOM, ]: pos_data = band_power_values_all_trials[pos] ax: plt.Axes = sns.distplot( pos_data, hist=False, kde=True, kde_kws={"shade": True, "linewidth": 3}, label=f"{pos} - nobs: {len(pos_data)}", ) ax.set_xlim(0, 7.5) plt.xlabel("Power Spectral Density") plt.ylabel("Frequency Density") plt.title("PSD Distribution by Target Position") plt.legend() plt.show() print("Top target band power stats:") print( stats.describe( band_power_values_all_trials[one_dim_experiment.TargetPos.TOP] ) ) print("Bottom target band power stats:") print( 
stats.describe( band_power_values_all_trials[one_dim_experiment.TargetPos.BOTTOM] ) ) if __name__ == "__main__": main()
<gh_stars>0 import atnp.utils as utils import requests import csv import re import os def slice_url(url): match = re.search(utils.LINK_PATTERN, url) return match.group(1), match.group(2), match.group(3) def gen_unique_name(domain, path): return "{}__{}".format(domain, path.replace("/", "_")) def makerequest(row, header): url = row[header.index("url")] request = requests.get(url) _, domain, path = slice_url(url) print("[%d] %s" % (request.status_code, url)) return { "fileid": gen_unique_name(domain, path), "url": url, "subject": row[header.index("subject")], "journal": row[header.index("journal")], "html": request.text } def report(links_file, destination): print("Making report of file %s to %s" % (links_file, destination)) utils.create_if_not_exists(destination) files = os.listdir(destination) links = csv.reader(open(links_file, newline="\n"), delimiter=',') header = next(links) lines = count_not_downloaded = count_dupl = file_abnormal = 0 fileids = [] for row in links: lines = lines + 1 _, domain, path = slice_url(row[header.index("url")]) fileid = gen_unique_name(domain, path) + ".json" if fileid not in files: count_not_downloaded = count_not_downloaded + 1 print("[%s] Not Downloaded" % row[header.index("url")]) if fileid in fileids: count_dupl = count_dupl + 1 print("[%s] Duplicatet" % fileid) fileids.append(fileid) for filename in files: if filename not in fileids: file_abnormal = file_abnormal + 1 print("[%s] Abnormal" % filename) print("\n########################\n") print("%0*d Lines in csv %s" % (3, lines, links_file)) print("%0*d Files Downloaded" % (3, len(files))) print("%0*d Files not downloaded" % (3, count_not_downloaded)) print("%0*d Files duplicated" % (3, count_dupl)) print("%0*d Files abnormals" % (3, file_abnormal)) def download(links_file, destination): print("Making requests of file %s to %s" % (links_file, destination)) with open(links_file, newline="\n") as links: reader = csv.reader(links, delimiter=',') header = next(reader) for row in reader: filejson = makerequest(row, header=header) utils.save_json(destination, filejson["fileid"], filejson)
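# Illustrative sketch of the expected call sequence. Assumes links.csv has the
# url/subject/journal columns read above and that the destination directory is
# writable; both the file name and the directory here are hypothetical.
if __name__ == "__main__":
    download("links.csv", "raw_html/")  # fetch every URL and dump one JSON per page
    report("links.csv", "raw_html/")    # cross-check the CSV against the downloads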
<filename>tests/test_onchain_registry.py import pytest from brownie import BadgerRegistry # noqa from brownie import accounts from badger_utils.constants import ETHEREUM_NETWORK from badger_utils.registry import chain_registries @pytest.mark.parametrize( "network", [ "eth", "polygon", "arbitrum", ] ) def test_get_all_registries(isolate, badger_registry, mocker, network): mocker.patch( "badger_utils.registry.on_chain_registries.BADGER_REGISTRY_ADDRESS", badger_registry.address, ) chain_registries.initialize() assert chain_registries.get_registry(network) is not None @pytest.mark.parametrize( "network", [ "mainnet", "polygon", "arbitrum", ] ) def test_get_active_registry(isolate, badger_registry, mocker, network): mocker.patch( "badger_utils.network_manager.network.show_active", return_value=network, ) mocker.patch( "badger_utils.registry.on_chain_registries.BADGER_REGISTRY_ADDRESS", badger_registry.address, ) chain_registries.initialize() assert chain_registries.get_active_chain_registry() is not None def test_get_key_value_from_registry(isolate, badger_registry, mocker): # Deploy badger registry mocker.patch( "badger_utils.registry.on_chain_registries.BADGER_REGISTRY_ADDRESS", badger_registry.address, ) chain_registries.initialize() badger_registry.initialize(accounts[0], {'from': accounts[0]}) # Populate with some random address value badger_registry.set( "badgerTree", "0x635EB2C39C75954bb53Ebc011BDC6AfAAcE115A6", {'from': accounts[0]}, ) eth_registry = chain_registries.get_registry(ETHEREUM_NETWORK) assert eth_registry.get("badgerTree") == "0x635EB2C39C75954bb53Ebc011BDC6AfAAcE115A6" def test_has_key_registry(isolate, badger_registry, mocker): # Deploy badger registry mocker.patch( "badger_utils.registry.on_chain_registries.BADGER_REGISTRY_ADDRESS", badger_registry.address, ) chain_registries.initialize() assert chain_registries.has_registry(ETHEREUM_NETWORK) assert not chain_registries.has_registry("cardano") def test_get_key_value_from_registry__does_not_exist(isolate, badger_registry, mocker): # Deploy badger registry mocker.patch( "badger_utils.registry.on_chain_registries.BADGER_REGISTRY_ADDRESS", badger_registry.address, ) chain_registries.initialize() badger_registry.initialize(accounts[0], {'from': accounts[0]}) eth_registry = chain_registries.get_registry(ETHEREUM_NETWORK) assert eth_registry.get("badgerTree") == "0x0000000000000000000000000000000000000000"
import pickle from collections import Counter from math import log from typing import List, Dict, Tuple import numpy as np from scipy.sparse import csr_matrix from scipy.spatial.distance import cosine from common import check_data_set, flatten_nested_iterables from preprocessors.configs import PreProcessingConfigs from utils.file_ops import create_dir, check_paths def extract_word_to_doc_ids(docs_of_words: List[List[str]]) -> Dict[str, List[int]]: """Extracted the document ids where unique words appeared.""" word_to_doc_ids = {} for doc_id, words in enumerate(docs_of_words): appeared_words = set() for word in words: if word not in appeared_words: if word in word_to_doc_ids: word_to_doc_ids[word].append(doc_id) else: word_to_doc_ids[word] = [doc_id] appeared_words.add(word) return word_to_doc_ids def extract_word_to_doc_counts(word_to_doc_ids: Dict[str, List[int]]) -> Dict[str, int]: return {word: len(doc_ids) for word, doc_ids in word_to_doc_ids.items()} def extract_windows(docs_of_words: List[List[str]], window_size: int) -> List[List[str]]: """Word co-occurrence with context windows""" windows = [] for doc_words in docs_of_words: doc_len = len(doc_words) if doc_len <= window_size: windows.append(doc_words) else: for j in range(doc_len - window_size + 1): window = doc_words[j: j + window_size] windows.append(window) return windows def extract_word_counts_in_windows(windows_of_words: List[List[str]]) -> Dict[str, int]: """Find the total count of unique words in each window, each window is bag-of-words""" bags_of_words = map(set, windows_of_words) return Counter(flatten_nested_iterables(bags_of_words)) def extract_word_ids_pair_to_counts(windows_of_words: List[List[str]], word_to_id: Dict[str, int]) -> Dict[str, int]: word_ids_pair_to_counts = Counter() for window in windows_of_words: for i in range(1, len(window)): word_id_i = word_to_id[window[i]] for j in range(i): word_id_j = word_to_id[window[j]] if word_id_i != word_id_j: word_ids_pair_to_counts.update(['{},{}'.format(word_id_i, word_id_j), '{},{}'.format(word_id_j, word_id_i)]) return dict(word_ids_pair_to_counts) def extract_pmi_word_weights(windows_of_words: List[List[str]], word_to_id: Dict[str, int], vocab: List[str], train_size: int) -> Tuple[List[int], List[int], List[float]]: """Calculate PMI as weights""" weight_rows = [] # type: List[int] weight_cols = [] # type: List[int] pmi_weights = [] # type: List[float] num_windows = len(windows_of_words) word_counts_in_windows = extract_word_counts_in_windows(windows_of_words=windows_of_words) word_ids_pair_to_counts = extract_word_ids_pair_to_counts(windows_of_words, word_to_id) for word_id_pair, count in word_ids_pair_to_counts.items(): word_ids_in_str = word_id_pair.split(',') word_id_i, word_id_j = int(word_ids_in_str[0]), int(word_ids_in_str[1]) word_i, word_j = vocab[word_id_i], vocab[word_id_j] word_freq_i, word_freq_j = word_counts_in_windows[word_i], word_counts_in_windows[word_j] pmi_score = log((1.0 * count / num_windows) / (1.0 * word_freq_i * word_freq_j / (num_windows * num_windows))) if pmi_score > 0.0: weight_rows.append(train_size + word_id_i) weight_cols.append(train_size + word_id_j) pmi_weights.append(pmi_score) return weight_rows, weight_cols, pmi_weights def extract_cosine_similarity_word_weights(vocab: List[str], train_size: int, word_vec_path: str) -> Tuple[List[int], List[int], List[float]]: """Calculate Cosine Similarity of Word Vectors as weights""" word_vectors = pickle.load(file=open(word_vec_path, 'rb')) # type: Dict[str,List[float]] weight_rows = [] # 
type: List[int] weight_cols = [] # type: List[int] cos_sim_weights = [] # type: List[float] for i, word_i in enumerate(vocab): for j, word_j in enumerate(vocab): if word_i in word_vectors and word_j in word_vectors: vector_i = np.array(word_vectors[word_i]) vector_j = np.array(word_vectors[word_j]) similarity = 1.0 - cosine(vector_i, vector_j) if similarity > 0.9: print(word_i, word_j, similarity) weight_rows.append(train_size + i) weight_cols.append(train_size + j) cos_sim_weights.append(similarity) return weight_rows, weight_cols, cos_sim_weights def extract_doc_word_ids_pair_to_counts(docs_of_words: List[List[str]], word_to_id: Dict[str, int]) -> Dict[str, int]: doc_word_freq = Counter() for doc_id, doc_words in enumerate(docs_of_words): for word in doc_words: word_id = word_to_id[word] doc_word_freq.update([str(doc_id) + ',' + str(word_id)]) return dict(doc_word_freq) def extract_tf_idf_doc_word_weights( adj_rows: List[int], adj_cols: List[int], adj_weights: List[float], vocab: List[str], train_size: int, docs_of_words: List[List[str]], word_to_id: Dict[str, int]) -> Tuple[List[int], List[int], List[float]]: """Extract Doc-Word weights with TF-IDF""" doc_word_ids_pair_to_counts = extract_doc_word_ids_pair_to_counts(docs_of_words, word_to_id) word_to_doc_ids = extract_word_to_doc_ids(docs_of_words=docs_of_words) word_to_doc_counts = extract_word_to_doc_counts(word_to_doc_ids=word_to_doc_ids) vocab_len = len(vocab) num_docs = len(docs_of_words) for doc_id, doc_words in enumerate(docs_of_words): doc_word_set = set() for word in doc_words: if word not in doc_word_set: word_id = word_to_id[word] word_ids_pair_count = doc_word_ids_pair_to_counts[str(doc_id) + ',' + str(word_id)] adj_rows.append(doc_id if doc_id < train_size else doc_id + vocab_len) adj_cols.append(train_size + word_id) doc_word_idf = log(1.0 * num_docs / word_to_doc_counts[vocab[word_id]]) adj_weights.append(word_ids_pair_count * doc_word_idf) doc_word_set.add(word) return adj_rows, adj_cols, adj_weights def build_adjacency(ds_name: str, cfg: PreProcessingConfigs): """Build Adjacency Matrix of Doc-Word Heterogeneous Graph""" # input files ds_corpus = cfg.corpus_shuffled_dir + ds_name + ".txt" ds_corpus_vocabulary = cfg.corpus_shuffled_vocab_dir + ds_name + '.vocab' ds_corpus_train_idx = cfg.corpus_shuffled_split_index_dir + ds_name + '.train' ds_corpus_test_idx = cfg.corpus_shuffled_split_index_dir + ds_name + '.test' # checkers check_data_set(data_set_name=ds_name, all_data_set_names=cfg.data_sets) check_paths(ds_corpus, ds_corpus_vocabulary, ds_corpus_train_idx, ds_corpus_test_idx) create_dir(dir_path=cfg.corpus_shuffled_adjacency_dir, overwrite=False) docs_of_words = [line.split() for line in open(file=ds_corpus)] vocab = open(ds_corpus_vocabulary).read().splitlines() # Extract Vocabulary. word_to_id = {word: i for i, word in enumerate(vocab)} # Word to its id. train_size = len(open(ds_corpus_train_idx).readlines()) # Real train-size, not adjusted. test_size = len(open(ds_corpus_test_idx).readlines()) # Real test-size. 
windows_of_words = extract_windows(docs_of_words=docs_of_words, window_size=20) # Extract word-word weights rows, cols, weights = extract_pmi_word_weights(windows_of_words, word_to_id, vocab, train_size) # As an alternative, use cosine similarity of word vectors as weights: # ds_corpus_word_vectors = cfg.CORPUS_WORD_VECTORS_DIR + ds_name + '.word_vectors' # rows, cols, weights = extract_cosine_similarity_word_weights(vocab, train_size, ds_corpus_word_vectors) # Extract word-doc weights rows, cols, weights = extract_tf_idf_doc_word_weights(rows, cols, weights, vocab, train_size, docs_of_words, word_to_id) adjacency_len = train_size + len(vocab) + test_size adjacency_matrix = csr_matrix((weights, (rows, cols)), shape=(adjacency_len, adjacency_len)) # Dump Adjacency Matrix with open(cfg.corpus_shuffled_adjacency_dir + "/ind.{}.adj".format(ds_name), 'wb') as f: pickle.dump(adjacency_matrix, f) print("[INFO] Adjacency Dir='{}'".format(cfg.corpus_shuffled_adjacency_dir)) print("[INFO] ========= EXTRACTED ADJACENCY MATRIX: Heterogenous doc-word adjacency matrix. =========")
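# Illustrative sketch: building word-word PMI weights for a toy corpus with the
# helpers above. Document nodes occupy ids [0, train_size) and word nodes are
# offset by train_size, matching build_adjacency(); the toy corpus is made up.
def _toy_pmi_demo():
    docs_of_words = [["graph", "neural", "network"],
                     ["graph", "convolution", "network"]]
    vocab = sorted({word for doc in docs_of_words for word in doc})
    word_to_id = {word: i for i, word in enumerate(vocab)}

    windows = extract_windows(docs_of_words, window_size=3)
    rows, cols, weights = extract_pmi_word_weights(windows, word_to_id, vocab,
                                                   train_size=len(docs_of_words))
    return list(zip(rows, cols, weights))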
from crispy_forms.helper import FormHelper
from django import forms

from payroll.models import CSV


class CSVForm(forms.ModelForm):
    class Meta:
        model = CSV
        fields = ("file_name",)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # crispy-forms looks for a lowercase `helper` attribute on the form.
        self.helper = FormHelper()
<gh_stars>0 import unittest from models.user import User, pwd_context from models.error import UserError class TestUser(unittest.TestCase): def setUp(self): self.username = "testuser" self.password = "<PASSWORD>" self.wrong_pass = "<PASSWORD>" self.secret_key = "to test or not to test" def test_user_model_rejects_init_without_argument(self): error = None try: User() except TypeError as e: error = e self.assertNotEqual(error, None) # error must be defined def test_user_model_rejects_init_without_username(self): error = None try: User({}) except UserError as e: error = e self.assertNotEqual(error, None) # error must be defined self.assertEqual(error.message, "user_data must have property 'username'") def test_user_model_rejects_init_with_non_object(self): error = None try: User(9) except TypeError as e: error = e self.assertNotEqual(error, None) # error must be defined def test_user_model_rejects_init_with_nonstring_username(self): error = None try: User({"username": 9}) except UserError as e: error = e self.assertNotEqual(error, None) # error must be defined self.assertEqual(error.message, "username must be a string") def test_user_model_accepts_init_with_username(self): user = User({"username": self.username}) self.assertEqual(user.username, self.username) def test_user_model_rejects_nonstring_password(self): user = User({"username": self.username}) error = None try: user.hash_password(9) except TypeError as e: error = e self.assertNotEqual(error, None) # error must be defined def test_user_model_accepts_string_password(self): user = User({"username": self.username}) user.hash_password(self.password) self.assertTrue(pwd_context.verify(self.password, user.password_hash)) def test_user_model_rejects_wrong_password(self): user = User({"username": self.username}) user.hash_password(self.password) self.assertFalse(user.verify_password(self.wrong_pass)) def test_user_object_has_json_representation(self): user = User({"username": self.username}) user.hash_password(self.password) user_json = user.json() self.assertEqual(user_json["username"], self.username) self.assertTrue("password_hash" in user_json)
# Documents/Router/CVE-2017-7494/impacket/Dot11KeyManager.py
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
#  IEEE 802.11 Network packet codecs.
#
# Author:
# <NAME>

from array import array


class KeyManager:
    def __init__(self):
        self.keys = {}

    def __get_bssid_hasheable_type(self, bssid):
        # Lists are unhashable, so normalize every accepted type to a tuple.
        if not isinstance(bssid, (list, tuple, array)):
            raise Exception('BSSID datatype must be a tuple, list or array')
        return tuple(bssid)

    def add_key(self, bssid, key):
        bssid = self.__get_bssid_hasheable_type(bssid)
        if bssid not in self.keys:
            self.keys[bssid] = key
            return True
        return False

    def replace_key(self, bssid, key):
        bssid = self.__get_bssid_hasheable_type(bssid)
        self.keys[bssid] = key
        return True

    def get_key(self, bssid):
        bssid = self.__get_bssid_hasheable_type(bssid)
        if bssid in self.keys:
            return self.keys[bssid]
        return False

    def delete_key(self, bssid):
        # __get_bssid_hasheable_type already validates the type and converts the
        # BSSID to a tuple, so no further isinstance check is needed here.
        bssid = self.__get_bssid_hasheable_type(bssid)
        if bssid in self.keys:
            del self.keys[bssid]
            return True
        return False
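# Illustrative sketch: BSSIDs are passed as a list/tuple/array of octets and are
# stored internally as tuples, so any of those forms addresses the same entry.
# The BSSID and key bytes below are made up.
if __name__ == "__main__":
    km = KeyManager()
    km.add_key([0x00, 0x18, 0x39, 0xAA, 0xBB, 0xCC], b"\x01\x02\x03\x04\x05")
    print(km.get_key((0x00, 0x18, 0x39, 0xAA, 0xBB, 0xCC)))  # same BSSID as a tuple
    km.delete_key([0x00, 0x18, 0x39, 0xAA, 0xBB, 0xCC])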
<filename>tests/schema/test_checker.py<gh_stars>0 import pytest from align.schema.checker import Z3Checker, SolutionNotFoundError @pytest.fixture def checker(): return Z3Checker() def test_single_bbox_checking(checker): b1 = checker.bbox_vars('M1') checker.append(b1.llx < b1.urx) checker.solve() checker.append(b1.urx < b1.llx) with pytest.raises(SolutionNotFoundError): checker.solve() def test_multi_bbox_checking(checker): b1, b2 = checker.iter_bbox_vars(['M1', 'M2']) checker.append(b1.llx < b1.urx) checker.append(b2.llx < b2.urx) checker.append(b2.urx <= b1.llx) checker.solve() checker.append(b1.urx <= b1.llx) with pytest.raises(SolutionNotFoundError): checker.solve()
<filename>openbook_posts/migrations/0068_profilepostscommunityexclusion.py # Generated by Django 2.2.5 on 2020-01-30 13:27 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('openbook_communities', '0033_auto_20191209_1337'), ('openbook_posts', '0067_merge_20191202_1731'), ] operations = [ migrations.CreateModel( name='ProfilePostsCommunityExclusion', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created', models.DateTimeField(db_index=True, editable=False)), ('community', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profile_posts_community_exclusions', to='openbook_communities.Community')), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profile_posts_community_exclusions', to=settings.AUTH_USER_MODEL)), ], options={ 'unique_together': {('user', 'community')}, }, ), ]
from flask import render_template, url_for, request, redirect from web_app import app from web_app import get_stats @app.route("/", methods=["GET", "POST"]) @app.route("/home.html", methods=["GET", "POST"]) def home(): if request.method == "POST": band_name = request.form['band'] if get_stats.return_stats(band_name) == "albums_not_found": # TODO make something print("Albums not found") return render_template('home.html')
import os from alive_progress import alive_bar import numpy as np import torch from common.camera import normalize_screen_coordinates, world_to_camera from common.loss import mpjpe, p_mpjpe from common.utils import deterministic_random from model.VideoPose3D import TemporalModel, TemporalModelOptimized1f def load_dataset(data_dir: str, dataset_type: str, keypoints_type: str): print('Loading dataset...') dataset_path = data_dir + 'data_3d_' + dataset_type + '.npz' if dataset_type == "h36m": from datasets.h36m import Human36mDataset dataset = Human36mDataset(dataset_path) else: raise KeyError('Invalid dataset') print('Preparing data') # TODO ? for subject in dataset.subjects(): for action in dataset[subject].keys(): anim = dataset[subject][action] if 'positions' in anim: positions_3d = [] for cam in anim['cameras']: pos_3d = world_to_camera( anim['positions'], R=cam['orientation'], t=cam['translation']) # Remove global offset, but keep trajectory in first position pos_3d[:, 1:] -= pos_3d[:, :1] positions_3d.append(pos_3d) anim['positions_3d'] = positions_3d print('Loading 2D detections...') keypoints = np.load(data_dir + 'data_2d_' + dataset_type + '_' + keypoints_type + '.npz', allow_pickle=True) keypoints_metadata = keypoints['metadata'].item() keypoints_symmetry = keypoints_metadata['keypoints_symmetry'] kps_left, kps_right = list( keypoints_symmetry[0]), list(keypoints_symmetry[1]) joints_left, joints_right = list(dataset.skeleton().joints_left()), list( dataset.skeleton().joints_right()) keypoints = keypoints['positions_2d'].item() for subject in dataset.subjects(): assert subject in keypoints, 'Subject {} is missing from the 2D detections dataset'.format( subject) for action in dataset[subject].keys(): assert action in keypoints[subject], 'Action {} of subject {} is missing from the 2D detections dataset'.format( action, subject) if 'positions_3d' not in dataset[subject][action]: continue for cam_idx in range(len(keypoints[subject][action])): # We check for >= instead of == because some videos in H3.6M contain extra frames mocap_length = dataset[subject][action]['positions_3d'][cam_idx].shape[0] assert keypoints[subject][action][cam_idx].shape[0] >= mocap_length if keypoints[subject][action][cam_idx].shape[0] > mocap_length: # Shorten sequence keypoints[subject][action][cam_idx] = keypoints[subject][action][cam_idx][:mocap_length] assert len(keypoints[subject][action]) == len( dataset[subject][action]['positions_3d']) for subject in keypoints.keys(): for action in keypoints[subject]: for cam_idx, kps in enumerate(keypoints[subject][action]): # Normalize camera frame cam = dataset.cameras()[subject][cam_idx] kps[..., :2] = normalize_screen_coordinates( kps[..., :2], w=cam['res_w'], h=cam['res_h']) keypoints[subject][action][cam_idx] = kps return dataset, keypoints, keypoints_metadata, kps_left, kps_right, joints_left, joints_right def load_dataset_ntu(data_dir: str, dataset_type: str, keypoints_type: str, use_depth: bool): print('Loading dataset...') dataset_path = data_dir + 'data_3d_' + dataset_type + '.npz' if dataset_type == "ntu": from datasets.ntu_rgbd import NTU_RGBD dataset = NTU_RGBD(dataset_path) else: raise KeyError('Invalid dataset') print('Preparing data NTU') for subject in dataset.subjects(): for action in dataset[subject].keys(): anim = dataset[subject][action] positions_3d = [] for cam in anim.keys(): for seg in anim[cam].keys(): pos_3d = anim[cam][seg] pos_3d[:, 1:] -= pos_3d[:, :1] positions_3d.append(pos_3d) anim['positions_3d'] = positions_3d print('Loading 2D 
detections...') keypoints = np.load(data_dir + 'data_2d_' + dataset_type + '_' + keypoints_type + '.npz', allow_pickle=True) # keypoints_metadata = keypoints['metadata'].item() # keypoints_metadata = keypoints_metadata['keypoints_symmetry'] kps_left, kps_right = list(dataset.skeleton().joints_left()), list( dataset.skeleton().joints_right()) keypoints_metadata = [kps_left, kps_right] # not use joints_left, joints_right = list(dataset.skeleton().joints_left()), list( dataset.skeleton().joints_right()) keypoints = keypoints['positions_2d'].item() depth_vecs = {} if use_depth: print("Loading depth vec...") depth_vecs = np.load(data_dir+'data_dep'+'_'+dataset_type + '.npz', allow_pickle=True) depth_vecs = depth_vecs['depths'].item() valid_indexes = dataset.valid_indexes() for subject in dataset.subjects(): assert subject in keypoints, 'Subject {} is missing from the 2D detections dataset'.format( subject) for action in dataset[subject].keys(): assert action in keypoints[subject], 'Action {} of subject {} is missing from the 2D detections dataset'.format( action, subject) if 'positions_3d' not in dataset[subject][action]: continue keypoints_2d = [] for cam in keypoints[subject][action].keys(): for seg in keypoints[subject][action][cam].keys(): kpt_2d = keypoints[subject][action][cam][seg][:, valid_indexes] if use_depth: d_vec = depth_vecs[subject][action][cam][seg][:, valid_indexes] kpt_2d = np.concatenate((kpt_2d, d_vec), -1) assert kpt_2d.shape[-1] == 3 keypoints_2d.append(kpt_2d) keypoints[subject][action] = keypoints_2d assert len(keypoints[subject][action]) == len( dataset[subject][action]['positions_3d']) for subject in keypoints.keys(): for action in keypoints[subject]: for seg_idx, kps in enumerate(keypoints[subject][action]): # Normalize camera frame kps[..., :2] = normalize_screen_coordinates( kps[..., :2], w=1920, h=1080) if use_depth: assert kps.shape[-1] == 3, "No depth dimentions with tensor shape: {}".format( kps.shape) kps[..., 2] = kps[..., 2] / 20.0 # TODO: better norm keypoints[subject][action][seg_idx] = kps return dataset, keypoints, keypoints_metadata, kps_left, kps_right, joints_left, joints_right def fetch(subjects, dataset, keypoints, action_filter=None, downsample=5, subset=1, parse_3d_poses=True): out_poses_3d = [] out_poses_2d = [] out_camera_params = [] for subject in subjects: for action in keypoints[subject].keys(): if action_filter is not None: found = False for a in action_filter: if action.startswith(a): found = True break if not found: continue poses_2d = keypoints[subject][action] for i in range(len(poses_2d)): # Iterate across cameras out_poses_2d.append(poses_2d[i]) if subject in dataset.cameras(): cams = dataset.cameras()[subject] assert len(cams) == len(poses_2d), 'Camera count mismatch' for cam in cams: if 'intrinsic' in cam: out_camera_params.append(cam['intrinsic']) if parse_3d_poses and 'positions_3d' in dataset[subject][action]: poses_3d = dataset[subject][action]['positions_3d'] assert len(poses_3d) == len(poses_2d), 'Camera count mismatch' for i in range(len(poses_3d)): # Iterate across cameras out_poses_3d.append(poses_3d[i]) if len(out_camera_params) == 0: out_camera_params = None if len(out_poses_3d) == 0: out_poses_3d = None stride = downsample if subset < 1: for i in range(len(out_poses_2d)): n_frames = int(round(len(out_poses_2d[i])//stride * subset)*stride) start = deterministic_random( 0, len(out_poses_2d[i]) - n_frames + 1, str(len(out_poses_2d[i]))) out_poses_2d[i] = out_poses_2d[i][start:start+n_frames:stride] if out_poses_3d is not 
None: out_poses_3d[i] = out_poses_3d[i][start:start+n_frames:stride] elif stride > 1: # Downsample as requested for i in range(len(out_poses_2d)): out_poses_2d[i] = out_poses_2d[i][::stride] if out_poses_3d is not None: out_poses_3d[i] = out_poses_3d[i][::stride] return out_camera_params, out_poses_3d, out_poses_2d def fetch_ntu(subjects, dataset, keypoints, action_filter=None, downsample=5, subset=1, parse_3d_poses=True): out_poses_3d = [] out_poses_2d = [] out_camera_params = [] for subject in subjects: for action in keypoints[subject].keys(): if action_filter is not None: found = False for a in action_filter: if action.startswith(a): found = True break if not found: continue poses_2d = keypoints[subject][action] for i in range(len(poses_2d)): # Iterate across segs out_poses_2d.append(poses_2d[i]) if parse_3d_poses and 'positions_3d' in dataset[subject][action]: poses_3d = dataset[subject][action]['positions_3d'] assert len(poses_3d) == len(poses_2d), 'seg count mismatch' for i in range(len(poses_3d)): # Iterate across cameras out_poses_3d.append(poses_3d[i]) if len(out_camera_params) == 0: out_camera_params = None if len(out_poses_3d) == 0: out_poses_3d = None stride = downsample if subset < 1: for i in range(len(out_poses_2d)): n_frames = int(round(len(out_poses_2d[i])//stride * subset)*stride) start = deterministic_random( 0, len(out_poses_2d[i]) - n_frames + 1, str(len(out_poses_2d[i]))) out_poses_2d[i] = out_poses_2d[i][start:start+n_frames:stride] if out_poses_3d is not None: out_poses_3d[i] = out_poses_3d[i][start:start+n_frames:stride] elif stride > 1: # Downsample as requested for i in range(len(out_poses_2d)): out_poses_2d[i] = out_poses_2d[i][::stride] if out_poses_3d is not None: out_poses_3d[i] = out_poses_3d[i][::stride] return out_camera_params, out_poses_3d, out_poses_2d def create_model(cfg, dataset, poses_valid_2d): filter_widths = [int(x) for x in cfg.architecture.split(",")] if not cfg.disable_optimizations and not cfg.dense and cfg.stride == 1: # Use optimized model for single-frame predictions model_pos_train = TemporalModelOptimized1f(poses_valid_2d[0].shape[-2], poses_valid_2d[0].shape[-1], dataset.skeleton().num_joints(), filter_widths=filter_widths, causal=cfg.causal, dropout=cfg.dropout, channels=cfg.channels) else: # When incompatible settings are detected (stride > 1, dense filters, or disabled optimization) fall back to normal model model_pos_train = TemporalModel(poses_valid_2d[0].shape[-2], poses_valid_2d[0].shape[-1], dataset.skeleton().num_joints(), filter_widths=filter_widths, causal=cfg.causal, dropout=cfg.dropout, channels=cfg.channels, dense=cfg.dense) model_pos = TemporalModel(poses_valid_2d[0].shape[-2], poses_valid_2d[0].shape[-1], dataset.skeleton().num_joints(), filter_widths=filter_widths, causal=cfg.causal, dropout=cfg.dropout, channels=cfg.channels, dense=cfg.dense) receptive_field = model_pos.receptive_field() pad = (receptive_field - 1) // 2 # padding on each side if cfg.causal: causal_shift = pad else: causal_shift = 0 return model_pos_train, model_pos, pad, causal_shift def load_weight(cfg, model_pos_train, model_pos): checkpoint = dict() if cfg.resume or cfg.evaluate: chk_filename = os.path.join( cfg.checkpoint, cfg.resume if cfg.resume else cfg.evaluate) print("Loading checkpoint", chk_filename) checkpoint = torch.load(chk_filename) # print("This model was trained for {} epochs".format(checkpoint["epoch"])) model_pos_train.load_state_dict(checkpoint["model_pos"]) model_pos.load_state_dict(checkpoint["model_pos"]) return model_pos_train, 
model_pos, checkpoint def train(accelerator, model_pos_train, train_loader, optimizer): epoch_loss_3d_train = 0 N = 0 # TODO dataloader and tqdm total = len(train_loader) with alive_bar(total, title='Train', spinner='elements') as bar: for batch_data in train_loader: inputs_3d, inputs_2d = batch_data[-2], batch_data[-1] inputs_3d[:, :, 0] = 0 optimizer.zero_grad() # Predict 3D poses predicted_3d_pos = model_pos_train(inputs_2d) loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d) epoch_loss_3d_train += inputs_3d.shape[0] * \ inputs_3d.shape[1] * loss_3d_pos.item() N += inputs_3d.shape[0] * inputs_3d.shape[1] loss_total = loss_3d_pos # accelerator backward accelerator.backward(loss_total) optimizer.step() bar() epoch_losses_eva = epoch_loss_3d_train / N return epoch_losses_eva def eval(model_train_dict, model_pos, test_loader, train_loader_eval): N = 0 epoch_loss_3d_valid = 0 epoch_loss_3d_train_eval = 0 with torch.no_grad(): model_pos.load_state_dict(model_train_dict) model_pos.eval() # Evaluate on test set total_test = len(test_loader) with alive_bar(total_test, title='Test ', spinner='flowers') as bar: for batch_data in test_loader: inputs_3d, inputs_2d = batch_data[-2], batch_data[-1] inputs_3d[:, :, 0] = 0 # Predict 3D poses predicted_3d_pos = model_pos(inputs_2d) loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d) epoch_loss_3d_valid += inputs_3d.shape[0] * \ inputs_3d.shape[1] * loss_3d_pos.item() N += inputs_3d.shape[0] * inputs_3d.shape[1] bar() losses_3d_valid_ave = epoch_loss_3d_valid / N # Evaluate on training set, this time in evaluation mode N = 0 total_eval = len(train_loader_eval) with alive_bar(total_eval, title='Eval ', spinner='flowers') as bar: for batch_data in train_loader_eval: inputs_3d, inputs_2d = batch_data[-2], batch_data[-1] if inputs_2d.shape[1] == 0: # This happens only when downsampling the dataset continue inputs_3d[:, :, 0] = 0 # Compute 3D poses predicted_3d_pos = model_pos(inputs_2d) loss_3d_pos = mpjpe(predicted_3d_pos, inputs_3d) epoch_loss_3d_train_eval += inputs_3d.shape[0] * \ inputs_3d.shape[1] * loss_3d_pos.item() N += inputs_3d.shape[0] * inputs_3d.shape[1] bar() losses_3d_train_eval_ave = epoch_loss_3d_train_eval / N return losses_3d_valid_ave, losses_3d_train_eval_ave def prepare_actions(subjects_test, dataset): all_actions = {} all_actions_by_subject = {} for subject in subjects_test: if subject not in all_actions_by_subject: all_actions_by_subject[subject] = {} for action in dataset[subject].keys(): action_name = action.split(' ')[0] if action_name not in all_actions: all_actions[action_name] = [] if action_name not in all_actions_by_subject[subject]: all_actions_by_subject[subject][action_name] = [] all_actions[action_name].append((subject, action)) all_actions_by_subject[subject][action_name].append( (subject, action)) return all_actions, all_actions_by_subject def fetch_actions(actions, keypoints, dataset, downsample=1): out_poses_3d = [] out_poses_2d = [] for subject, action in actions: poses_2d = keypoints[subject][action] for i in range(len(poses_2d)): # Iterate across camera out_poses_2d.append(poses_2d[i]) poses_3d = dataset[subject][action]['positions_3d'] assert len(poses_3d) == len(poses_2d), 'Camera count mismatch' for i in range(len(poses_3d)): # Iterate across cameras out_poses_3d.append(poses_3d[i]) stride = downsample if stride > 1: # Downsample as requested for i in range(len(out_poses_2d)): out_poses_2d[i] = out_poses_2d[i][::stride] if out_poses_3d is not None: out_poses_3d[i] = out_poses_3d[i][::stride] return out_poses_3d, 
out_poses_2d def evaluate(test_loader, model_pos, action=None, log=None, joints_left=None, joints_right=None, test_augment=True): epoch_loss_3d_pos = 0 epoch_loss_3d_pos_procrustes = 0 with torch.no_grad(): model_pos.eval() N = 0 for batch_data in test_loader: inputs_3d, inputs_2d = batch_data[-2], batch_data[-1] if test_augment: inputs_2d = torch.squeeze(inputs_2d, 0) inputs_3d = torch.squeeze(inputs_3d, 0) inputs_3d[:, :, 0] = 0 # Positional model predicted_3d_pos = model_pos(inputs_2d) if test_augment: assert joints_left is not None and joints_right is not None predicted_3d_pos[1, :, :, 0] *= -1 predicted_3d_pos[1, :, joints_left + joints_right] = predicted_3d_pos[1, :, joints_right + joints_left] predicted_3d_pos = torch.mean( predicted_3d_pos, dim=0, keepdim=True) inputs_3d = inputs_3d[:1] error = mpjpe(predicted_3d_pos, inputs_3d) epoch_loss_3d_pos += inputs_3d.shape[0] * \ inputs_3d.shape[1] * error.item() N += inputs_3d.shape[0] * inputs_3d.shape[1] inputs = inputs_3d.cpu().numpy().reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1]) predicted_3d_pos = predicted_3d_pos.cpu().numpy( ).reshape(-1, inputs_3d.shape[-2], inputs_3d.shape[-1]) epoch_loss_3d_pos_procrustes += inputs_3d.shape[0] * \ inputs_3d.shape[1] * p_mpjpe(predicted_3d_pos, inputs) e1 = (epoch_loss_3d_pos / N) * 1000 e2 = (epoch_loss_3d_pos_procrustes / N) * 1000 if log is not None: if action is None: log.info('----------') else: log.info('----{}----'.format(action)) log.info('Protocol #1 Error (MPJPE): {} mm'.format(e1)) log.info('Protocol #2 Error (P-MPJPE): {} mm'.format(e2)) log.info('----------') return e1, e2 def predict(test_generator, model_pos): with torch.no_grad(): model_pos.eval() batch_2d = next(test_generator.next_epoch())[-1] inputs_2d = torch.from_numpy(batch_2d.astype('float32')) if torch.cuda.is_available(): inputs_2d = inputs_2d.cuda() predicted_3d_pos = model_pos(inputs_2d) return predicted_3d_pos.squeeze(0).cpu().numpy()
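# ---------------------------------------------------------------------------
# Hedged sketch (not part of the training script above): evaluate() averages a
# prediction with the prediction made on a horizontally flipped copy of the
# input.  The helper below reproduces just that merging step on a dummy tensor
# so the indexing is easier to follow; the joint indices and tensor shapes are
# illustrative assumptions, not the project's real skeleton layout.
import torch

def average_flipped_prediction(predicted, joints_left, joints_right):
    """predicted: (2, frames, joints, 3) with index 0 = original view, 1 = flipped view."""
    predicted = predicted.clone()
    # undo the mirroring of the flipped prediction: negate x and swap left/right joints
    predicted[1, :, :, 0] *= -1
    predicted[1, :, joints_left + joints_right] = predicted[1, :, joints_right + joints_left]
    # average the two views into a single estimate
    return torch.mean(predicted, dim=0, keepdim=True)

# toy usage with 17 joints and made-up left/right index lists
dummy = torch.randn(2, 4, 17, 3)
merged = average_flipped_prediction(dummy, joints_left=[4, 5, 6], joints_right=[1, 2, 3])
print(merged.shape)  # torch.Size([1, 4, 17, 3])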
StarcoderdataPython
163721
"""blog URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from app import views from django.conf import settings from django.conf.urls.static import static urlpatterns = [ path('admin/', admin.site.urls), path('', views.index, name='index'), path('post_details/<int:post_id>/', views.post_details, name='post_details'), path('register/', views.register, name='register'), path('login/', views.login_user, name='login'), path('logout/', views.logout_user, name='logout'), path('profile', views.profile, name='profile'), path('profile_update', views.profile_update, name='profile_update'), path('new_post', views.new_post, name='new_post'), path('update_post/<int:post_id>', views.update_post, name='update_post'), path('delete_post/<int:post_id>', views.delete_post, name='delete_post'), ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
StarcoderdataPython
8056396
from ipyparallel import Client import sys import numpy as np ######################################################################################################################## # set up parallel client rc = Client() dview = rc[:] with dview.sync_imports(): from generate_tay_sde_obs import experiment exps = [] seed = 0 diffusion = [0.1, 0.25, 0.5, 0.75, 1.0] tanl = [0.1, 0.25, 0.5, 1] for i in diffusion: for j in tanl: exps.append([seed, i, j]) # run the experiments given the parameters and write to text files, in parallel over the initializations completed = dview.map_sync(experiment, exps) print(completed) sys.exit() ########################################################################################################################
StarcoderdataPython
1611897
import pandas as pd import numpy as np import statsmodels as sm import statsmodels.api as smapi import math from pyqstrat.pq_utils import monotonically_increasing, infer_frequency from pyqstrat.plot import TimeSeries, DateLine, Subplot, HorizontalLine, BucketedValues, Plot import matplotlib as mpl import matplotlib.figure as mpl_fig from typing import Tuple, Sequence, Mapping, MutableMapping, Optional, Any, Callable, Dict def compute_periods_per_year(timestamps: np.ndarray) -> float: """ Computes trading periods per year for an array of numpy datetime64's. e.g. if most of the timestamps are separated by 1 day, will return 252. Args: timestamps: a numpy array of datetime64's >>> compute_periods_per_year(np.array(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-09'], dtype='M8[D]')) 252.0 >>> round(compute_periods_per_year(np.array(['2018-01-01 10:00', '2018-01-01 10:05', '2018-01-01 10:10'], dtype='M8[m]')), 2) 72576.05 """ if not len(timestamps): return np.nan freq = infer_frequency(timestamps) return 252. / freq if freq != 0 else np.nan def compute_amean(returns: np.ndarray, periods_per_year: int) -> float: ''' Computes arithmetic mean of a return array, ignoring NaNs Args: returns: Represents returns at any frequency periods_per_year: Frequency of the returns, e.g. 252 for daily returns >>> compute_amean(np.array([0.003, 0.004, np.nan]), 252) 0.882 ''' if not len(returns): return np.nan return np.nanmean(returns) * periods_per_year def compute_num_periods(timestamps: np.ndarray, periods_per_year: float) -> float: ''' Given an array of timestamps, we compute how many periods there are between the first and last element, where the length of a period is defined by periods_per_year. For example, if there are 6 periods per year, then each period would be approx. 2 months long. Args: timestamps (np.ndarray of np.datetime64): a numpy array of returns, can contain nans periods_per_year: number of periods between first and last return >>> compute_num_periods(np.array(['2015-01-01', '2015-03-01', '2015-05-01'], dtype='M8[D]'), 6) 2.0 ''' if not len(timestamps): return np.nan assert(monotonically_increasing(timestamps)) fraction_of_year = (timestamps[-1] - timestamps[0]) / (np.timedelta64(1, 's') * 365 * 24 * 60 * 60) return round(fraction_of_year * periods_per_year) def compute_gmean(timestamps: np.ndarray, returns: np.ndarray, periods_per_year: float) -> float: """ Compute geometric mean of an array of returns Args: returns: a numpy array of returns, can contain nans periods_per_year: Used for annualizing returns >>> round(compute_gmean(np.array(['2015-01-01', '2015-03-01', '2015-05-01'], dtype='M8[D]'), np.array([0.001, 0.002, 0.003]), 252.), 6) 0.018362 """ if not len(returns): return np.nan assert(len(returns) == len(timestamps)) assert(isinstance(timestamps, np.ndarray) and isinstance(returns, np.ndarray)) mask = np.isfinite(returns) timestamps = timestamps[mask] returns = returns[mask] num_periods = compute_num_periods(timestamps, periods_per_year) g_mean = ((1.0 + returns).prod())**(1.0 / num_periods) g_mean = np.power(g_mean, periods_per_year) - 1.0 return g_mean def compute_std(returns: np.ndarray) -> float: """ Computes standard deviation of an array of returns, ignoring nans """ if not len(returns): return np.nan return np.nanstd(returns) def compute_sortino(returns: np.ndarray, amean: float, periods_per_year: float) -> float: ''' Note that this assumes target return is 0. 
Args: returns: a numpy array of returns amean: arithmetic mean of returns periods_per_year: number of trading periods per year >>> print(round(compute_sortino(np.array([0.001, -0.001, 0.002]), 0.001, 252), 6)) 0.133631 ''' if not len(returns) or not np.isfinite(amean) or periods_per_year <= 0: return np.nan returns = np.where((~np.isfinite(returns)), 0.0, returns) normalized_rets = np.where(returns > 0.0, 0.0, returns) sortino_denom = np.std(normalized_rets) sortino = np.nan if sortino_denom == 0 else amean / (sortino_denom * np.sqrt(periods_per_year)) return sortino def compute_sharpe(returns: np.ndarray, amean: float, periods_per_year: float) -> float: ''' Note that this does not take into risk free returns so it's really a sharpe0, i.e. assumes risk free returns are 0 Args: returns: a numpy array of returns amean: arithmetic mean of returns periods_per_year: number of trading periods per year >>> round(compute_sharpe(np.array([0.001, -0.001, 0.002]), 0.001, 252), 6) 0.050508 ''' if not len(returns) or not np.isfinite(amean) or periods_per_year <= 0: return np.nan returns = np.where((~np.isfinite(returns)), 0.0, returns) s = np.std(returns) sharpe = np.nan if s == 0 else amean / (s * np.sqrt(periods_per_year)) return sharpe def compute_k_ratio(equity: np.ndarray, periods_per_year: int, halflife_years: float = None) -> float: ''' Compute k-ratio (2013 or original versions by <NAME>). See https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2230949 We also implement a modification that allows higher weighting for more recent returns. Args: equity: a numpy array of the equity in your account periods_per_year: 252 for daily values halflife_years: If set, we use weighted linear regression to give less weight to older returns. In this case, we compute the original k-ratio which does not use periods per year or number of observations If not set, we compute the 2013 version of the k-ratio which weights k-ratio by sqrt(periods_per_year) / nobs Returns: weighted or unweighted k-ratio >>> np.random.seed(0) >>> t = np.arange(1000) >>> ret = np.random.normal(loc = 0.0025, scale = 0.01, size = len(t)) >>> equity = (1 + ret).cumprod() >>> assert(math.isclose(compute_k_ratio(equity, 252, None), 3.888, abs_tol=0.001)) >>> assert(math.isclose(compute_k_ratio(equity, 252, 0.5), 602.140, abs_tol=0.001)) ''' equity = equity[np.isfinite(equity)] equity = np.log(equity) t = np.arange(len(equity)) if halflife_years: halflife = halflife_years * periods_per_year k = math.log(0.5) / halflife w = np.empty(len(equity), dtype=np.float) w = np.exp(k * t) w = w ** 2 # Statsmodels requires square of weights w = w[::-1] fit = sm.regression.linear_model.WLS(endog=equity, exog=t, weights=w, hasconst=False).fit() k_ratio = fit.params[0] / fit.bse[0] else: fit = smapi.OLS(endog=equity, exog=np.arange(len(equity)), hasconst=False).fit() k_ratio = fit.params[0] * math.sqrt(periods_per_year) / (fit.bse[0] * len(equity)) return k_ratio def compute_equity(timestamps: np.ndarray, starting_equity: float, returns: np.ndarray) -> np.ndarray: ''' Given starting equity, timestamps and returns, create a numpy array of equity at each date''' return starting_equity * np.cumprod(1. 
+ returns) def compute_rolling_dd(timestamps: np.ndarray, equity: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: ''' Compute numpy array of rolling drawdown percentage Args: timestamps: numpy array of datetime64 equity: numpy array of equity ''' assert(len(timestamps) == len(equity)) if not len(timestamps): return np.array([], dtype='M8[ns]'), np.array([], dtype=np.float) s = pd.Series(equity, index=timestamps) rolling_max = s.expanding(min_periods=1).max() dd = np.where(s >= rolling_max, 0.0, -(s - rolling_max) / rolling_max) return timestamps, dd def compute_maxdd_pct(rolling_dd: np.ndarray) -> float: '''Compute max drawdown percentage given a numpy array of rolling drawdowns, ignoring NaNs''' if not len(rolling_dd): return np.nan return np.nanmax(rolling_dd) def compute_maxdd_date(rolling_dd_dates: np.ndarray, rolling_dd: np.ndarray) -> float: ''' Compute date of max drawdown given numpy array of timestamps, and corresponding rolling dd percentages''' if not len(rolling_dd_dates): return pd.NaT assert(len(rolling_dd_dates) == len(rolling_dd)) return rolling_dd_dates[np.argmax(rolling_dd)] def compute_maxdd_start(rolling_dd_dates: np.ndarray, rolling_dd: np.ndarray, mdd_date: np.datetime64) -> np.datetime64: '''Compute date when max drawdown starts, given numpy array of timestamps corresponding rolling dd percentages and date of the max draw down''' if not len(rolling_dd_dates) or pd.isnull(mdd_date): return pd.NaT assert(len(rolling_dd_dates) == len(rolling_dd)) return rolling_dd_dates[(rolling_dd <= 0) & (rolling_dd_dates < mdd_date)][-1] def compute_mar(returns: np.ndarray, periods_per_year: float, mdd_pct: float) -> float: '''Compute MAR ratio, which is annualized return divided by biggest drawdown since inception.''' if not len(returns) or np.isnan(mdd_pct) or mdd_pct == 0: return np.nan return np.mean(returns) * periods_per_year / mdd_pct def compute_dates_3yr(timestamps: np.ndarray) -> np.ndarray: ''' Given an array of numpy datetimes, return those that are within 3 years of the last date in the array''' if not len(timestamps): return np.array([], dtype='M8[D]') last_date = timestamps[-1] d = pd.to_datetime(last_date) start_3yr = np.datetime64(d.replace(year=d.year - 3)) return timestamps[timestamps > start_3yr] def compute_returns_3yr(timestamps: np.ndarray, returns: np.ndarray) -> np.ndarray: '''Given an array of numpy datetimes and an array of returns, return those that are within 3 years of the last date in the datetime array ''' if not len(timestamps): return np.array([], dtype=np.float) assert(len(timestamps) == len(returns)) timestamps_3yr = compute_dates_3yr(timestamps) return returns[timestamps >= timestamps_3yr[0]] def compute_rolling_dd_3yr(timestamps: np.ndarray, equity: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: '''Compute rolling drawdowns over the last 3 years''' if not len(timestamps): return np.array([], dtype='M8[D]') last_date = timestamps[-1] d = pd.to_datetime(last_date) start_3yr = np.datetime64(d.replace(year=d.year - 3)) equity = equity[timestamps >= start_3yr] timestamps = timestamps[timestamps >= start_3yr] return compute_rolling_dd(timestamps, equity) def compute_maxdd_pct_3yr(rolling_dd_3yr: np.ndarray) -> float: '''Compute max drawdown percentage over the last 3 years''' return compute_maxdd_pct(rolling_dd_3yr) def compute_maxdd_date_3yr(rolling_dd_3yr_timestamps: np.ndarray, rolling_dd_3yr: np.ndarray) -> np.datetime64: '''Compute max drawdown date over the last 3 years''' return compute_maxdd_date(rolling_dd_3yr_timestamps, rolling_dd_3yr) def 
compute_maxdd_start_3yr(rolling_dd_3yr_timestamps: np.ndarray, rolling_dd_3yr: np.ndarray, mdd_date_3yr: np.datetime64) -> np.datetime64: '''Comput max drawdown start date over the last 3 years''' return compute_maxdd_start(rolling_dd_3yr_timestamps, rolling_dd_3yr, mdd_date_3yr) def compute_calmar(returns_3yr: np.ndarray, periods_per_year: float, mdd_pct_3yr: float) -> float: '''Compute Calmar ratio, which is the annualized return divided by max drawdown over the last 3 years''' return compute_mar(returns_3yr, periods_per_year, mdd_pct_3yr) def compute_bucketed_returns(timestamps: np.ndarray, returns: np.ndarray) -> Tuple[Sequence[int], Sequence[np.ndarray]]: ''' Bucket returns by year Returns: A tuple with the first element being a list of years and the second a list of numpy arrays containing returns for each corresponding year ''' assert(len(timestamps) == len(returns)) if not len(timestamps): return np.array([], dtype=np.str), np.array([], dtype=np.float) s = pd.Series(returns, index=timestamps) years_list = [] rets_list = [] for year, rets in s.groupby(s.index.map(lambda x: x.year)): years_list.append(year) rets_list.append(rets.values) return years_list, rets_list def compute_annual_returns(timestamps: np.ndarray, returns: np.ndarray, periods_per_year: float) -> Tuple[np.ndarray, np.ndarray]: '''Takes the output of compute_bucketed_returns and returns geometric mean of returns by year Returns: A tuple with the first element being an array of years (integer) and the second element an array of annualized returns for those years ''' assert(len(timestamps) == len(returns) and periods_per_year > 0) if not len(timestamps): return np.array([], dtype=np.str), np.array([], dtype=np.float) df = pd.DataFrame({'ret': returns, 'timestamp': timestamps}) years = [] gmeans = [] for k, g in df.groupby(df.timestamp.map(lambda x: x.year)): years.append(k) gmeans.append(compute_gmean(g.timestamp.values, g.ret.values, periods_per_year)) return np.array(years), np.array(gmeans) class Evaluator: """You add functions to the evaluator that are dependent on the outputs of other functions. The evaluator will call these functions in the right order so dependencies are computed first before the functions that need their output. You can retrieve the output of a metric using the metric member function >>> evaluator = Evaluator(initial_metrics={'x': np.array([1, 2, 3]), 'y': np.array([3, 4, 5])}) >>> evaluator.add_metric('z', lambda x, y: sum(x, y), dependencies=['x', 'y']) >>> evaluator.compute() >>> evaluator.metric('z') array([ 9, 10, 11]) """ def __init__(self, initial_metrics: Dict[str, Any]) -> None: """Inits Evaluator with a dictionary of initial metrics that are used to compute subsequent metrics Args: initial_metrics: a dictionary of string name -> metric. metric can be any object including a scalar, an array or a tuple """ assert(type(initial_metrics) == dict) self.metric_values: Dict[str, Any] = initial_metrics.copy() self._metrics: MutableMapping[str, Tuple[Callable, Sequence[str]]] = {} def add_metric(self, name: str, func: Callable, dependencies: Sequence[str]) -> None: self._metrics[name] = (func, dependencies) def compute(self, metric_names: Sequence[str] = None) -> None: '''Compute metrics using the internal dependency graph Args: metric_names: an array of metric names. 
If not passed in, evaluator will compute and store all metrics ''' if metric_names is None: metric_names = list(self._metrics.keys()) for metric_name in metric_names: self.compute_metric(metric_name) def compute_metric(self, metric_name: str) -> None: ''' Compute and store a single metric: Args: metric_name: string representing the metric to compute ''' func, dependencies = self._metrics[metric_name] for dependency in dependencies: if dependency not in self.metric_values: self.compute_metric(dependency) dependency_values = {k: self.metric_values[k] for k in dependencies} values = func(**dependency_values) self.metric_values[metric_name] = values def metric(self, metric_name: str) -> Any: '''Return the value of a single metric given its name''' return self.metric_values[metric_name] def metrics(self) -> Mapping[str, Any]: '''Return a dictionary of metric name -> metric value''' return self.metric_values def handle_non_finite_returns(timestamps: np.ndarray, rets: np.ndarray, leading_non_finite_to_zeros: bool, subsequent_non_finite_to_zeros: bool) -> Tuple[np.ndarray, np.ndarray]: ''' >>> np.set_printoptions(formatter={'float': '{: .6g}'.format}) >>> timestamps = np.arange(np.datetime64('2019-01-01'), np.datetime64('2019-01-07')) >>> rets = np.array([np.nan, np.nan, 3, 4, np.nan, 5]) >>> handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros = False, subsequent_non_finite_to_zeros = True) (array(['2019-01-03', '2019-01-04', '2019-01-05', '2019-01-06'], dtype='datetime64[D]'), array([ 3, 4, 0, 5])) >>> handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros = True, subsequent_non_finite_to_zeros = False) (array(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04', '2019-01-06'], dtype='datetime64[D]'), array([ 0, 0, 3, 4, 5])) >>> handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros = False, subsequent_non_finite_to_zeros = False) (array(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04', '2019-01-06'], dtype='datetime64[D]'), array([ 0, 0, 3, 4, 5])) >>> rets = np.array([1, 2, 3, 4, 4.5, 5]) >>> handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros = False, subsequent_non_finite_to_zeros = True) (array(['2019-01-01', '2019-01-02', '2019-01-03', '2019-01-04', '2019-01-05', '2019-01-06'], dtype='datetime64[D]'), array([ 1, 2, 3, 4, 4.5, 5])) ''' first_non_nan_index = np.ravel(np.nonzero(~np.isnan(rets))) if len(first_non_nan_index): first_non_nan_index = first_non_nan_index[0] else: first_non_nan_index = -1 if first_non_nan_index > 0 and first_non_nan_index < len(rets): if leading_non_finite_to_zeros: rets[:first_non_nan_index] = np.nan_to_num(rets[:first_non_nan_index]) else: timestamps = timestamps[first_non_nan_index:] rets = rets[first_non_nan_index:] if subsequent_non_finite_to_zeros: rets = np.nan_to_num(rets) else: timestamps = timestamps[np.isfinite(rets)] rets = rets[np.isfinite(rets)] return timestamps, rets def compute_return_metrics(timestamps: np.ndarray, rets: np.ndarray, starting_equity: float, leading_non_finite_to_zeros: bool = False, subsequent_non_finite_to_zeros: bool = True) -> Evaluator: ''' Compute a set of common metrics using returns (for example, of an instrument or a portfolio) Args: timestamps (np.array of datetime64): Timestamps for the returns rets (nd.array of float): The returns, use 0.01 for 1% starting_equity (float): Starting equity value in your portfolio leading_non_finite_to_zeros (bool, optional): If set, we replace leading nan, inf, -inf returns with zeros. 
For example, you may need a warmup period for moving averages. Default False subsequent_non_finite_to_zeros (bool, optional): If set, we replace any nans that follow the first non nan value with zeros. There may be periods where you have no prices but removing these returns would result in incorrect annualization. Default True Returns: An Evaluator object containing computed metrics off the returns passed in. If needed, you can add your own metrics to this object based on the values of existing metrics and recompute the Evaluator. Otherwise, you can just use the output of the evaluator using the metrics function. >>> timestamps = np.array(['2015-01-01', '2015-03-01', '2015-05-01', '2015-09-01'], dtype='M8[D]') >>> rets = np.array([0.01, 0.02, np.nan, -0.015]) >>> starting_equity = 1.e6 >>> ev = compute_return_metrics(timestamps, rets, starting_equity) >>> metrics = ev.metrics() >>> assert(round(metrics['gmean'], 6) == 0.021061) >>> assert(round(metrics['sharpe'], 6) == 0.599382) >>> assert(all(metrics['returns_3yr'] == np.array([0.01, 0.02, 0, -0.015]))) ''' assert(starting_equity > 0.) assert(type(rets) == np.ndarray and rets.dtype == np.float64) assert(type(timestamps) == np.ndarray and np.issubdtype(timestamps.dtype, np.datetime64) and monotonically_increasing(timestamps)) timestamps, rets = handle_non_finite_returns(timestamps, rets, leading_non_finite_to_zeros, subsequent_non_finite_to_zeros) ev = Evaluator({'timestamps': timestamps, 'returns': rets, 'starting_equity': starting_equity}) ev.add_metric('periods_per_year', compute_periods_per_year, dependencies=['timestamps']) ev.add_metric('amean', compute_amean, dependencies=['returns', 'periods_per_year']) ev.add_metric('std', compute_std, dependencies=['returns']) ev.add_metric('up_periods', lambda returns: len(returns[returns > 0]), dependencies=['returns']) ev.add_metric('down_periods', lambda returns: len(returns[returns < 0]), dependencies=['returns']) ev.add_metric('up_pct', lambda up_periods, down_periods: up_periods * 1.0 / (up_periods + down_periods) if (up_periods + down_periods) != 0 else np.nan, dependencies=['up_periods', 'down_periods']) ev.add_metric('gmean', compute_gmean, dependencies=['timestamps', 'returns', 'periods_per_year']) ev.add_metric('sharpe', compute_sharpe, dependencies=['returns', 'periods_per_year', 'amean']) ev.add_metric('sortino', compute_sortino, dependencies=['returns', 'periods_per_year', 'amean']) ev.add_metric('equity', compute_equity, dependencies=['timestamps', 'starting_equity', 'returns']) ev.add_metric('k_ratio', compute_k_ratio, dependencies=['equity', 'periods_per_year']) ev.add_metric('k_ratio_weighted', lambda equity, periods_per_year: compute_k_ratio(equity, periods_per_year, 3), dependencies=['equity', 'periods_per_year']) # Drawdowns ev.add_metric('rolling_dd', compute_rolling_dd, dependencies=['timestamps', 'equity']) ev.add_metric('mdd_pct', lambda rolling_dd: compute_maxdd_pct(rolling_dd[1]), dependencies=['rolling_dd']) ev.add_metric('mdd_date', lambda rolling_dd: compute_maxdd_date(rolling_dd[0], rolling_dd[1]), dependencies=['rolling_dd']) ev.add_metric('mdd_start', lambda rolling_dd, mdd_date: compute_maxdd_start(rolling_dd[0], rolling_dd[1], mdd_date), dependencies=['rolling_dd', 'mdd_date']) ev.add_metric('mar', compute_mar, dependencies=['returns', 'periods_per_year', 'mdd_pct']) ev.add_metric('timestamps_3yr', compute_dates_3yr, dependencies=['timestamps']) ev.add_metric('returns_3yr', compute_returns_3yr, dependencies=['timestamps', 'returns']) 
ev.add_metric('rolling_dd_3yr', compute_rolling_dd_3yr, dependencies=['timestamps', 'equity']) ev.add_metric('mdd_pct_3yr', lambda rolling_dd_3yr: compute_maxdd_pct_3yr(rolling_dd_3yr[1]), dependencies=['rolling_dd_3yr']) ev.add_metric('mdd_date_3yr', lambda rolling_dd_3yr: compute_maxdd_date_3yr(rolling_dd_3yr[0], rolling_dd_3yr[1]), dependencies=['rolling_dd_3yr']) ev.add_metric('mdd_start_3yr', lambda rolling_dd_3yr, mdd_date_3yr: compute_maxdd_start_3yr(rolling_dd_3yr[0], rolling_dd_3yr[1], mdd_date_3yr), dependencies=['rolling_dd_3yr', 'mdd_date_3yr']) ev.add_metric('calmar', compute_calmar, dependencies=['returns_3yr', 'periods_per_year', 'mdd_pct_3yr']) ev.add_metric('annual_returns', compute_annual_returns, dependencies=['timestamps', 'returns', 'periods_per_year']) ev.add_metric('bucketed_returns', compute_bucketed_returns, dependencies=['timestamps', 'returns']) ev.compute() return ev def display_return_metrics(metrics: Mapping[str, Any], float_precision: int = 3) -> pd.DataFrame: ''' Creates a dataframe making it convenient to view the output of the metrics obtained using the compute_return_metrics function. Args: float_precision: Change if you want to display floats with more or less significant figures than the default, 3 significant figures. Returns: A one row dataframe with formatted metrics. ''' from IPython.core.display import display _metrics = {} cols = ['gmean', 'amean', 'std', 'shrp', 'srt', 'k', 'calmar', 'mar', 'mdd_pct', 'mdd_start', 'mdd_date', 'dd_3y_pct', 'up_periods', 'down_periods', 'up_pct', 'mdd_start_3yr', 'mdd_date_3yr'] translate = {'shrp': 'sharpe', 'srt': 'sortino', 'dd_3y_pct': 'mdd_pct_3yr', 'k': 'k_ratio'} for col in cols: key = col if col in translate: key = translate[col] _metrics[col] = metrics[key] _metrics['mdd_dates'] = f'{str(metrics["mdd_start"])[:10]}/{str(metrics["mdd_date"])[:10]}' _metrics['up_dwn'] = f'{metrics["up_periods"]}/{metrics["down_periods"]}/{metrics["up_pct"]:.3g}' _metrics['dd_3y_timestamps'] = f'{str(metrics["mdd_start_3yr"])[:10]}/{str(metrics["mdd_date_3yr"])[:10]}' years = metrics['annual_returns'][0] ann_rets = metrics['annual_returns'][1] for i, year in enumerate(years): _metrics[str(year)] = ann_rets[i] format_str = '{:.' + str(float_precision) + 'g}' for k, v in _metrics.items(): if isinstance(v, np.float) or isinstance(v, float): _metrics[k] = format_str.format(v) cols = ['gmean', 'amean', 'std', 'shrp', 'srt', 'k', 'calmar', 'mar', 'mdd_pct', 'mdd_dates', 'dd_3y_pct', 'dd_3y_timestamps', 'up_dwn'] + [ str(year) for year in sorted(years)] df = pd.DataFrame(index=['']) for metric_name, metric_value in _metrics.items(): df.insert(0, metric_name, metric_value) df = df[cols] display(df) return df def plot_return_metrics(metrics: Mapping[str, Any], title: str = None) -> Optional[Tuple[mpl_fig.Figure, mpl.axes.Axes]]: ''' Plot equity, rolling drawdowns and and a boxplot of annual returns given the output of compute_return_metrics. 
''' timestamps = metrics['timestamps'] equity = metrics['equity'] equity = TimeSeries('equity', timestamps=timestamps, values=equity) mdd_date, mdd_start = metrics['mdd_start'], metrics['mdd_date'] mdd_date_3yr, mdd_start_3yr = metrics['mdd_start_3yr'], metrics['mdd_date_3yr'] drawdown_lines = [DateLine(name='max dd', date=mdd_start, color='red'), DateLine(date=mdd_date, color='red'), DateLine(name='3y dd', date=mdd_start_3yr, color='orange'), DateLine(date=mdd_date_3yr, color='orange')] equity_subplot = Subplot(equity, ylabel='Equity', height_ratio=0.6, log_y=True, y_tick_format='${x:,.0f}', date_lines=drawdown_lines, horizontal_lines=[HorizontalLine(metrics['starting_equity'], color='black')]) rolling_dd = TimeSeries('drawdowns', timestamps=metrics['rolling_dd'][0], values=metrics['rolling_dd'][1]) zero_line = HorizontalLine(y=0, color='black') dd_subplot = Subplot(rolling_dd, ylabel='Drawdowns', height_ratio=0.2, date_lines=drawdown_lines, horizontal_lines=[zero_line]) years = metrics['bucketed_returns'][0] ann_rets = metrics['bucketed_returns'][1] ann_ret = BucketedValues('annual returns', bucket_names=years, bucket_values=ann_rets) ann_ret_subplot = Subplot(ann_ret, ylabel='Annual Returns', height_ratio=0.2, horizontal_lines=[zero_line]) plt = Plot([equity_subplot, dd_subplot, ann_ret_subplot], title=title) return plt.draw() def test_evaluator() -> None: from datetime import datetime, timedelta np.random.seed(10) timestamps = np.arange(datetime(2018, 1, 1), datetime(2018, 3, 1), timedelta(days=1)) rets = np.random.normal(size=len(timestamps)) / 1000 starting_equity = 1.e6 ev = compute_return_metrics(timestamps, rets, starting_equity) display_return_metrics(ev.metrics()) plot_return_metrics(ev.metrics()) assert(round(ev.metric('sharpe'), 6) == 2.932954) assert(round(ev.metric('sortino'), 6) == 5.690878) assert(ev.metric('annual_returns')[0] == [2018]) assert(round(ev.metric('annual_returns')[1][0], 6) == [0.063530]) assert(ev.metric('mdd_start') == np.datetime64('2018-01-19')) assert(ev.metric('mdd_date') == np.datetime64('2018-01-22')) if __name__ == "__main__": test_evaluator() import doctest doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
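# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the module above): the docstring of
# compute_return_metrics notes that custom metrics can be registered on the
# returned Evaluator and recomputed.  example_custom_metric below shows one way
# that could look; the function name, the toy return series and the
# downside-deviation formula are illustrative assumptions only.
def example_custom_metric() -> None:
    np.random.seed(0)
    timestamps = np.arange(np.datetime64('2020-01-01'), np.datetime64('2020-04-10'))
    rets = np.random.normal(0.0005, 0.01, size=len(timestamps))
    ev = compute_return_metrics(timestamps, rets, starting_equity=1_000_000)
    # register an extra metric that reuses already-computed dependencies
    ev.add_metric(
        'downside_dev',
        lambda returns, periods_per_year: np.std(np.minimum(returns, 0.0)) * np.sqrt(periods_per_year),
        dependencies=['returns', 'periods_per_year'])
    ev.compute(['downside_dev'])
    print(ev.metric('sharpe'), ev.metric('downside_dev'))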
StarcoderdataPython
211851
<filename>ProgramsToRead/ExercisesFromClasses/ex001agosto26.py vogais = { 'a': 0, 'e': 0, 'i': 0, 'o': 0, 'u': 0 } texto = str(input('insira um texto: ')).strip().lower() for letra in texto: if letra in 'a': vogais['a'] += 1 elif letra in 'e': vogais['e'] += 1 elif letra in 'i': vogais['i'] += 1 elif letra in 'o': vogais['o'] += 1 elif letra in 'u': vogais['u'] += 1 print(vogais)
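# ---------------------------------------------------------------------------
# Hedged alternative sketch (not part of the exercise above): the same vowel
# counts can be produced without one branch per vowel by filtering the text
# through collections.Counter.  The function name contar_vogais is made up to
# match the exercise's naming; shown only as a comparison point.
from collections import Counter

def contar_vogais(texto: str) -> dict:
    texto = texto.strip().lower()
    contagem = {v: 0 for v in 'aeiou'}  # start every vowel at zero, like the original dict
    contagem.update(Counter(c for c in texto if c in 'aeiou'))
    return contagem

print(contar_vogais('Programamos em Python'))  # {'a': 2, 'e': 1, 'i': 0, 'o': 3, 'u': 0}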
StarcoderdataPython
8106816
<filename>tests/core/gridftp_test.py """ Test script for gridftp """ from __future__ import absolute_import, division, print_function from tests.util import unittest_reporter, glob_tests import logging logger = logging.getLogger('gridftp') import os, sys, time import shutil import random import string import subprocess import tempfile from threading import Event try: import cPickle as pickle except: import pickle import unittest import iceprod.core.gridftp skip_tests = False if (subprocess.call(['which','uberftp']) or subprocess.call(['which','globus-url-copy'])): skip_tests = True class gridftp_test(unittest.TestCase): def setUp(self): super(gridftp_test,self).setUp() self._timeout = 1 self.test_dir = tempfile.mkdtemp(dir=os.getcwd()) self.server_test_dir = os.path.join('gsiftp://gridftp.icecube.wisc.edu/data/sim/sim-new/tmp/test', str(random.randint(0,2**32))) try: iceprod.core.gridftp.GridFTP.mkdir(self.server_test_dir, parents=True, request_timeout=self._timeout) except: pass if not os.path.exists(self.test_dir): os.mkdir(self.test_dir) def cleanup(): try: iceprod.core.gridftp.GridFTP.rmtree(self.server_test_dir, request_timeout=self._timeout) except: pass shutil.rmtree(self.test_dir) self.addCleanup(cleanup) @unittest_reporter(skip=skip_tests) def test_01_supported_address(self): """Test supported_address""" bad_addresses = ['test','file:/test','gsiftp:test','gsiftp:/test', 'ftp:test','http://x2100.icecube.wisc.edu', 'ftp:/test'] good_addresses = ['gsiftp://data.icecube.wisc.edu','ftp://gnu.org', 'gsiftp://gridftp-rr.icecube.wisc.edu/data/sim/sim-new'] for i in range(0,10): for a in bad_addresses: ret = iceprod.core.gridftp.GridFTP.supported_address(a) if ret is True: raise Exception('Bad address %s was called good'%a) for a in good_addresses: ret = iceprod.core.gridftp.GridFTP.supported_address(a) if ret is not True: raise Exception('Good address %s was called bad'%a) @unittest_reporter(skip=skip_tests) def test_02_address_split(self): """Test address_split""" good_addresses = {'gsiftp://data.icecube.wisc.edu':('gsiftp://data.icecube.wisc.edu','/'), 'ftp://gnu.org':('ftp://gnu.org','/'), 'gsiftp://gridftp-rr.icecube.wisc.edu/data/sim/sim-new':('gsiftp://gridftp-rr.icecube.wisc.edu','/data/sim/sim-new')} for i in range(0,10): for a in good_addresses: pieces = iceprod.core.gridftp.GridFTP.address_split(a) if pieces != good_addresses[a]: raise Exception('Address %s was not split properly'%a) @unittest_reporter(skip=skip_tests,name='put() with str') def test_100_put_str(self): """Test put with a str - synchronous""" address = os.path.join(self.server_test_dir,'test') filecontents = 'this is a test' iceprod.core.gridftp.GridFTP.put(address,data=filecontents, request_timeout=self._timeout) @unittest_reporter(skip=skip_tests,name='put() with file') def test_101_put_file(self): """Test put with a file - synchronous""" address = os.path.join(self.server_test_dir,'test') # make temp file filename = os.path.join(self.test_dir,'test') filecontents = 'this is a test' with open(filename,'w') as f: f.write(filecontents) iceprod.core.gridftp.GridFTP.put(address,filename=filename, request_timeout=self._timeout) @unittest_reporter(skip=skip_tests,name='get() with str') def test_110_get_str(self): """Test get with a str - synchronous""" address = os.path.join(self.server_test_dir,'test') filecontents = 'this is a test' # put str iceprod.core.gridftp.GridFTP.put(address,data=filecontents, request_timeout=self._timeout) # get str ret = iceprod.core.gridftp.GridFTP.get(address, 
request_timeout=self._timeout) self.assertEqual(ret, filecontents) @unittest_reporter(skip=skip_tests,name='get() with file') def test_111_get_file(self): """Test get with a file - synchronous""" address = os.path.join(self.server_test_dir,'test') # make temp file filename = os.path.join(self.test_dir,'test') filename2 = os.path.join(self.test_dir,'test2') filecontents = 'this is a test' with open(filename,'w') as f: f.write(filecontents) # put file iceprod.core.gridftp.GridFTP.put(address,filename=filename, request_timeout=self._timeout) # get file ret = iceprod.core.gridftp.GridFTP.get(address,filename=filename2, request_timeout=self._timeout) if not os.path.exists(filename2): raise Exception('dest file does not exist') newcontents = open(filename2).read() self.assertEqual(filecontents, newcontents) @unittest_reporter(skip=skip_tests,name='list(dir)') def test_120_list(self): """Test list of directory - synchronous""" address = os.path.join(self.server_test_dir,'test') iceprod.core.gridftp.GridFTP.mkdir(address, request_timeout=self._timeout) # get listing ret = iceprod.core.gridftp.GridFTP.list(address) self.assertEqual(ret, []) @unittest_reporter(skip=skip_tests,name='list(file)') def test_121_list(self): """Test list of file - synchronous""" address = os.path.join(self.server_test_dir,'test_file') data = 'this is a test' iceprod.core.gridftp.GridFTP.put(address,data=data, request_timeout=self._timeout) # get listing ret = iceprod.core.gridftp.GridFTP.list(address, request_timeout=self._timeout) self.assertEqual(ret, ['test_file']) @unittest_reporter(skip=skip_tests,name='list(dir,dotfiles)') def test_122_list(self): """Test list of dir with dotfiles - synchronous""" address = os.path.join(self.server_test_dir,'test') iceprod.core.gridftp.GridFTP.mkdir(address, request_timeout=self._timeout) # get listing ret = iceprod.core.gridftp.GridFTP.list(address,dotfiles=True, request_timeout=self._timeout) self.assertEqual(ret, ['.','..']) @unittest_reporter(skip=skip_tests,name='list(file,dotfiles)') def test_123_list(self): """Test list of file with dotfiles - synchronous""" address = os.path.join(self.server_test_dir,'test_file') data = 'this is a test' iceprod.core.gridftp.GridFTP.put(address,data=data, request_timeout=self._timeout) # get listing ret = iceprod.core.gridftp.GridFTP.list(address,dotfiles=True, request_timeout=self._timeout) self.assertEqual(ret, ['test_file']) @unittest_reporter(skip=skip_tests,name='list(dir,details)') def test_124_list(self): """Test list of dir with details - synchronous""" address = os.path.join(self.server_test_dir,'test') iceprod.core.gridftp.GridFTP.mkdir(address, request_timeout=self._timeout) # get listing ret = iceprod.core.gridftp.GridFTP.list(address,details=True, request_timeout=self._timeout) self.assertEqual(ret, []) @unittest_reporter(skip=skip_tests,name='list(file,details)') def test_125_list(self): """Test list of file with details - synchronous""" address = os.path.join(self.server_test_dir,'test_file') data = 'this is a test' iceprod.core.gridftp.GridFTP.put(address,data=data, request_timeout=self._timeout) # get listing ret = iceprod.core.gridftp.GridFTP.list(address,details=True, request_timeout=self._timeout) if len(ret) != 1 or ret[0].directory: logger.info('actual: %r',ret) raise Exception('list did not return expected results') @unittest_reporter(skip=skip_tests,name='list(dir,details,dotfiles)') def test_126_list(self): """Test list of dir with details and dotfiles - synchronous""" address = 
os.path.join(self.server_test_dir,'test') ret = iceprod.core.gridftp.GridFTP.mkdir(address, request_timeout=self._timeout) # get listing ret = iceprod.core.gridftp.GridFTP.list(address,details=True, dotfiles=True, request_timeout=self._timeout) if (len(ret) != 2 or not any([x.name == '.' for x in ret]) or not any([x.name == '..' for x in ret])): logger.info("expected: ['..','.']") logger.info('actual: %r',ret) raise Exception('list did not return expected results') @unittest_reporter(skip=skip_tests,name='list(file,details,dotfiles)') def test_127_list(self): """Test list of file with details and dotfiles - synchronous""" address = os.path.join(self.server_test_dir,'test_file') data = 'this is a test' iceprod.core.gridftp.GridFTP.put(address,data=data, request_timeout=self._timeout) # get listing ret = iceprod.core.gridftp.GridFTP.list(address,details=True, dotfiles=True, request_timeout=self._timeout) if len(ret) != 1 or ret[0].name != 'test_file': logger.info("expected: ['test_file']") logger.info('actual: %r',ret) raise Exception('list did not return expected results') @unittest_reporter(skip=skip_tests) def test_130_delete(self): """Test delete - synchronous""" address = os.path.join(self.server_test_dir,'test') filecontents = 'this is a test' # put str iceprod.core.gridftp.GridFTP.put(address,data=filecontents, request_timeout=self._timeout) iceprod.core.gridftp.GridFTP.delete(address, request_timeout=self._timeout) @unittest_reporter(skip=skip_tests,name='rmtree(file)') def test_140_rmtree(self): """Test rmtree of a file - synchronous""" address = os.path.join(self.server_test_dir,'file_test') filecontents = 'this is a test' # put str iceprod.core.gridftp.GridFTP.put(address,data=filecontents, request_timeout=self._timeout) iceprod.core.gridftp.GridFTP.rmtree(address, request_timeout=self._timeout) @unittest_reporter(skip=skip_tests,name='rmtree(empty dir)') def test_141_rmtree(self): """Test rmtree of an empty dir - synchronous""" address = os.path.join(self.server_test_dir,'test') # mkdir iceprod.core.gridftp.GridFTP.mkdir(address, request_timeout=self._timeout) iceprod.core.gridftp.GridFTP.rmtree(address, request_timeout=self._timeout) @unittest_reporter(skip=skip_tests,name='rmtree(dir + file)') def test_142_rmtree(self): """Test rmtree of a directory with a file - synchronous""" address = os.path.join(self.server_test_dir,'test') # mkdir iceprod.core.gridftp.GridFTP.mkdir(address, request_timeout=self._timeout) address2 = os.path.join(self.server_test_dir,'test','file_test') filecontents = 'this is a test' # put str iceprod.core.gridftp.GridFTP.put(address2,data=filecontents, request_timeout=self._timeout) iceprod.core.gridftp.GridFTP.rmtree(address, request_timeout=self._timeout) @unittest_reporter(skip=skip_tests,name='rmtree(dir + dir + file)') def test_143_rmtree(self): """Test rmtree of dir with subdir and subfile - synchronous""" address = os.path.join(self.server_test_dir,'test') # mkdir iceprod.core.gridftp.GridFTP.mkdir(address, request_timeout=self._timeout) # mkdir address2 = os.path.join(self.server_test_dir,'test','test2') iceprod.core.gridftp.GridFTP.mkdir(address2, request_timeout=self._timeout) address3 = os.path.join(self.server_test_dir,'test','test2','file_test') filecontents = 'this is a test' # put str iceprod.core.gridftp.GridFTP.put(address3,data=filecontents, request_timeout=self._timeout) iceprod.core.gridftp.GridFTP.rmtree(address, request_timeout=self._timeout) @unittest_reporter(skip=skip_tests) def test_160_exists(self): """Test exists - 
synchronous""" address = os.path.join(self.server_test_dir,'test') filecontents = 'this is a test' ret = iceprod.core.gridftp.GridFTP.exists(address, request_timeout=self._timeout) if ret is True: raise Exception('exists succeeded when it should have failed') # put str iceprod.core.gridftp.GridFTP.put(address,data=filecontents, request_timeout=self._timeout) ret = iceprod.core.gridftp.GridFTP.exists(address, request_timeout=self._timeout) if ret is not True: raise Exception('exists failed: ret=%r'%ret) @unittest_reporter(skip=skip_tests) def test_170_move(self): """Test move - synchronous""" address = os.path.join(self.server_test_dir,'test') address2 = os.path.join(self.server_test_dir,'test2') filecontents = 'this is a test' # put str iceprod.core.gridftp.GridFTP.put(address,data=filecontents, request_timeout=self._timeout) ret = iceprod.core.gridftp.GridFTP.exists(address2, request_timeout=self._timeout) if ret is True: raise Exception('exists succeeded before move') iceprod.core.gridftp.GridFTP.move(address,address2, request_timeout=self._timeout) ret = iceprod.core.gridftp.GridFTP.exists(address, request_timeout=self._timeout) if ret is True: raise Exception('exists succeeded on old address') ret = iceprod.core.gridftp.GridFTP.exists(address2, request_timeout=self._timeout) if ret is not True: raise Exception('exists failed on new address') @unittest_reporter(skip=skip_tests) def test_180_checksum(self): """Test checksums - synchronous""" address = os.path.join(self.server_test_dir,'test') filecontents = b'this is a test' import hashlib # put str iceprod.core.gridftp.GridFTP.put(address,data=filecontents, request_timeout=self._timeout) ret = iceprod.core.gridftp.GridFTP.md5sum(address, request_timeout=self._timeout) correct = hashlib.md5(filecontents).hexdigest() if ret != correct: raise Exception('md5sum failed: ret=%r and correct=%r'%(ret,correct)) ret = iceprod.core.gridftp.GridFTP.sha1sum(address, request_timeout=self._timeout) correct = hashlib.sha1(filecontents).hexdigest() if ret != correct: raise Exception('sha1sum failed: ret=%r and correct=%r'%(ret,correct)) ret = iceprod.core.gridftp.GridFTP.sha256sum(address, request_timeout=self._timeout) correct = hashlib.sha256(filecontents).hexdigest() if ret != correct: raise Exception('sha256sum failed: ret=%r and correct=%r'%(ret,correct)) ret = iceprod.core.gridftp.GridFTP.sha512sum(address, request_timeout=self._timeout) correct = hashlib.sha512(filecontents).hexdigest() if ret != correct: raise Exception('sha512sum failed: ret=%r and correct=%r'%(ret,correct)) @unittest_reporter(skip=skip_tests) def test_190_size(self): """Test size - synchronous""" address = os.path.join(self.server_test_dir,'test') filecontents = 'this is a test' # put str iceprod.core.gridftp.GridFTP.put(address,data=filecontents, request_timeout=self._timeout) ret = iceprod.core.gridftp.GridFTP.size(address, request_timeout=self._timeout) correct = len(filecontents) self.assertEqual(ret, correct) def load_tests(loader, tests, pattern): suite = unittest.TestSuite() alltests = glob_tests(loader.getTestCaseNames(gridftp_test)) suite.addTests(loader.loadTestsFromNames(alltests,gridftp_test)) return suite
StarcoderdataPython
4913130
import re


def translate(code: str) -> (str, str):
    """Translates text with bash escape sequences to normal text
    Input: raw shell input
    Output: (type of line, processed line without control sequences)
    """
    code = re.sub('\x07', '', code)  # Bell on error. Not needed
    code = re.sub(r'(\s|\S)*:\t', '', code)  # Removing tabs
    new_line = ''
    digit = ''
    state = 'standard'
    position = 0
    line_type = 'input' if (re.search(r'\[[\s|\S]*~\]#', code)
                            or re.search(r'\(reverse-i-search\)`[\s |\S]*\':', code)) else 'output'
    if line_type == 'input':
        code = re.sub(r'\S*\[[\s|\S]*~\]# ', '', code)  # sometimes there are stray characters at the start
        code = re.sub(r'\(reverse-i-search\)`[\s |\S]*\':', '', code)
    for letter in code:
        if state == 'standard':
            if letter == '\x1b':
                state = 'escape'
            elif letter == '\x08':
                position = max(0, position - 1)
            else:
                new_line = new_line[:position] + letter + new_line[position + 1:]
                position += 1
        else:
            if letter == '[':  # start of a control sequence
                pass
            elif '0' <= letter <= '9':
                digit += letter
            elif letter == 'K':  # erase in line: 0/empty = to end, 1 = to start, 2 = whole line
                if not digit or (number := int(digit)) == 0:
                    new_line = new_line[:position]
                elif number == 1:
                    new_line = new_line[position:]
                    position = 0
                else:
                    new_line = ''
                    position = 0
                digit = ''
                state = 'standard'
            elif letter == 'P':  # deletes characters to the right
                number = int(digit)
                digit = ''
                new_line = new_line[:position] + new_line[position + number:]
                state = 'standard'
            elif letter == '@':
                number = int(digit)
                digit = ''
                new_line = new_line[:position] + chr(1234) * number + new_line[position:]
                state = 'standard'
            else:
                raise ValueError()
    if new_line and new_line[-1] == '\r':
        new_line = new_line[:-1]
    new_line = re.sub(r'(\s|\S)*\r', '', new_line)  # \r is also a carriage return
    return line_type, new_line.strip()
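# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the module above): a few synthetic terminal
# captures run through translate().  The prompt text and escape sequences are
# made up for illustration; '\x1b[K' erases to the end of the line and '\x08'
# moves the cursor back one character.
if __name__ == '__main__':
    # a prompt line: classified as 'input' and the prompt prefix is stripped
    print(translate('[root@host ~]# ls -la\r'))         # ('input', 'ls -la')
    # backspace editing: 'lss', cursor back one, erase to end of line
    print(translate('[root@host ~]# lss\x08\x1b[K\r'))  # ('input', 'ls')
    # plain command output with no prompt markers
    print(translate('total 12\r'))                      # ('output', 'total 12')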
StarcoderdataPython
260930
import sys import typing def copy(): ''' Copy the material settings and nodes ''' pass def new(): ''' Add a new material ''' pass def paste(): ''' Paste the material settings and nodes ''' pass
StarcoderdataPython
1738530
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Recognize captchas fetched from the network using a self-hosted API.
Parameters that need to be configured:
    remote_url = "https://www.xxxxxxx.com/getImg"    URL that serves the captcha image
    rec_times = 1    number of recognition attempts
"""
import datetime
import requests
from io import BytesIO
import time
import json
import os


def recognize_captcha(index, test_path, save_path, image_suffix):
    image_file_name = 'captcha.{}'.format(image_suffix)

    with open(test_path, "rb") as f:
        content = f.read()

    # Recognize
    s = time.time()
    url = "http://127.0.0.1:5000/b"
    files = {'image_file': (image_file_name, BytesIO(content), 'application')}
    r = requests.post(url=url, files=files)
    e = time.time()

    # Timing and result fields
    result_dict = json.loads(r.text)["value"]  # response payload
    predict_text = result_dict["value"]  # recognition result
    whole_time_for_work = int((e - s) * 1000)
    speed_time_by_rec = result_dict["speed_time(ms)"]  # model inference time
    request_time_by_rec = whole_time_for_work - speed_time_by_rec  # request overhead
    now_time = datetime.datetime.now().strftime('%Y-%m-%d@%H:%M:%S')  # current time

    # Write a log entry
    log = "{},{},{},{},{},{}\n"\
        .format(index, predict_text, now_time, whole_time_for_work, speed_time_by_rec, request_time_by_rec)
    with open("./test.csv", "a+") as f:
        f.write(log)

    # Print the result to the console
    print("attempt: {}, result: {}, time: {}, total: {}ms, inference: {}ms, request: {}ms"
          .format(index, predict_text, now_time, whole_time_for_work, speed_time_by_rec, request_time_by_rec))

    # Save the file
    # img_name = "{}_{}.{}".format(predict_text, str(time.time()).replace(".", ""), image_suffix)
    # path = os.path.join(save_path, img_name)
    # with open(path, "wb") as f:
    #     f.write(content)


def main():
    with open("conf/sample_config.json", "r") as f:
        sample_conf = json.load(f)

    # Configuration parameters
    test_file = "sample/test/alzha_4rio3zjl2.jpg"  # path of the test image to recognize
    save_path = sample_conf["local_image_dir"]  # directory to save images in
    image_suffix = sample_conf["image_suffix"]  # image file suffix

    for i in range(20000):
        recognize_captcha(i, test_file, save_path, image_suffix)


if __name__ == '__main__':
    main()
StarcoderdataPython
150587
from enum import Enum from typing import Optional import requests import typer class ListType(str, Enum): blacklist = "blacklist" whitelist = "whitelist" class ListAction(str, Enum): add = "add" clear = "clear" class BlacklistSource(str, Enum): firebog_ticked = "firebog_ticked" firebog_noncrossed = "firebog_noncrossed" firebog_all = "firebog_all" @property def url(self): # Yes, this is horrible. Typer uses the value, not the key, so this makes the user experience better" mappings = [ { "key": BlacklistSource.firebog_ticked, "value": "https://v.firebog.net/hosts/lists.php?type=tick", }, { "key": BlacklistSource.firebog_noncrossed, "value": "https://v.firebog.net/hosts/lists.php?type=nocross", }, { "key": BlacklistSource.firebog_all, "value": "https://v.firebog.net/hosts/lists.php?type=all", }, ] return next(item["value"] for item in mappings if item["key"] == self) class WhitelistSource(str, Enum): anudeepND_safe = "anudeepND_safe" anudeepND_optional = "anudeepND_optional" anudeepND_referral = "anudeepND_referral" anudeepND_safe_plus_optional = "anudeepND_safe_plus_optional" @property def url(self): # Yes, this is horrible. Typer uses the value, not the key, so this makes the user experience better" mappings = [ { "key": WhitelistSource.anudeepND_safe, "value": [ "https://raw.githubusercontent.com/anudeepND/whitelist/master/domains/whitelist.txt" ], }, { "key": WhitelistSource.anudeepND_optional, "value": [ "https://raw.githubusercontent.com/anudeepND/whitelist/master/domains/optional-list.txt" ], }, { "key": WhitelistSource.anudeepND_referral, "value": [ "https://raw.githubusercontent.com/anudeepND/whitelist/master/domains/referral-sites.txt" ], }, { "key": WhitelistSource.anudeepND_safe_plus_optional, "value": [ "https://raw.githubusercontent.com/anudeepND/whitelist/master/domains/optional-list.txt", "https://raw.githubusercontent.com/anudeepND/whitelist/master/domains/whitelist.txt", ], }, ] return next(item["value"] for item in mappings if item["key"] == self) class AdguardListManager: def main( self, list_type: ListType = typer.Option(..., prompt=True), list_action: ListAction = typer.Option(..., prompt=True), blacklist_source: BlacklistSource = typer.Option( None, help="Use one of the predefined blacklist sources" ), whitelist_source: WhitelistSource = typer.Option( None, help="Use one of the predefined whitelist sources" ), custom_source_list: list[str] = typer.Option( None, help="Any custom source lists. Use this when you have one URL which just lists a bunch of URLs to add. Repeat this argument for each url. Only for blacklists", ), custom_url: list[str] = typer.Option( None, help="Any custom urls. Repeat this argument for each url. 
Urls will be added directly", ), host: str = typer.Option(..., prompt=True, help="Example: 192.168.1.5"), port: int = typer.Option(..., prompt=True, help="Example: 80"), username: str = typer.Option( ..., prompt=True, help="Username to log in to the AdGuard UI" ), password: str = typer.Option( ..., prompt=True, hide_input=True, help="Password to log into the AdGuard UI", ), ) -> None: self.host = f"http://{host}:{port}" self.get_logged_in_session(username, password) try: if list_type == ListType.blacklist and list_action == ListAction.add: if custom_source_list is not None and len(custom_source_list) > 0: for url in list(custom_source_list): self.add_blacklists_from_url(list_source_url=url) elif custom_url is not None and len(custom_url) > 0: self.add_to_list(list(custom_url)) elif blacklist_source is not None: self.add_blacklists_from_url(list_source_url=blacklist_source.url) else: typer.run(self.add_blacklists) elif list_type == ListType.blacklist and list_action == ListAction.clear: self.clear_list() elif list_type == ListType.whitelist and list_action == ListAction.add: if custom_url is not None and len(custom_url) > 0: self.add_to_list(list(custom_url)) if whitelist_source is not None: self.add_to_list(whitelist_source.url, whitelist=True) else: typer.run(self.add_whitelists) elif list_type == ListType.whitelist and list_action == ListAction.clear: self.clear_list(whitelist=True) else: print("Unknown or unimplemented path. Sorry!") except Exception as e: print(f"Something bad happened! See the error for more details:") print(e) finally: self.session.close() def get_logged_in_session(self, username: str, password: str) -> requests.Session: self.session = requests.Session() response = self.session.post( f"{self.host}/control/login", json={"name": username, "password": password} ) response.raise_for_status() def add_blacklists_from_url(self, list_source_url: str): urls = self.get_lists_from_url(list_source_url) self.add_to_list(urls) def add_blacklists( self, list_source: BlacklistSource = typer.Option(..., prompt=True) ): urls = self.get_lists_from_url(list_source.url) self.add_to_list(urls) def add_whitelists( self, list_source: WhitelistSource = typer.Option(..., prompt=True) ): self.add_to_list(list_source.url, whitelist=True) def get_lists_from_url(self, url: str): response = requests.get(url) response.raise_for_status() urls = list(set(response.text.split("\n"))) return [x for x in urls if x] def add_to_list(self, urls: list, whitelist: bool = False) -> None: for url in urls: response = self.session.post( f"{self.host}/control/filtering/add_url", json={"name": url, "url": url, "whitelist": whitelist}, ) response.raise_for_status() def clear_list(self, whitelist: bool = False) -> None: response = self.session.get(f"{self.host}/control/filtering/status") response.raise_for_status() filters = response.json().get("filters", []) if filters is not None: for filter in filters: response = self.session.post( f"{self.host}/control/filtering/remove_url", json={"url": filter["url"], "whitelist": whitelist}, ) response.raise_for_status() if __name__ == "__main__": instance = AdguardListManager() typer.run(instance.main)
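# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the module above): AdguardListManager is
# normally driven through the typer CLI, but its helpers can also be called
# directly.  _example_direct_use is a made-up, never-invoked illustration; the
# host, credentials and blocklist URL are placeholders for a real AdGuard Home
# instance.
def _example_direct_use() -> None:
    manager = AdguardListManager()
    manager.host = "http://192.168.1.5:80"              # what main() would build from --host/--port
    manager.get_logged_in_session("admin", "changeme")  # placeholder credentials
    try:
        # add one blocklist URL directly, as the CLI's --custom-url path does
        manager.add_to_list(["https://example.com/blocklist.txt"])
    finally:
        manager.session.close()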
StarcoderdataPython
8179676
from types import SimpleNamespace from typing import Any from ikea_api.abc import ( AsyncExecutor, BaseAPI, Endpoint, EndpointInfo, RequestInfo, ResponseInfo, SessionInfo, SyncExecutor, endpoint, ) from tests.conftest import EndpointTester, ExecutorContext, MockResponseInfo def test_endpoint_decorator(): def handler(_: ResponseInfo) -> None: # pragma: no cover ... def func() -> Endpoint[None]: # pragma: no cover ... decorated_endpoint = endpoint(handlers=[handler])(func) info = decorated_endpoint() assert isinstance(info, EndpointInfo) assert info.func.func == func assert info.handlers == [handler] def test_sync_executor(executor_context: ExecutorContext): class MyExecutor(SyncExecutor): @staticmethod def request(request: RequestInfo): assert request == executor_context.request return executor_context.response res = MyExecutor.run(executor_context.func()) assert res == executor_context.endpoint_response executor_context.handler.assert_called_with(executor_context.response) async def test_async_executor(executor_context: ExecutorContext): async def something(): pass class MyExecutor(AsyncExecutor): @staticmethod async def request(request: RequestInfo): assert request == executor_context.request await something() return executor_context.response res = await MyExecutor.run(executor_context.func()) assert res == executor_context.endpoint_response executor_context.handler.assert_called_with(executor_context.response) def test_error_handlers(): def handle_no_anotherthing(response: ResponseInfo) -> None: try: response.json["anotherthing"] except KeyError: response.json["anotherthing"] = "You're welcome!" class Something: pass class API(BaseAPI): def _get_session_info(self) -> SessionInfo: return SessionInfo("", {}) @endpoint(handlers=[handle_no_anotherthing]) def get_something(self, something: str) -> Endpoint[Something]: response = yield self._RequestInfo("POST", "", json={"name": something}) return response.json["anotherthing"] t = EndpointTester(API().get_something("somecoolthing")) resp = t.parse(MockResponseInfo(json_={"onlysomethng": "tada"}), handle_errors=True) assert resp == "You're welcome!" def test_base_api_request_info(): session = SessionInfo("", {}) mock_instance: Any = SimpleNamespace(_session_info=session) res = BaseAPI._RequestInfo( mock_instance, "POST", url=None, params=None, headers=None, json=None, data=None ) assert res == RequestInfo( session_info=session, method="POST", url="", params={}, headers={}, data=None, json=None, )
StarcoderdataPython
4868209
import unittest import xmlrunner from main import * from main import database_loader class all_test_suite(unittest.TestCase): #def test_android_permission(self): #from main import platform #self.assertRaises(AttributeError, platform.request_permissions("android.permission.INTERNET")) def test_udp_ip(self): self.assertEqual(udp_ip, '255.255.255.255') self.assertEqual(udp_port, 9) def test_db_files(self): self.assertEqual(database_file, 'db.tx') self.assertEqual(credentials_file, 'db_creds.tx') def test_prepare_mac(self): self.assertEqual(prepare_mac("00:00:00:00:00:00"), bytearray(b'\x00\x00\x00\x00\x00\x00')) def test_translate_ip_to_mac(self): self.assertIsNone(translate_ip_to_mac("something")) def test_database_loader(self): self.assertIsNone(database_loader()) def test_database_loader_exception_no_SSH(self): self.assertRaises(Exception, database_loader()) def test_dump_database_to_file(self): self.assertIsNone(dump_database_to_file()) ##################################################################################### def test_ssh_handler_shutdown_proxmox_via_ssh(self): self.assertIsNone(ssh_handler.shutdown_proxmox_via_ssh("0.0.0.0")) self.assertIsNone(ssh_handler.shutdown_proxmox_via_ssh("999.999.999.999")) self.assertRaises(Exception, ssh_handler.shutdown_proxmox_via_ssh("999.999.999.999")) def test_ssh_handler_close_ssh_connection(self): self.assertIsNone(ssh_handler.close_ssh_connection()) #def test_ssh_handler_test_function(self): #self.assertRaises(IndexError, ssh_handler.test_function("192.168.0.3")) ##################################################################################### def test_udp_socket_ping_selected_ip(self): self.assertFalse(udp_socket.ping_selected_ip("Pick item")) def test_udp_socket_send_magic_packet(self): self.assertNotEqual(udp_socket.send_magic_packet("Pick item"), "Magic packet sent") ##################################################################################### def test_discovery_convert_CIDR(self): self.assertIsNone(discovery.convert_CIDR({'999.999.999.999': '999'})) self.assertIsNone(discovery.convert_CIDR({'999.999.999.999.999': '999'})) def test_discovery_scan_network(self): self.assertIsNone(discovery.scan_network(start_ip="999.999.999.999", end_ip="999.999.999.999", subnetwork_port="999")) def test_discovery_verify_if_proxmox(self): #mock_request.side_effect = requests.exceptions.ConnectionError #self.assertIsNone(discovery.verify_if_proxmox("999.999.999.999")) self.assertIsNotNone(discovery.verify_if_proxmox(proxmox_ips=[])) def test_discovery_retrive_proxmox_mac(self): self.assertRaises(Exception, discovery.retrive_proxmox_mac("999.999.999.999")) def test_discovery_append_ips_mac_to_list(self): self.assertIsNotNone(discovery.append_ips_mac_to_list()) ##################################################################################### #def test_GUI_dump_data(self): #self.assertRaises(NameError, GUI.dump_data(self="foo", event="bar")) def test_GUI(self): self.assertIsNotNone(GUI()) def run_test_suite_generate_xml_report(test_class_name): test_suite = unittest.TestSuite() test = unittest.makeSuite(test_class_name) test_suite.addTest(test) test_runner = xmlrunner.XMLTestRunner(output='test-reports') test_runner.run(test_suite) run_test_suite_generate_xml_report(all_test_suite)
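# ---------------------------------------------------------------------------
# Hedged sketch (independent of the module under test): the magic packet that
# prepare_mac()/send_magic_packet() deal with is the standard Wake-on-LAN
# format, 6 bytes of 0xFF followed by the target MAC repeated 16 times, sent
# over UDP to 255.255.255.255:9 as in the udp_ip/udp_port assertions above.
# build_magic_packet/broadcast_magic_packet are illustrative helpers, not the
# project's implementation.
import socket

def build_magic_packet(mac: str) -> bytes:
    mac_bytes = bytes.fromhex(mac.replace(":", "").replace("-", ""))
    if len(mac_bytes) != 6:
        raise ValueError("MAC address must be 6 bytes")
    return b"\xff" * 6 + mac_bytes * 16

def broadcast_magic_packet(mac: str, ip: str = "255.255.255.255", port: int = 9) -> None:
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.sendto(build_magic_packet(mac), (ip, port))

# build_magic_packet("00:11:22:33:44:55") -> 102 bytes (6 * 0xFF + 16 * MAC)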
StarcoderdataPython
3484022
<reponame>UPstartDeveloper/Problem_Solving_Practice """ Power Set: Write a method to return all subsets of a set. Clarifying questions: - so ok, let's start off with an example to make this clearer - is the input modifiable? no s = {5, 7, 6, -8, 9, 10}, right? Assumptions about the set: - unordered collection - all unique elements - elements can only be integers (positive or negative?) - return the output in an array of sets so from here, what would my output be? Subsets of 1: {5}, {7}, {6}, ... Subsets of 2: {5, 6}, {7,6} - so it's basically every kind combination, given a size from 1 -> n? Intuition: - for each size in range 1 --> n: - backtrack through all the possible combinations - return all the subsets in one array at the end Edge case: - what if there's an empty set as an input? - then return an empty set - can there be an nested elements? - assume there's none for now Approach: backtracking # A: init the list for the return value # B: iterate over all possible sizes Size: 3 set = {5, 7, 6, -8, 9, 10}, subsets = { {5}, {7}, {6}, {-8} , {9}, {10}, - n iterations { 5 , 7 }, { 5 , 6 }, { 5 , -8} - n^2 iterations {5 7 6} } Subset = { _ , _ , _} {5, 7, 6, -8, 9, 10} _ _ _ / 5 _ _ / / 5 6 5 7 / \ \ \ 5 7 6 5 7 -8 5 7 9 5, 7 10 """ def power_set(values): """Return all the set of all subsets of a set.""" # A: init the set for the return value subsets = list() def size_subsets(size, current, values): """Finds all subsets of a given size in a set.""" # Base Case: the current subset is full if len(current) == size: # add the current subset to the output (if not already there) current_set = set(current) if current_set not in subsets: subsets.append(current_set) # Recursive Case: need to find more values for the subset else: # len(current) < size # iterate over adding the others for _ in range(len(values)): new_item = values.pop() print(f"Current and new item: {current, new_item}") # add one more value to the current subset current.append(new_item) size_subsets(size, current, {v for v in values}) print(f"Values left: {values}") # remove one of the elements, so we reach all possibilities current.pop() return None # B: iterate over all possible sizes for size in range(1, len(values) + 1): # add the subsets of the given size to the output size_subsets(size, [], {v for v in values}) # C: return all the subsets return len(subsets) """ Time and Space: O(sum(n! / ((n - k)! k!)), where k goes from 1 to n) values = {6, 7, 8} ss = { {6}, } size = 1 current = [] [], v = {7, 8} [7], v = {8} """ if __name__ == "__main__": # values = {5, 7, 6, -8, 9, 10} values = {6, 7, 8} # values = {} print(power_set(values))
StarcoderdataPython
11360397
<filename>tests/test_api_rename_lines.py import gfapy import unittest class TestAPIRenameLines(unittest.TestCase): def test_rename(self): gfa = gfapy.Gfa(["S\t0\t*", "S\t1\t*", "S\t2\t*", "L\t0\t+\t2\t-\t12M", "C\t1\t+\t0\t+\t12\t12M", "P\t4\t2+,0-\t12M"]) gfa.segment("0").name = "X" with self.assertRaises(gfapy.NotFoundError): gfa.try_get_segment("0") self.assertEqual(set(["X", "1", "2"]), set(gfa.segment_names)) self.assertEqual("L\tX\t+\t2\t-\t12M", str(gfa.dovetails[0])) self.assertEqual("C\t1\t+\tX\t+\t12\t12M", str(gfa.containments[0])) self.assertEqual("P\t4\t2+,X-\t12M", str(gfa.paths[0])) with self.assertRaises(gfapy.NotFoundError): gfa.try_get_segment("0").dovetails_of_end("R") self.assertEqual("L\tX\t+\t2\t-\t12M", str(gfa.segment("X").dovetails_of_end("R")[0])) self.assertEqual("C\t1\t+\tX\t+\t12\t12M", str(gfa.try_get_segment("1").edges_to_contained[0])) with self.assertRaises(gfapy.NotFoundError): gfa.try_get_segment("0").containers self.assertEqual("C\t1\t+\tX\t+\t12\t12M", str(gfa.try_get_segment("X").edges_to_containers[0])) self.assertEqual("P\t4\t2+,X-\t12M", str(gfa.try_get_segment("X").paths[0]))
StarcoderdataPython
1748850
<reponame>tienne-B/mit-tab<filename>mittab/libs/backup/__init__.py import shutil import time import os from wsgiref.util import FileWrapper from django.conf import settings from mittab.apps.tab.models import TabSettings from mittab.libs import errors from mittab.settings import BASE_DIR from mittab.libs.backup.strategies.local_dump import LocalDump def _generate_unique_key(base): if LocalDump(base).exists(): return "%s_%s" % (base, int(time.time())) else: return base def backup_round(dst_filename=None, round_number=None, btime=None): if round_number is None: round_number = TabSettings.get("cur_round", "no-round-number") if btime is None: btime = int(time.time()) print("Trying to backup to backups directory") if dst_filename is None: dst_filename = "site_round_%i_%i" % (round_number, btime) dst_filename = _generate_unique_key(dst_filename) return LocalDump(dst_filename).backup() def handle_backup(f): dst_key = _generate_unique_key(f.name) print(("Tried to write {}".format(dst_key))) try: return LocalDump.from_upload(dst_key, f) except Exception: errors.emit_current_exception() def list_backups(): print("Checking backups directory") return [dump.key for dump in LocalDump.all()] def restore_from_backup(src_key): print("Restoring from backups directory") return LocalDump(src_key).restore() def get_wrapped_file(src_key): return LocalDump(src_key).downloadable()
StarcoderdataPython
1678215
#!/usr/bin/env python
from distutils.version import LooseVersion
from setuptools import setup, find_packages


def get_docker_client_requirement():
    DOCKER_PY_REQUIREMENT = 'docker-py >= 1.8.1, < 2'
    DOCKER_REQUIREMENT = 'docker >= 2.0.0, < 3'
    docker_client_installed = True
    try:
        import docker
    except ImportError:
        docker_client_installed = False
    if docker_client_installed and\
            LooseVersion(docker.__version__) < LooseVersion('2.0.0'):
        return DOCKER_PY_REQUIREMENT
    return DOCKER_REQUIREMENT


def find_requirements(fn):
    lines = []
    with open(fn) as f:
        for line in f:
            line = line.strip()
            if not line.startswith('#'):
                lines.append(line)
    return lines


setup(
    name='docker-make',
    description='build, tag, and push a bunch of related docker images via a single command',
    version='1.1.7',
    author='jizhilong',
    author_email='<EMAIL>',
    url='https://github.com/CtripCloud/docker-make',
    license='Apache',
    keywords=['docker', 'image', 'build'],
    packages=find_packages(exclude=['tests']),
    entry_points={
        'console_scripts': [
            'docker-make = dmake.cli:main'
        ]
    },
    install_requires=find_requirements('requirements.pip') +\
        [get_docker_client_requirement()],
    tests_require=find_requirements('test-requirements.pip'),
    test_suite='nose.collector',
    classifiers=[],
)
StarcoderdataPython
118785
import abc # Abstract Base Class from eth_account import Account from collections import defaultdict import sqlalchemy from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer from sqlalchemy.orm import sessionmaker from sqlalchemy.sql import func Base = declarative_base() class IERC20(abc.ABC): @abc.abstractmethod def totalSupply(self): pass @abc.abstractmethod def balanceOf(self, address): pass @abc.abstractmethod def transfer(self, address_to, value, address_from): # In Web3 interface we sign the transaction from address_from # For simplicity here just add as extra argument pass # @abc.abstractmethod # def approve(self, address_spender, value, address_from): # pass # # @abc.abstractmethod # def allowance(self, address_owner, address_spender): # pass # # @abc.abstractmethod # def transferFrom(self, address_from, address_to, value): # pass class SimpleERC20(IERC20): def __init__(self, supply): self.balance = defaultdict(int) self.allowance = defaultdict(int) self.transfers = {} self.approvals = {} self.total_supply = supply # Start with all funds in account 0 self.balance[0] = supply def totalSupply(self): return self.total_supply def balanceOf(self, address): return self.balance[address] def transfer(self, address_to, value, address_from): try: assert value >= 0, ValueError('Can only transfer positive values') assert self.balance[address_from] >= value, ValueError('Insufficient funds') self.balance[address_from] -= value self.balance[address_to] += value self.transfers[len(self.transfers) + 1] = { 'from': address_from, 'to': address_to, 'value': value } return True except AssertionError as ex: print(ex) return False @property def count_transfer(self): return len(self.transfers) # Database backed ERC20 contract class Balance(Base): __tablename__ = 'balance' account = Column(Integer, primary_key=True) value = Column(Integer) class Transfer(Base): __tablename__ = 'transfer' tx_id = Column(Integer, primary_key=True) address_from = Column(Integer) address_to = Column(Integer) value = Column(Integer) class DbERC20(IERC20): def __init__(self, supply): self.engine = create_engine('sqlite:///:memory:', echo=False) _session = sessionmaker(bind=self.engine) self.session = _session() Base.metadata.create_all(self.engine) self.session.add(Balance(account=0, value=supply)) self.session.commit() def totalSupply(self): return self.session.query(func.sum(Balance.value)).scalar() def balanceOf(self, address): account = self.session.query(Balance.value).filter_by(account=address).first() return 0 if account is None else account.value def transfer(self, address_to, value, address_from): try: _address_from = self.session.query(Balance).filter_by(account=address_from).first() _address_to = self.session.query(Balance).filter_by(account=address_to).first() assert value >= 0, ValueError('Can only transfer positive values') assert _address_from.value >= value, ValueError('Insufficient funds') _address_from.value -= value if _address_to is not None: _address_to.value += value else: self.session.add(Balance(account=address_to, value=value)) self.session.add( Transfer(tx_id=self.count_transfer, address_from=address_from, address_to=address_to, value=value) ) self.session.commit() return True except AssertionError as ex: print(ex) return False @property def transfers(self): res = {} for tx in self.session.query(Transfer).order_by(Transfer.tx_id): res[tx.tx_id] = { 'from': tx.address_from, 'to': tx.address_to, 'value': tx.value } return res 
@property def count_transfer(self): return self.session.query(Transfer.tx_id).count()
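
# A minimal usage sketch of the two IERC20 implementations above, assuming only the
# integer account ids this file already uses (account 0 holds the initial supply);
# the supply and transfer amounts below are arbitrary example values.
if __name__ == "__main__":
    for token in (SimpleERC20(1000), DbERC20(1000)):
        assert token.totalSupply() == 1000
        # Move 250 tokens from the funded account 0 to account 1
        assert token.transfer(address_to=1, value=250, address_from=0)
        # Overdrawing account 1 should fail and leave balances untouched
        assert not token.transfer(address_to=2, value=9999, address_from=1)
        print(type(token).__name__, token.balanceOf(0), token.balanceOf(1), token.count_transfer)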
StarcoderdataPython
11221075
<gh_stars>1-10 # coding=utf-8 import numpy as np # para metodos regularizados from numpy.linalg import norm import scipy.sparse.linalg from sys import version_info if(version_info >= (3,0)): from importlib import reload as reload import FarFieldSignal reload(FarFieldSignal) from FarFieldSignal import * import GridArray reload(GridArray) from GridArray import * import Imager reload(Imager) from Imager import * import operations reload(operations) from operations import * import omp reload(omp) from omp import * from numpy import pi from scipy.linalg import lstsq class SignalEstimator(Imager): def __init__(self, gridArray, freq, farFieldReference = None, fieldRes = None): Imager.__init__(self, gridArray, freq, farFieldReference = farFieldReference, fieldRes = fieldRes) def estimator_mode(self, mode, **kwargs): if mode == "OMP": if kwargs['sparsity'] < 1: s = np.floor(kwargs['sparsity'] * self.nDir) else: s = int(kwargs['sparsity']) if 'eps' in kwargs: eps = kwargs['eps'] else: eps = 0 if 'x0' in kwargs: x0 = kwargs['x0'] else: x0 = None estimator = lambda x: OMP(self.V, x, s, eps = eps, x0 = x0)[0] if mode == "OMP_kron": if kwargs['sparsity'] < 1: s = np.floor(kwargs['sparsity'] * self.nDir) else: s = int(kwargs['sparsity']) if 'eps' in kwargs: eps = kwargs['eps'] else: eps = 0 if 'x0' in kwargs: x0 = kwargs['x0'] else: x0 = None estimator = lambda x: OMP_kron(self.Vx,self.Vy, x, s, eps = eps, x0 = x0)[0] if mode == "l0LS": if 'l0LS_params' in kwargs: l0LS_params = kwargs['l0LS_params'] else: l0LS_params = [0.99,0.01,100] estimator = lambda x: l0LS(self.V,x,*l0LS_params) if mode == "least-squares": estimator = lambda x: np.linalg.lstsq(self.V, x)[0] # Convex estimators if mode in ["lasso","basis-pursuit","BPDN","TV"]: import convex reload(convex) if mode == "lasso": estimator = lambda x: convex.lasso(self.V, x, kwargs['lambd']) if mode == "basis-pursuit": estimator = lambda x: convex.bp(self.V, x) if mode == "BPDN": estimator = lambda x: convex.bpdn(self.V, x,kwargs['noise']) if mode == "TV": estimator = lambda x: np.reshape(convex.tv(self.Vx, self.Vy, np.reshape(x,[self.nMicX,self.nMicY])), self.nDir) # Beamformer estimators if mode in ["DAS", "Barlett", "MPDR", "MVDR"]: self.beamformer = mode estimator = lambda x: np.dot(self.wH, x) if mode == "DAS_kron": estimator = lambda x: self.Vy.T.conj().dot(np.reshape(x,[self.nMicX,self.nMicY])).dot(self.Vx.conj()).flatten()/self.nMic return estimator def plain_estimator(self, mode, **kwargs): nBins = np.shape(self.gridArray.signalFFT)[2] nFreq = np.shape(self.gridArray.signalFFT)[3] y = np.zeros([self.nDir, nFreq, nBins], dtype = np.complex128) estimator = self.estimator_mode(mode,**kwargs) freqs = np.fft.rfftfreq(nFreq)*self.gridArray.sampleRate freq0 = self.freq for freq in freqs: self.setFreq(freq) x = np.reshape(self.gridArray.signalFFT[:,:,:,self.freqBin], [self.nMic, nBins]) for bn in range(nBins): y[:,self.freqBin,bn] = estimator(x[:,bn]) self.setFreq(freq0) return y def OMP_progressive(self, **kwargs): # utilizando xhat da frequencia anterior como x0 da proxima nBins = np.shape(self.gridArray.signalFFT)[2] nFreq = np.shape(self.gridArray.signalFFT)[3] y = np.zeros([self.nDir, nFreq, nBins], dtype = np.complex128) freq0 = self.freq if kwargs['sparsity'] < 1: s = np.floor(kwargs['sparsity'] * self.nDir) else: s = int(kwargs['sparsity']) half_s = int(np.ceil(0.9*s)) freqs = np.fft.rfftfreq(nFreq)*self.gridArray.sampleRate for freq in freqs: self.setFreq(freq) V = self.V x = np.reshape(self.gridArray.signalFFT[:,:,:,self.freqBin], [self.nMic, 
nBins]) xhat = None for bn in range(nBins): xhat, I = OMP(V, x, s, eps = 0, x0 = xhat) y[:,self.freqBin,bn][I] = xhat[I] try: xhat[I][np.argsort(np.abs(xhat[I]))][:half_s] = np.zeros(half_s) except: shp = xhat[I][np.argsort(np.abs(xhat[I]))][:half_s].shape xhat[I][np.argsort(np.abs(xhat[I]))][:half_s] = np.zeros(shp) self.setFreq(freq0) return y # utilizando xhat da frequencia anterior como x0 da proxima def l0LS_progressive(self,**kwargs): if 'l0LS_params' in kwargs: l0LS_params = kwargs['l0LS_params'] else: l0LS_params = [0.99,0.01,100] nBins = np.shape(self.gridArray.signalFFT)[2] nFreq = np.shape(self.gridArray.signalFFT)[3] y = np.zeros([self.nDir, nFreq, nBins], dtype = np.complex128) freqs = np.fft.rfftfreq(nFreq)*self.gridArray.sampleRate for freq in freqs: self.setFreq(freq) V = self.V x = np.reshape(self.gridArray.signalFFT[:,:,:,self.freqBin], [self.nMic, nBins]) for bn in range(nBins): y[:,self.freqBin,bn] = l0LS(V,x[:,bn],*l0LS_params, x0 = y[:,self.freqBin-1,bn]) return y def DAMAS2_LSQR(self, **kwargs): nBins = np.shape(self.gridArray.signalFFT)[2] nFreq = np.shape(self.gridArray.signalFFT)[3] y = np.zeros([self.nDir, nFreq, nBins], dtype = np.complex128) if kwargs['sparsity'] < 1: s = np.floor(kwargs['sparsity'] * self.nDir) else: s = int(kwargs['sparsity']) freqs = np.fft.rfftfreq(nFreq)*self.gridArray.sampleRate # average image to use same support avg_img = np.zeros([self.nDir]) for freq in freqs: self.setFreq(freq) self.beamform(mode = "CSM-KAT", verbose = False) self.DAMAS2(1000,mode="KAT", verbose = False) avg_img += np.reshape(self.damas2Img,[self.nDir]) support = np.reshape((avg_img != 0),[self.nDir]) l0 = sum(support) # l0 norm of the proposed solution if l0 > s: # limita para os s maiores valores na imagem new_zeros = np.argsort(avg_img)[:self.nDir-s] for index in new_zeros: support[index] = False l0 = s for freq in freqs: self.setFreq(freq) V = self.V x = np.reshape(self.gridArray.signalFFT[:,:,:,self.freqBin], [self.nMic, nBins]) for bn in range(nBins): yhat = lstsq(V[:,support],x)[0] y[:,self.freqBin,bn][support] = yhat.reshape(l0) return y def beamform_estimator(self, beamformer = "DAS", verbose = True, **kwargs): # reconstruction from frequency domain self.beamformer = beamformer if verbose: print("Beamformer mode {}".format(beamformer)) nBins = np.shape(self.gridArray.signalFFT)[2] if 'fieldRes' in kwargs: self.fieldRes = kwargs['fieldRes'] self.nDirX = self.fieldRes[0] self.nDirY = self.fieldRes[1] self.nDir = np.product(self.fieldRes) if self.beamformer in ["DAS", "Barlett", "MPDR"]: signalVector = np.reshape( self.gridArray.signalFFT[:,:,:,self.freqBin], [self.nMic,nBins]) # matriz para conter a fft da estimativa y = np.zeros([self.nDir,nBins], dtype = np.complex128) wH = self.wH ''' for b in range(nBins): y[:,b] = np.dot(wH,signalVector[:,b]) ''' y = np.apply_along_axis(lambda x: np.dot(wH,x),0,signalVector) Y = np.reshape(y,self.fieldRes) if self.beamformer == "X-KAT": # X.shape = [nMicX, nMicY, nBins] X = self.gridArray.signalFFT[:,:,:,self.freqBin] Y = np.zeros([self.nDirX, self.nDirY, nBins], dtype = np.complex128) Vxc = self.Vx.conj() VyH = self.Vy.T.conj() for i in range(nBins): Y[:,:,i] = VyH.dot(X[:,:,i]).dot(Vxc) #Y = Y/np.max(Y) self.bfImg = Y return np.copy(self.bfImg)
StarcoderdataPython
9682700
<filename>ConcurrentSpider/demo_multiprocessing.py
import multiprocessing
import time
from multiprocessing import Process

'''
Python 3.x multiprocessing module demo.
Usage is essentially the same as the threading module; only the meaning and the
underlying mechanism (processes instead of threads) differ.
Shows the basic ways to use the multiprocessing Process class: subclassing it and
overriding run(), or passing a target function directly.
'''


class NormalProcess(Process):
    def __init__(self, name=None):
        Process.__init__(self, name=name)
        self.counter = 0

    def run(self):
        print(self.name + ' process is start!')
        self.do_customer_things()
        print(self.name + ' process is end!')

    def do_customer_things(self):
        while self.counter < 10:
            time.sleep(1)
            print('do customer things counter is:'+str(self.counter))
            self.counter += 1


def loop_runner(max_counter=5):
    print(multiprocessing.current_process().name + " process is start!")
    cur_counter = 0
    while cur_counter < max_counter:
        time.sleep(1)
        print('loop runner current counter is:' + str(cur_counter))
        cur_counter += 1
    print(multiprocessing.current_process().name + " process is end!")


if __name__ == '__main__':
    print(multiprocessing.current_process().name + " process is start!")
    print("cpu count:"+str(multiprocessing.cpu_count())+", active child count:"+str(len(multiprocessing.active_children())))
    normal_process = NormalProcess("NORMAL PROCESS")
    normal_process.start()
    loop_process = Process(target=loop_runner, args=(10,), name='LOOP PROCESS')
    loop_process.start()
    print("cpu count:" + str(multiprocessing.cpu_count()) + ", active child count:" + str(len(multiprocessing.active_children())))
    normal_process.join()
    loop_process.join()
    print(multiprocessing.current_process().name + " process is end!")
StarcoderdataPython
4807576
import asyncio from mavsdk import System async def _run(): drone = System() await drone.connect(system_address="udp://:14540") print("Waiting for drone to connect...") async for state in drone.core.connection_state(): if state.is_connected: print(f"Drone discovered with UUID: {state.uuid}") break print("Waiting for drone to have a global position estimate...") async for health in drone.telemetry.health(): if health.is_global_position_ok: print("Global position estimate ok") break # print("-- Arming") # await drone.action.arm() # print("-- Taking off") # await drone.action.takeoff() # await asyncio.sleep(5) print("-- Landing") await drone.action.land() def main(): asyncio.run(_run())
StarcoderdataPython
3273330
#!/usr/bin/env python3 # # This file is part of 'Aleph - A Library for Exploring Persistent # Homology'. It contains code for visualizing extended persistence # hierarchies (also called interlevel set persistence hierarchies) # as TikZ pictures. # # The file processes _all_ command-line arguments and expects them # to be hierarchies. The generated code will be written to STDOUT. # # For more information, please refer to: # # Hierarchies and Ranks for Persistence Pairs # <NAME>, <NAME>, and <NAME> # Proceedings of TopoInVis 2017, Japan import re import sys """ Class describing a persistence pair in the extended persistence hierarchy. This class is basically a container for all critical points of a function. """ class PersistencePair: def __init__(self, creator, destroyer): self.creator = creator self.destroyer = destroyer self.children = list() # Adds a new child to the current node; this function return 'self' in # order to permit chaining (not that we use it in this example). def add_child(self, child): self.children.append(child) return self @staticmethod def get_children(node): return node.children """ Reads an extended persistence hierarchy from a file and converts it to a sequence of nodes, as specified above. """ def load_hierarchy(filename, scale=False, factor=None): reNode = r'(\d+):\s+([\d\.\d]+)\s+(\S+)' reEdge = r'(\d+)\s+--\s+(\d+)' id_2_pair = dict() edges = list() with open(filename) as f: for line in f: match = re.match(reNode, line) if match: id = int(match.group(1)) creator = int(match.group(2)) destroyer = float(match.group(3)) if destroyer == float('inf'): destroyer = int(1e4) # TODO: hard-coded... id_2_pair[id] = PersistencePair(creator, destroyer) else: match = re.match(reEdge, line) if match: edges.append( (int(match.group(1)),int(match.group(2))) ) for id_u,id_v in edges: u = id_2_pair[id_u] v = id_2_pair[id_v] u.add_child(v) # Find root(s): a root is a node that only appears as the source of an # edge but not as the destination sources = set([u for u,_ in edges]) targets = set([v for _,v in edges]) roots = sources.difference(targets) assert len(roots) == 1, "Hierarchy must be connected" return id_2_pair[roots.pop()] """ Formats a persistence pair for TikZ output. """ def format_pair(pair): return "node { $(%d,%d)$ }" % (pair.creator, pair.destroyer) """ Traverses and prints the hierarchy. """ def traverse_hierarchy(node, level): prefix = ' ' * level * 3 output = '' for index,child in enumerate(node.children): output += prefix output += "child {\n" output += prefix + ' ' output += format_pair(child) output += "\n" output += traverse_hierarchy(child, level+1) output += prefix + ' ' + "}" if index+1 == len(node.children) and level == 1: output += ";\n" else: output += "\n" return output filenames = sys.argv[1:] hierarchies = list() for filename in filenames: hierarchies.append( load_hierarchy(filename) ) for hierarchy in hierarchies: print("\\begin{tikzpicture}") print(" \\%s" % format_pair(hierarchy) ) print(" %s" % traverse_hierarchy(hierarchy, 1)) print("\\end{tikzpicture}")
StarcoderdataPython
238383
#!/usr/bin/env python
#
# Check whether there are warnings or errors in the IPMI sensor status
#
# Return CRITICAL or WARNING when there is a sensor error or a PFA alert
#
# <NAME> <<EMAIL>>

import argparse
import re
import subprocess
import sys

STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2


def exit_error(criticality):
    if criticality == 'warning':
        sys.exit(STATE_WARNING)
    else:
        sys.exit(STATE_CRITICAL)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', help='Enable ipmitool output debugging', action='store_true')
    parser.add_argument('--criticality', help='Set sensu alert level, "warning" or "critical" (default)', default='critical')
    args = parser.parse_args()

    # Check Sensor Data Record (SDR) Repository info via elist, which includes asserted discrete states
    cmd = "ipmitool sdr elist"
    try:
        elist = subprocess.check_output(cmd, shell=True)
    except subprocess.CalledProcessError as e:
        print(e.output)
        exit_error(args.criticality)

    lines = elist.splitlines()
    error_messages = []
    for s in lines:
        if s == '':
            continue
        if args.debug:
            print(s)
        sensor = [x.strip() for x in s.split('|')]
        name = sensor[0]
        status = sensor[2]
        asserted_states = sensor[-1]
        # Check if there are sensors not in OK or No Status ('ns')
        if status not in ['ok', 'ns']:
            error_messages.append("Sensor [%s] has unexpected status [%s] %s" % (name, status, asserted_states))
        # Additionally, check unexpected asserted states to discover PFA alerts for RAM and disks
        if re.match(r'DIMM\s\d+\Z', name) or re.match(r'Drive\s\d+\Z', name):
            if asserted_states == '':
                continue
            for state in [x.strip() for x in asserted_states.split(',')]:
                if state not in ['Drive Present', 'No Reading', 'Presence Detected']:
                    error_messages.append("Sensor [%s] has unexpected assertion [%s]" % (name, state))

    if len(error_messages) > 0:
        for msg in error_messages:
            print(msg)
        exit_error(args.criticality)
    sys.exit(STATE_OK)


if __name__ == "__main__":
    main()
StarcoderdataPython
3401419
<gh_stars>1-10 #from django.contrib.auth.forms import UserCreationForm from django.contrib.auth.models import User from django import forms from django.core.exceptions import ValidationError class CustomUserCreationForm(forms.Form): username = forms.CharField(min_length=4, max_length=150,required=False, widget=forms.TextInput( attrs ={ 'class':'form-control', 'required': 'True', 'style': 'border-color: blue;', 'placeholder': 'username required' } )) email = forms.EmailField(required=False, widget=forms.TextInput( attrs = { 'class':'form-control', 'required': 'True', 'style': 'border-color: blue;', 'placeholder': 'valid email required' } )) password1 = forms.CharField(min_length=6, max_length=20,required=False, widget=forms.PasswordInput( attrs = { 'class':'form-control', 'required': 'True', 'style': 'border-color: blue;', 'placeholder': 'password required' } )) password2 = forms.CharField(min_length=6, max_length=20,required=False, widget=forms.PasswordInput( attrs = { 'class':'form-control', 'required': 'True', 'style': 'border-color: blue;', 'placeholder': 'Confirm password' } )) def clean_username(self): username = self.cleaned_data['username'].lower() r = User.objects.filter(username=username) if r.count(): raise ValidationError("Username already exists") return username def clean_email(self): email = self.cleaned_data['email'].lower() r = User.objects.filter(email=email) if r.count(): raise ValidationError("Email already exists") return email def clean_password2(self): password1 = self.cleaned_data.get('password1') password2 = self.cleaned_data.get('password2') if password1 and password2 and password1 != password2: raise ValidationError("Password don't match") return password2 def save(self, commit=True): user = User.objects.create_user( self.cleaned_data['username'], self.cleaned_data['email'], self.cleaned_data['password1'] ) return user
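
# A sketch of how a registration view might drive this form; kept as a comment
# because forms.py is not normally executed on its own. The view name ("signup"),
# template path and redirect target below are hypothetical, not taken from this project.
#
# from django.shortcuts import render, redirect
#
# def signup(request):
#     form = CustomUserCreationForm(request.POST or None)
#     if request.method == "POST" and form.is_valid():
#         form.save()  # calls User.objects.create_user with the cleaned data
#         return redirect("login")
#     return render(request, "registration/signup.html", {"form": form})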
StarcoderdataPython
324989
import contextlib import datetime import logging from django.contrib.postgres.fields import JSONField from django.core import validators from django.core.files.base import ContentFile from django.core.files.storage import Storage from django.db import DEFAULT_DB_ALIAS from django.db import connection from django.db import connections from django.db import models from django.urls import reverse from django.utils import timezone from django.utils.deconstruct import deconstructible from share.models.fields import EncryptedJSONField from share.models.fuzzycount import FuzzyCountManager from share.models.indexes import ConcurrentIndex from share.util import chunked, placeholders, BaseJSONAPIMeta from share.util.extensions import Extensions logger = logging.getLogger(__name__) __all__ = ('Source', 'RawDatum', 'SourceConfig', 'Harvester', 'Transformer', 'SourceUniqueIdentifier') class SourceIcon(models.Model): source_name = models.TextField(unique=True) image = models.BinaryField() @deconstructible class SourceIconStorage(Storage): def _open(self, name, mode='rb'): assert mode == 'rb' icon = SourceIcon.objects.get(source_name=name) return ContentFile(icon.image) def _save(self, name, content): SourceIcon.objects.update_or_create(source_name=name, defaults={'image': content.read()}) return name def delete(self, name): SourceIcon.objects.get(source_name=name).delete() def get_available_name(self, name, max_length=None): return name def url(self, name): return reverse('source_icon', kwargs={'source_name': name}) def icon_name(instance, filename): return instance.name class NaturalKeyManager(models.Manager): use_in_migrations = True def __init__(self, *key_fields): super(NaturalKeyManager, self).__init__() self.key_fields = key_fields def get_by_natural_key(self, key): return self.get(**dict(zip(self.key_fields, key))) class Source(models.Model): name = models.TextField(unique=True) long_title = models.TextField(unique=True) home_page = models.URLField(null=True, blank=True) icon = models.ImageField(upload_to=icon_name, storage=SourceIconStorage(), blank=True) is_deleted = models.BooleanField(default=False) # Whether or not this SourceConfig collects original content # If True changes made by this source cannot be overwritten # This should probably be on SourceConfig but placing it on Source # is much easier for the moment. # I also haven't seen a situation where a Source has two feeds that we harvest # where one provider unreliable metadata but the other does not. 
canonical = models.BooleanField(default=False, db_index=True) # TODO replace with object permissions, allow multiple sources per user (SHARE-996) user = models.OneToOneField('ShareUser', null=True, on_delete=models.CASCADE) objects = NaturalKeyManager('name') class JSONAPIMeta(BaseJSONAPIMeta): pass def natural_key(self): return (self.name,) def __repr__(self): return '<{}({}, {}, {})>'.format(self.__class__.__name__, self.pk, self.name, self.long_title) def __str__(self): return repr(self) class SourceConfigManager(NaturalKeyManager): def get_or_create_push_config(self, user, transformer_key): config_label = '{}.{}'.format(user.username, transformer_key) try: return SourceConfig.objects.get(label=config_label) except SourceConfig.DoesNotExist: source, _ = Source.objects.get_or_create( user=user, defaults={ 'name': user.username, 'long_title': user.username, } ) config, _ = SourceConfig.objects.get_or_create( label=config_label, defaults={ 'source': source, 'transformer': Transformer.objects.get(key=transformer_key), } ) return config class SourceConfig(models.Model): # Previously known as the provider's app_label label = models.TextField(unique=True) version = models.PositiveIntegerField(default=1) source = models.ForeignKey('Source', on_delete=models.CASCADE, related_name='source_configs') base_url = models.URLField(null=True) earliest_date = models.DateField(null=True, blank=True) rate_limit_allowance = models.PositiveIntegerField(default=5) rate_limit_period = models.PositiveIntegerField(default=1) # Allow null for push sources harvester = models.ForeignKey('Harvester', null=True, on_delete=models.CASCADE) harvester_kwargs = JSONField(null=True, blank=True) harvest_interval = models.DurationField(default=datetime.timedelta(days=1)) harvest_after = models.TimeField(default='02:00') full_harvest = models.BooleanField(default=False, help_text=( 'Whether or not this SourceConfig should be fully harvested. ' 'Requires earliest_date to be set. ' 'The schedule harvests task will create all jobs necessary if this flag is set. ' 'This should never be set to True by default. ' )) # Allow null for push sources # TODO put pushed data through a transformer, add a JSONLDTransformer or something for backward compatibility transformer = models.ForeignKey('Transformer', null=True, on_delete=models.CASCADE) transformer_kwargs = JSONField(null=True, blank=True) regulator_steps = JSONField(null=True, blank=True) disabled = models.BooleanField(default=False) private_harvester_kwargs = EncryptedJSONField(blank=True, null=True) private_transformer_kwargs = EncryptedJSONField(blank=True, null=True) objects = SourceConfigManager('label') class JSONAPIMeta(BaseJSONAPIMeta): pass def natural_key(self): return (self.label,) def get_harvester(self, **kwargs): """Return a harvester instance configured for this SourceConfig. **kwargs: passed to the harvester's initializer """ return self.harvester.get_class()(self, **kwargs) def get_transformer(self, **kwargs): """Return a transformer instance configured for this SourceConfig. 
**kwargs: passed to the transformer's initializer """ return self.transformer.get_class()(self, **kwargs) @contextlib.contextmanager def acquire_lock(self, required=True, using='default'): from share.harvest.exceptions import HarvesterConcurrencyError # NOTE: Must be in transaction logger.debug('Attempting to lock %r', self) with connections[using].cursor() as cursor: cursor.execute("SELECT pg_try_advisory_lock(%s::regclass::integer, %s);", (self._meta.db_table, self.id)) locked = cursor.fetchone()[0] if not locked and required: logger.warning('Lock failed; another task is already harvesting %r.', self) raise HarvesterConcurrencyError('Unable to lock {!r}'.format(self)) elif locked: logger.debug('Lock acquired on %r', self) else: logger.warning('Lock not acquired on %r', self) try: yield finally: if locked: cursor.execute("SELECT pg_advisory_unlock(%s::regclass::integer, %s);", (self._meta.db_table, self.id)) logger.debug('Lock released on %r', self) def __repr__(self): return '<{}({}, {})>'.format(self.__class__.__name__, self.pk, self.label) __str__ = __repr__ class Harvester(models.Model): key = models.TextField(unique=True) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) objects = NaturalKeyManager('key') @property def version(self): return self.get_class().VERSION def natural_key(self): return (self.key,) def get_class(self): return Extensions.get('share.harvesters', self.key) def __repr__(self): return '<{}({}, {})>'.format(self.__class__.__name__, self.pk, self.key) def __str__(self): return repr(self) class Transformer(models.Model): key = models.TextField(unique=True) date_created = models.DateTimeField(auto_now_add=True) date_modified = models.DateTimeField(auto_now=True) objects = NaturalKeyManager('key') @property def version(self): return self.get_class().VERSION def natural_key(self): return (self.key,) def get_class(self): return Extensions.get('share.transformers', self.key) def __repr__(self): return '<{}({}, {})>'.format(self.__class__.__name__, self.pk, self.key) def __str__(self): return repr(self) class SourceUniqueIdentifier(models.Model): identifier = models.TextField() source_config = models.ForeignKey('SourceConfig', on_delete=models.CASCADE) class Meta: unique_together = ('identifier', 'source_config') def __repr__(self): return '<{}({}, {}, {!r})>'.format('Suid', self.id, self.source_config.label, self.identifier) __str__ = __repr__ class RawDatumManager(FuzzyCountManager): def link_to_job(self, job, datum_ids): if not datum_ids: return True logger.debug('Linking RawData to %r', job) with connection.cursor() as cursor: for chunk in chunked(datum_ids, size=500): if not chunk: break cursor.execute(''' INSERT INTO "{table}" ("{rawdatum}", "{harvestjob}") VALUES {values} ON CONFLICT ("{rawdatum}", "{harvestjob}") DO NOTHING; '''.format( values=', '.join('%s' for _ in range(len(chunk))), # Nasty hack. Fix when psycopg2 2.7 is released with execute_values table=RawDatum.jobs.through._meta.db_table, rawdatum=RawDatum.jobs.through._meta.get_field('rawdatum').column, harvestjob=RawDatum.jobs.through._meta.get_field('harvestjob').column, ), [(raw_id, job.id) for raw_id in chunk]) return True def store_chunk(self, source_config, data, limit=None, db=DEFAULT_DB_ALIAS): """Store a large amount of data for a single source_config. Data MUST be a utf-8 encoded string (Just a str type). Take special care to make sure you aren't destroying data by mis-encoding it. 
Args: source_config (SourceConfig): data Generator[(str, str)]: (identifier, datum) Returns: Generator[RawDatum] """ hashes = {} identifiers = {} now = timezone.now() if limit == 0: return [] for chunk in chunked(data, 500): if not chunk: break new = [] new_identifiers = set() for fr in chunk: if limit and len(hashes) >= limit: break if fr.sha256 in hashes: if hashes[fr.sha256] != fr.identifier: raise ValueError( '{!r} has already been seen or stored with identifier "{}". ' 'Perhaps your identifier extraction is incorrect?'.format(fr, hashes[fr.sha256]) ) logger.warning('Recieved duplicate datum %s from %s', fr, source_config) continue new.append(fr) hashes[fr.sha256] = fr.identifier new_identifiers.add(fr.identifier) if new_identifiers: suids = SourceUniqueIdentifier.objects.raw(''' INSERT INTO "{table}" ("{identifier}", "{source_config}") VALUES {values} ON CONFLICT ("{identifier}", "{source_config}") DO UPDATE SET id = "{table}".id RETURNING {fields} '''.format( table=SourceUniqueIdentifier._meta.db_table, identifier=SourceUniqueIdentifier._meta.get_field('identifier').column, source_config=SourceUniqueIdentifier._meta.get_field('source_config').column, values=placeholders(len(new_identifiers)), # Nasty hack. Fix when psycopg2 2.7 is released with execute_values fields=', '.join('"{}"'.format(field.column) for field in SourceUniqueIdentifier._meta.concrete_fields), ), [(identifier, source_config.id) for identifier in new_identifiers]) for suid in suids: identifiers[suid.identifier] = suid.pk if new: # Defer 'datum' by omitting it from the returned fields yield from RawDatum.objects.raw( ''' INSERT INTO "{table}" ("{suid}", "{hash}", "{datum}", "{datestamp}", "{date_modified}", "{date_created}") VALUES {values} ON CONFLICT ("{suid}", "{hash}") DO UPDATE SET "{datestamp}" = EXCLUDED."{datestamp}", "{date_modified}" = EXCLUDED."{date_modified}" RETURNING id, "{suid}", "{hash}", "{datestamp}", "{date_modified}", "{date_created}" '''.format( table=RawDatum._meta.db_table, suid=RawDatum._meta.get_field('suid').column, hash=RawDatum._meta.get_field('sha256').column, datum=RawDatum._meta.get_field('datum').column, datestamp=RawDatum._meta.get_field('datestamp').column, date_modified=RawDatum._meta.get_field('date_modified').column, date_created=RawDatum._meta.get_field('date_created').column, values=', '.join('%s' for _ in range(len(new))), # Nasty hack. Fix when psycopg2 2.7 is released with execute_values ), [ (identifiers[fr.identifier], fr.sha256, fr.datum, fr.datestamp or now, now, now) for fr in new ] ) if limit and len(hashes) >= limit: break def store_data(self, config, fetch_result): """ """ (rd, ) = self.store_chunk(config, [fetch_result]) if rd.created: logger.debug('New %r', rd) else: logger.debug('Found existing %r', rd) return rd # Explicit through table to match legacy names class RawDatumJob(models.Model): datum = models.ForeignKey('RawDatum', db_column='rawdatum_id') job = models.ForeignKey('HarvestJob', db_column='harvestlog_id') class Meta: db_table = 'share_rawdatum_logs' class RawDatum(models.Model): datum = models.TextField() suid = models.ForeignKey(SourceUniqueIdentifier, on_delete=models.CASCADE, related_name='raw_data') # The sha256 of the datum sha256 = models.TextField(validators=[validators.MaxLengthValidator(64)]) datestamp = models.DateTimeField(null=True, help_text=( 'The most relevant datetime that can be extracted from this RawDatum. ' 'This may be, but is not limited to, a deletion, modification, publication, or creation datestamp. 
' 'Ideally, this datetime should be appropriate for determining the chronological order its data will be applied.' )) date_modified = models.DateTimeField(auto_now=True, editable=False) date_created = models.DateTimeField(auto_now_add=True, editable=False) no_output = models.NullBooleanField(null=True, help_text=( 'Indicates that this RawDatum resulted in an empty graph when transformed. ' 'This allows the RawDataJanitor to find records that have not been processed. ' 'Records that result in an empty graph will not have a NormalizedData associated with them, ' 'which would otherwise look like data that has not yet been processed.' )) jobs = models.ManyToManyField('HarvestJob', related_name='raw_data', through=RawDatumJob) objects = RawDatumManager() @property def created(self): return self.date_modified == self.date_created class Meta: unique_together = ('suid', 'sha256') verbose_name_plural = 'Raw Data' indexes = ( ConcurrentIndex(fields=['no_output']), ) class JSONAPIMeta(BaseJSONAPIMeta): resource_name = 'RawData' def __repr__(self): return '<{}({}, {}, {}...)>'.format(self.__class__.__name__, self.id, self.datestamp, self.sha256[:10]) __str__ = __repr__
StarcoderdataPython
3378526
<gh_stars>0 __all__ = ['ListingColumn', 'listing_column'] from dataclasses import dataclass, field from html import escape as html_escape from elementary_flask.typing import Callable @dataclass() class ListingColumn: name: str title: str = None shrink_cell: bool = False td_class: str = None th_class: str = None safe_html: bool = False generator: Callable = None formatter: Callable = None order: int = 5 _td_class: str = field(init=False, default=None) _th_class: str = field(init=False, default=None) def td(self, item): val = self.generator(item) if self.generator is not None else getattr(item, self.name, "None") val = str(val) if not self.safe_html: val = html_escape(val) if self.formatter: val = self.formatter(val) if self._td_class is None: _td_class = ((self.td_class or "") + " shrink-cell" if self.shrink_cell else "").strip() self._td_class = f'class="{_td_class} align-middle"' if _td_class else 'class="align-middle"' return f"""<td {self._td_class}>{val}</td>""" def th(self): if self.title is None: self.title = self.name if self._th_class is None: _th_class = ((self.th_class or "") + " shrink-cell" if self.shrink_cell else "").strip() self._th_class = f'class="{_th_class}"' if _th_class else "" return f"""<th {self._th_class} scope="col">{self.title}</th>""" def listing_column(name, /, *, title: str = None, shrink_cell: bool = False, td_class: str = None, th_class: str = None, safe_html: bool = False, formatter: Callable = None, order: int = 5): def decorator(f): return ListingColumn(name, title=title, shrink_cell=shrink_cell, td_class=td_class, th_class=th_class, safe_html=safe_html, formatter=formatter, order=order, generator=f) return decorator
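
# A small usage sketch relying only on this module's own API: `Article` is a stand-in
# item type invented for the example, not something elementary_flask provides.
if __name__ == "__main__":
    from collections import namedtuple

    Article = namedtuple("Article", ["title", "hits"])

    # Plain column: the cell value is read from the item attribute named "title"
    title_col = ListingColumn("title", title="Title")

    # Decorated column: the wrapped function generates the cell value
    @listing_column("hits", title="Hits", shrink_cell=True)
    def hits_col(item):
        return f"{item.hits:,}"

    row = Article(title="Hello <world>", hits=1234)
    print("<tr>" + title_col.th() + hits_col.th() + "</tr>")
    print("<tr>" + title_col.td(row) + hits_col.td(row) + "</tr>")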
StarcoderdataPython
8012885
#!/usr/bin/python
import threading
import math
import config
from time import sleep
from datetime import datetime

IMUtoBTLock = threading.Lock()


class IMUtoBT (threading.Thread):
    def __init__(self, imu, bt):
        threading.Thread.__init__(self)
        self.imu = imu
        self.bt = bt

    def run(self):
        while not config.exitStatus:
            data = self.imu.getData()
            # Send the original, Kalman-filtered and complementary-filtered X, Y, Z
            # values over Bluetooth as one comma-separated line prefixed with "a:"
            self.bt.write("a:%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f,%.2f\n" %
                          (data['originalX'], data['originalY'], data['originalZ'],
                           data['kalmanX'], data['kalmanY'], data['kalmanZ'],
                           data['complementaryX'], data['complementaryY'], data['complementaryZ']))
            sleep(config.IMUtoBT_delay)

        print "IMUtoBT Thread: Shutting down."
StarcoderdataPython
3334167
<filename>great_expectations/render/renderer/content_block/expectation_string.py from great_expectations.render.renderer.content_block.content_block import ( ContentBlockRenderer, ) from great_expectations.render.types import RenderedStringTemplateContent class ExpectationStringRenderer(ContentBlockRenderer): @classmethod def _missing_content_block_fn( cls, configuration=None, result=None, language=None, runtime_configuration=None, **kwargs, ): return [ RenderedStringTemplateContent( **{ "content_block_type": "string_template", "styling": {"parent": {"classes": ["alert", "alert-warning"]}}, "string_template": { "template": "$expectation_type(**$kwargs)", "params": { "expectation_type": configuration.expectation_type, "kwargs": configuration.kwargs, }, "styling": { "params": { "expectation_type": { "classes": ["badge", "badge-warning"], } } }, }, } ) ]
StarcoderdataPython
6495003
<reponame>Ali-Parandeh/Data_Science_Playground<gh_stars>0 ''' Below is the structure of where you'll be working. working_dir ├── text_analyzer │ ├── __init__.py │ ├── counter_utils.py │ ├── document.py └── my_script.py ''' # Import custom text_analyzer package import text_analyzer # Create an instance of Document with datacamp_tweet my_document = text_analyzer.Document(text=datacamp_tweet) # Print the text attribute of the Document instance print(my_document.text) ''' <script.py> output: Basic linear regression example. #DataCamp #DataScience #Python #sklearn '''
StarcoderdataPython
3254669
import os import sys cd = os.path.abspath('.') if cd not in sys.path: # Add the current directory to sys.path so that `python -m` # is not required to run the helper script sys.path.insert(0, cd) SETTINGS = os.environ.get( "CYPRESS_SETTINGS", default=f"{os.path.split(cd)[-1]}.settings.cypress", ) # Set environment variable so the correct settings are used when loading test data # Note: Must set this before importing the settings, otherwise # the wrong settings will be imported os.environ["DJANGO_SETTINGS_MODULE"] = SETTINGS from django.conf import settings DEFAULT_SETUP_TEST_DATA_MODULE = "cypress.db.setup_test_data" DEFAULT_CACHE_KEY = "cypress_last_func" DEFAULT_CACHE_TIMEOUT = 60 * 60 * 24 SETUP_TEST_DATA_MODULE = getattr(settings, "CYPRESS_SETUP_TEST_DATA_MODULE", DEFAULT_SETUP_TEST_DATA_MODULE) CACHE_KEY = getattr(settings, "CYPRESS_CACHE_KEY", DEFAULT_CACHE_KEY) CACHE_TIMEOUT = getattr(settings, "CYPRESS_CACHE_TIMEOUT", DEFAULT_CACHE_TIMEOUT)
StarcoderdataPython
1944187
<filename>app/migrations/0005_auto_20190407_1857.py # Generated by Django 2.1.7 on 2019-04-07 13:27 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('app', '0004_sh0t_severity'), ] operations = [ migrations.AlterModelOptions( name='sh0t', options={'ordering': ('severity', 'title')}, ), ]
StarcoderdataPython
5192549
<reponame>ZREDU-007/mooc # -*- coding: utf-8 -*- from __future__ import unicode_literals __author__ = 'Kris,QQ:1209304692。QQ群:知尔MOOC,760196377' from django.db import models # Create your models here. class Tag(models.Model): name = models.CharField(max_length=16, verbose_name='标签') class Meta: verbose_name = u"标签" verbose_name_plural = verbose_name def __unicode__(self): return self.name
StarcoderdataPython
9777092
<filename>train/train.py<gh_stars>1-10 from __future__ import absolute_import, division, print_function import tensorflow as tf import numpy from tensorflow import keras import numpy as np from minio import Minio from minio.error import ResponseError import os import sys import tempfile import tarfile import pickle from time import time from tensorflow.python.keras.callbacks import TensorBoard print(tf.__version__) tensorboard = TensorBoard(log_dir="/logdir") model = keras.Sequential([ keras.layers.Conv2D(input_shape=(28,28,1), filters=8, kernel_size=3, strides=2, activation='relu', name='Conv1'), keras.layers.Flatten(), keras.layers.Dense(10, activation=tf.nn.softmax, name='Softmax') ]) model.summary() print('Created an untrained keras model') testing = False epochs = 5 model.compile(optimizer=tf.train.AdamOptimizer(), loss='sparse_categorical_crossentropy', metrics=['accuracy']) print('Compiled the model') minioClient = Minio('172.17.0.44:9000', access_key='AKIAIOSFODNN7EXAMPLE', secret_key='<KEY>', secure=False) print('Instantiated Minio client') if len(sys.argv) > 2: bucketNameForTrainImages = sys.argv[1] bucketNameForTrainLabels = sys.argv[2] else: bucketNameForTrainImages = 'normalizedtrainimages' bucketNameForTrainLabels = 'trainlabels' print(bucketNameForTrainImages) print(bucketNameForTrainLabels) try: data = minioClient.get_object('fashionmnist', bucketNameForTrainImages) with open('trainimages', 'wb') as file_data: for d in data.stream(32*1024): file_data.write(d) except ResponseError as err: print(err) print('Training images retrieved from S3 to local file system') train_images = pickle.load( open( "trainimages", "rb" ) ) print('Training images retrieved from local file system to Numpy array') #--- print(bucketNameForTrainLabels) try: data = minioClient.get_object('fashionmnist', bucketNameForTrainLabels) with open('trainlabels', 'wb') as file_data: for d in data.stream(32*1024): file_data.write(d) except ResponseError as err: print(err) print('Training labels retrieved from S3 to local file system') train_labels = pickle.load( open( "trainlabels", "rb" ) ) print('Training labels retrieved from local file system to Numpy array') #--- print('\ntrain_images.shape: {}, of {}'.format(train_images.shape, train_images.dtype)) model.fit(train_images, train_labels, epochs=epochs, callbacks=[tensorboard]) print('Training finished') model.save('my_model.h5') print("Saved model to local disk") #--- try: with open('my_model.h5', 'rb') as file_data: file_stat = os.stat('my_model.h5') print(minioClient.put_object('fashionmnist', 'trainedmodel', file_data, file_stat.st_size)) except ResponseError as err: print(err) # This won't work with KF's viewer yet md_file = open("/mlpipeline-ui-metadata.json", "w") md_file.write('{"version": 1,"outputs": [{"type": "tensorboard","source": "/logdir"}]}') md_file.close() print('Wrote tensorboard metadata') text_file = open("/trainedModelName.txt", "w") text_file.write('trainedmodel') text_file.close() print('Stored trained model in S3')
StarcoderdataPython
1650613
import random import sys from MMU import MMU from BlocoMemoria import BlocoMemoria from Endereco import Endereco from Instrucao import Instrucao from Conjunto import Conjunto from output import Output class TP2: tamanhoRAM: int = 1000 #quantidade de blocos da ram tamanhoCache1: int = 32 #quantidade de blocos da cache1 tamanhoCache2: int = 64 #quantidade de blocos da cache2 tamanhoCache3: int = 128 #quantidade de blocos da cache3 #A partir de agora as caches vão conter uma lista de conjuntos, que por sua vez contem uma lista de blocos tamanhoConjuntosCache1: int = 4 tamanhoConjuntosCache2: int = 4 tamanhoConjuntosCache3: int = 4 tamanhoPrograma: int = 10001 qdePalavrasBloco: int = 4 memoriaInstrucoes: list = [] RAM: list = [] cache1: list = [] cache2: list = [] cache3: list = [] #Registradores: opcode: int = 0 PC: int = 0 custo: int = 0 #Caches 1 e 2, HIT e MISS missC1: int = 0 hitC1: int = 0 missC2: int = 0 hitC2: int = 0 missC3: int = 0 hitC3: int = 0 hitRAM: int = 0 output_type: int = 1 def sincroniza(self, cache1, cache2, cache3, RAM): i = 0 bloco = BlocoMemoria() for conj in cache1: for bloco in conj.blocos: if(bloco.atualizado): RAM[i] = bloco.palavra i = i + 1 for conj in cache2: for bloco in conj.blocos: if(bloco.atualizado): RAM[i] = bloco.palavra i = i + 1 for conj in cache3: for bloco in conj.blocos: if(bloco.atualizado): RAM[i] = bloco.palavra i = i + 1 def maquina(self, PC: int): opcode = 0 while(opcode!=-1): umaInstrucao = self.memoriaInstrucoes[PC] opcode = umaInstrucao.opcode if(opcode!=-1): dadoMemoriaAdd1 = MMU.buscarNasMemorias(umaInstrucao.add1, self.RAM, self.cache1, self.cache2, self.cache3) dadoMemoriaAdd2 = MMU.buscarNasMemorias(umaInstrucao.add2, self.RAM, self.cache1, self.cache2, self.cache3) dadoMemoriaAdd3 = MMU.buscarNasMemorias(umaInstrucao.add3, self.RAM, self.cache1, self.cache2, self.cache3) self.custo += dadoMemoriaAdd1.custo self.custo += dadoMemoriaAdd2.custo self.custo += dadoMemoriaAdd3.custo print(dadoMemoriaAdd1.palavra) print(dadoMemoriaAdd2.palavra) print(dadoMemoriaAdd3.palavra) if(dadoMemoriaAdd1.cacheHit == 1): self.hitC1 += 1 elif(dadoMemoriaAdd1.cacheHit == 2): self.missC1 += 1 self.hitC2 += 1 elif(dadoMemoriaAdd1.cacheHit == 3): self.missC1 += 1 self.missC2 += 1 self.hitC3 += 1 else: self.missC1 += 1 self.missC2 += 1 self.missC3 += 1 self.hitRAM += 1 #endereco 2 if(dadoMemoriaAdd2.cacheHit == 1): self.hitC1 += 1 elif(dadoMemoriaAdd2.cacheHit == 2): self.missC1 += 1 self.hitC2 += 1 elif(dadoMemoriaAdd2.cacheHit == 3): self.missC1 += 1 self.missC2 += 1 self.hitC3 += 1 else: self.missC1 += 1 self.missC2 += 1 self.missC3 += 1 self.hitRAM += 1 #endereco 3 if(dadoMemoriaAdd3.cacheHit == 1): self.hitC1 += 1 elif(dadoMemoriaAdd3.cacheHit == 2): self.missC1 += 1 self.hitC2 += 1 elif(dadoMemoriaAdd3.cacheHit == 3): self.missC1 += 1 self.missC2 += 1 self.hitC3 += 1 else: self.missC1 += 1 self.missC2 += 1 self.missC3 += 1 self.hitRAM += 1 if (opcode == 0): break elif(opcode==1): conteudo1 = dadoMemoriaAdd1.palavra[umaInstrucao.add1.endPalavra] conteudo2 = dadoMemoriaAdd2.palavra[umaInstrucao.add2.endPalavra] soma = conteudo1 + conteudo2 dadoMemoriaAdd3.palavra[umaInstrucao.add3.endPalavra] = soma dadoMemoriaAdd3.atualizado = True elif(opcode==2): conteudo1 = dadoMemoriaAdd1.palavra[umaInstrucao.add1.endPalavra] conteudo2 = dadoMemoriaAdd2.palavra[umaInstrucao.add2.endPalavra] sub = conteudo1 - conteudo2 dadoMemoriaAdd3.palavra[umaInstrucao.add3.endPalavra] = sub dadoMemoriaAdd3.atualizado = True PC = PC +1 if(self.output_type): 
Output.printResult(self.hitC1, self.missC1, self.hitC2, self.missC2, self.hitC3, self.missC3, self.hitRAM, self.custo) else: totalHits = self.hitC1 + self.hitC2 + self.hitC3 + self.hitRAM totalMisses = self.missC1 + self.missC2 + self.missC3 taxaC1 = (self.hitC1 * 100) / (self.hitC1 + self.missC1) taxaC2 = (self.hitC2 * 100) / (self.hitC2 + self.missC2) taxaC3 = (self.hitC3 * 100) / (self.hitC3 + self.missC3) print('Custo total do programa: {0}'.format(self.custo)) print("===================== INFORMACOES SOBRE A MAQUINA =====================") print('\tCACHE1\tCACHE2\tCACHE3\t') print("SIZE\t{}\t{}\t{}".format(self.tamanhoCache1, self.tamanhoCache2, self.tamanhoCache3)) print("============================ HITS & MISSES ============================") print('\tHIT \tMISS \t') print('C1\t{0}\t{1}'.format( self.hitC1, self.missC1 )) print('C2\t{0}\t{1}'.format( self.hitC2, self.missC2 )) print('C3\t{0}\t{1}'.format( self.hitC3, self.missC3 )) print('RAM\t{}\t-'.format(self.hitRAM)) print("================================= TAXAS =================================") print('\tCACHE1\tCACHE2\tCACHE3\t') print("TAXA\t{:.2f}%\t{:.2f}%\t{:.2f}%".format(taxaC1, taxaC2, taxaC3)) print('\n') print('Total de Hits: {0}'.format(totalHits)) print('Total de Misses: {0}'.format(totalMisses)) def montarCacheVazia(self, tamanhoBlocos: int, tamanhoConjunto: int, qqCache: list): #gerando a lista de conjuntos e dentro dela a de blocos for _ in range(tamanhoConjunto): conjunto = Conjunto() for _ in range(int(tamanhoBlocos/tamanhoConjunto)): aux = BlocoMemoria() aux.endBlock = -sys.maxsize - 1 conjunto.blocos.append(aux) qqCache.append(conjunto) def montarRam(self): for i in range(self.tamanhoRAM): aux = BlocoMemoria() aux.endBlock = i palavras = [] for _ in range(self.qdePalavrasBloco): palavras.append(random.randint(0, 999999)) aux.palavra = palavras self.RAM.append(aux) def montarProgramaGerador(self, nome:str): with open(nome + ".txt", 'r') as reader: line = reader.readline() umaInstrucao = None while(line!=""): palavras = line.split(":") umaInstrucao = Instrucao() umaInstrucao.opcode = int(palavras[0]) e1 = Endereco() e1.endBlock = int(palavras[1]) e1.endPalavra = int(palavras[2]) umaInstrucao.add1 = e1 e2 = Endereco() e2.endBlock = int(palavras[3]) e2.endPalavra = int(palavras[4]) e2.endPalavra = e2.endPalavra%4 umaInstrucao.add2 = e2 e3 = Endereco() e3.endBlock = int(palavras[5]) e3.endPalavra = int(palavras[6]) e3.endPalavra = e3.endPalavra%4 umaInstrucao.add3 = e3 self.memoriaInstrucoes.append(umaInstrucao) line = reader.readline() umaInstrucao = Instrucao() umaInstrucao.opcode = -1 self.memoriaInstrucoes.append(umaInstrucao) def __init__(self, output_type = "custom"): if(output_type == "normal"): self.output_type = 0 elif(output_type == "custom"): self.output_type = 1 self.montarRam() self.montarCacheVazia(self.tamanhoCache1, self.tamanhoConjuntosCache1, self.cache1) self.montarCacheVazia(self.tamanhoCache2, self.tamanhoConjuntosCache2, self.cache2) self.montarCacheVazia(self.tamanhoCache3, self.tamanhoConjuntosCache3, self.cache3) self.montarProgramaGerador("programa") self.maquina(0) self.sincroniza(self.cache1, self.cache2, self.cache3, self.RAM) def main(): #cwd = os.getcwd() # Get the current working directory (cwd) #files = os.listdir(cwd) # Get all the files in that directory #print("Arquivos %r: %s" % (cwd, files)) if(len(sys.argv)>1): output_type = sys.argv[1] TP2(output_type) else: TP2() if __name__ == "__main__": main()
StarcoderdataPython
11342778
<reponame>xinming365/LeetCode
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/4/10 1:31 PM
# @Author : xinming
# @File : 78_subsets.py
from typing import List


class Solution:
    def subsets(self, nums):
        if not nums:
            return None
        res = []
        n = len(nums)

        def back_track(idx, sub_num):
            res.append(sub_num)
            for i in range(idx, n):
                back_track(i+1, sub_num+[nums[i]])

        back_track(0, [])
        return res


if __name__=='__main__':
    nums = [1,2,3]
    out = Solution().subsets(nums=nums)
    print(out)
StarcoderdataPython
3244147
<filename>QLearning/task0_train.py
#!/usr/bin/env python
# coding=utf-8
'''
Author: John
Email: <EMAIL>
Date: 2020-09-11 23:03:00
LastEditor: John
LastEditTime: 2021-09-23 12:22:58
Description:
Environment:
'''
import sys, os

curr_path = os.path.dirname(os.path.abspath(__file__))  # current path
parent_path = os.path.dirname(curr_path)  # parent path, i.e. the project root
sys.path.append(parent_path)  # add the project root to sys.path so that modules such as envs can be imported

import gym
import torch
import datetime

from envs.gridworld_env import CliffWalkingWapper
from QLearning.agent import QLearning
from common.plot import plot_rewards, plot_rewards_cn
from common.utils import save_results, make_dir

curr_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")  # current time, used to tag output folders


class QlearningConfig:
    '''Training-related parameters'''
    def __init__(self):
        self.algo = 'Q-learning'  # algorithm name
        self.env = 'CliffWalking-v0'  # environment name
        self.result_path = curr_path + "/outputs/" + self.env + '/' + curr_time + '/results/'  # path for saving results
        self.model_path = curr_path + "/outputs/" + self.env + '/' + curr_time + '/models/'  # path for saving models
        self.train_eps = 400  # number of training episodes
        self.eval_eps = 30  # number of evaluation episodes
        self.gamma = 0.9  # reward discount factor
        self.epsilon_start = 0.95  # initial epsilon of the e-greedy policy
        self.epsilon_end = 0.01  # final epsilon of the e-greedy policy
        self.epsilon_decay = 300  # decay rate of epsilon in the e-greedy policy
        self.lr = 0.1  # learning rate
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # detect GPU


def env_agent_config(cfg, seed=1):
    env = gym.make(cfg.env)
    env = CliffWalkingWapper(env)
    env.seed(seed)  # set the random seed
    state_dim = env.observation_space.n  # state dimension
    action_dim = env.action_space.n  # action dimension
    agent = QLearning(state_dim, action_dim, cfg)
    return env, agent


def train(cfg, env, agent):
    print('Start training!')
    print(f'Env: {cfg.env}, Algorithm: {cfg.algo}, Device: {cfg.device}')
    rewards = []  # record episode rewards
    ma_rewards = []  # record moving-average rewards
    for i_ep in range(cfg.train_eps):
        ep_reward = 0  # reward accumulated in this episode
        state = env.reset()  # reset the environment, i.e. start a new episode
        while True:
            action = agent.choose_action(state)  # choose an action according to the algorithm
            next_state, reward, done, _ = env.step(action)  # one interaction with the environment
            print(reward)  # debug: log the per-step reward
            agent.update(state, action, reward, next_state, done)  # Q-learning update
            state = next_state  # update the state
            ep_reward += reward
            if done:
                break
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(ma_rewards[-1] * 0.9 + ep_reward * 0.1)
        else:
            ma_rewards.append(ep_reward)
        print("Episode: {}/{}, Reward: {:.1f}".format(i_ep + 1, cfg.train_eps, ep_reward))
    print('Training finished!')
    return rewards, ma_rewards


def eval(cfg, env, agent):
    print('Start evaluation!')
    print(f'Env: {cfg.env}, Algorithm: {cfg.algo}, Device: {cfg.device}')
    for item in agent.Q_table.items():
        print(item)
    rewards = []  # record rewards of all episodes
    ma_rewards = []  # moving-average rewards
    for i_ep in range(cfg.eval_eps):
        ep_reward = 0  # reward accumulated in this episode
        state = env.reset()  # reset the environment, i.e. start a new episode
        while True:
            action = agent.predict(state)  # choose an action according to the algorithm
            next_state, reward, done, _ = env.step(action)  # one interaction with the environment
            state = next_state  # update the state
            ep_reward += reward
            if done:
                break
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(ma_rewards[-1] * 0.9 + ep_reward * 0.1)
        else:
            ma_rewards.append(ep_reward)
        print(f"Episode: {i_ep + 1}/{cfg.eval_eps}, Reward: {ep_reward:.1f}")
    print('Evaluation finished!')
    return rewards, ma_rewards


if __name__ == "__main__":
    cfg = QlearningConfig()

    # training
    env, agent = env_agent_config(cfg, seed=0)
    rewards, ma_rewards = train(cfg, env, agent)
    make_dir(cfg.result_path, cfg.model_path)  # create output folders
    agent.save(path=cfg.model_path)  # save the model
    for item in agent.Q_table.items():
        print(item)
    save_results(rewards, ma_rewards, tag='train', path=cfg.result_path)  # save results
    plot_rewards_cn(rewards, ma_rewards, tag="train", env=cfg.env, algo=cfg.algo, path=cfg.result_path)

    # evaluation
    env, agent = env_agent_config(cfg, seed=10)
    agent.load(path=cfg.model_path)  # load the model
    rewards, ma_rewards = eval(cfg, env, agent)
    save_results(rewards, ma_rewards, tag='eval', path=cfg.result_path)
    plot_rewards_cn(rewards, ma_rewards, tag="eval", env=cfg.env, algo=cfg.algo, path=cfg.result_path)
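# Illustrative sketch (not part of the original file): epsilon_start / epsilon_end /
# epsilon_decay above are the kind of values an e-greedy schedule inside
# QLearning.agent typically consumes. The exact formula used by the agent is not
# shown here, so the exponential schedule below is only an assumption.
import math

def epsilon_by_step(sample_count: int, start=0.95, end=0.01, decay=300) -> float:
    # Decays from `start` toward `end` as more actions have been sampled.
    return end + (start - end) * math.exp(-1.0 * sample_count / decay)

# Example: epsilon_by_step(0) ~= 0.95, epsilon_by_step(1000) ~= 0.04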
StarcoderdataPython
8076177
from django.contrib import sitemaps

from . import models


class FlatPageSitemap(sitemaps.Sitemap):
    changefreq = "daily"
    priority = 1.0

    def items(self):
        return models.FlatPage.objects.filter(is_enabled=True).order_by('id')

    def lastmod(self, obj):
        return obj.updated_at
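# Illustrative wiring (not part of the original file): a project urls.py exposing this
# sitemap through Django's built-in sitemap view. The import path "blog.sitemaps" and
# the "flatpages" section key are assumptions made only for the example.
from django.contrib.sitemaps.views import sitemap
from django.urls import path

from blog.sitemaps import FlatPageSitemap

sitemaps = {"flatpages": FlatPageSitemap}

urlpatterns = [
    path("sitemap.xml", sitemap, {"sitemaps": sitemaps},
         name="django.contrib.sitemaps.views.sitemap"),
]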
StarcoderdataPython
12853188
<gh_stars>1-10 # -*- coding: utf-8 -*- import json from collections import OrderedDict from typing import List import dash_core_components as dcc import dash_html_components as html import dash_table import pandas as pd from dash import dash from dash.dependencies import Input, Output, State from zvdata import IntervalLevel from zvdata.app import app from zvdata.chart import Drawer from zvdata.domain import global_providers, get_schemas, get_schema_by_name, get_schema_columns from zvdata.normal_data import NormalData, IntentType from zvdata.reader import DataReader from zvdata.utils.pd_utils import df_is_not_null from zvdata.utils.time_utils import now_pd_timestamp, TIME_FORMAT_DAY current_df = None layout = html.Div( [ html.Div( [ # provider selector dcc.Dropdown( id='provider-selector', placeholder='select provider', options=[{'label': provider, 'value': provider} for provider in global_providers]), # schema selector dcc.Dropdown(id='schema-selector', placeholder='select schema'), # level selector dcc.Dropdown(id='level-selector', placeholder='select level', options=[{'label': level.value, 'value': level.value} for level in IntervalLevel], value=IntervalLevel.LEVEL_1DAY.value), # column selector html.Div(id='schema-column-selector-container', children=None), dcc.Dropdown( id='properties-selector', options=[ {'label': 'undefined', 'value': 'undefined'} ], value='undefined', multi=True ), # codes filter dcc.Input(id='input-code-filter', type='text', placeholder='input codes', style={'width': '400px'}), # time range filter dcc.DatePickerRange( id='date-picker-range', start_date='2009-01-01', end_date=now_pd_timestamp(), display_format=TIME_FORMAT_DAY ), # load data for table html.Button('load data', id='btn-load-data', n_clicks_timestamp=0), # table container html.Div(id='data-table-container', children=None), # selected properties html.Label('setting y_axis and chart type for the columns:'), # col setting container html.Div(id='col-setting-container', children=dash_table.DataTable( id='col-setting-table', columns=[ {'id': 'property', 'name': 'property', 'editable': False}, {'id': 'y_axis', 'name': 'y_axis', 'presentation': 'dropdown'}, {'id': 'chart', 'name': 'chart', 'presentation': 'dropdown'} ], dropdown={ 'y_axis': { 'options': [ {'label': i, 'value': i} for i in ['y1', 'y2', 'y3', 'y4', 'y5'] ] }, 'chart': { 'options': [ {'label': chart_type.value, 'value': chart_type.value} for chart_type in NormalData.get_charts_by_intent(IntentType.compare_self) ] } }, editable=True ), ), html.Div(id='table-type-label', children=None), html.Div( [ html.Div([dcc.Dropdown(id='intent-selector')], style={'width': '50%', 'display': 'inline-block'}), html.Div([dcc.Dropdown(id='chart-selector')], style={'width': '50%', 'display': 'inline-block'}) ] ), html.Div(id='chart-container', children=None) ]) ] ) @app.callback( Output('schema-selector', 'options'), [Input('provider-selector', 'value')]) def update_schema_selector(provider): if provider: return [{'label': schema.__name__, 'value': schema.__name__} for schema in get_schemas(provider=provider)] raise dash.exceptions.PreventUpdate() @app.callback( Output('schema-column-selector-container', 'children'), [Input('schema-selector', 'value')], state=[State('provider-selector', 'value')]) def update_column_selector(schema_name, provider): if provider and schema_name: schema = get_schema_by_name(name=schema_name) cols = get_schema_columns(schema=schema) return dcc.Dropdown( id='schema-column-selector', options=[ {'label': col, 'value': col} for col in cols ], 
value=get_schema_by_name(name=schema_name).important_cols(), multi=True ) raise dash.exceptions.PreventUpdate() @app.callback( [Output('properties-selector', 'options'), Output('properties-selector', 'value')], [Input('schema-column-selector', 'value')], state=[State('provider-selector', 'value'), State('schema-selector', 'value'), State('properties-selector', 'options'), State('properties-selector', 'value')]) def update_selected_properties(selected_cols, provider, schema_name, options, value): if selected_cols and provider and schema_name: current_options = options current_value = value added_labels = [] added_values = [] for col in selected_cols: added_labels.append(col) added_values.append( json.dumps({ 'provider': provider, 'schema': schema_name, 'column': col })) added_options = [{'label': col, 'value': added_values[i]} for i, col in enumerate(added_labels)] if 'undefined' in value: current_options = [] current_value = [] current_options += added_options current_value += added_values return current_options, current_value raise dash.exceptions.PreventUpdate() def properties_to_readers(properties, level, codes, start_date, end_date) -> List[DataReader]: provider_schema_map_cols = {} for prop in properties: provider = prop['provider'] schema_name = prop['schema'] key = (provider, schema_name) if key not in provider_schema_map_cols: provider_schema_map_cols[key] = [] provider_schema_map_cols[key].append(prop['column']) readers = [] for item, columns in provider_schema_map_cols.items(): provider = item[0] schema_name = item[1] schema = get_schema_by_name(schema_name) readers.append(DataReader(data_schema=schema, provider=provider, codes=codes, level=level, columns=columns, start_timestamp=start_date, end_timestamp=end_date, time_field=schema.time_field())) return readers @app.callback( [Output('data-table-container', 'children'), Output('col-setting-table', 'data'), Output('table-type-label', 'children'), Output('intent-selector', 'options'), Output('intent-selector', 'value')], [Input('btn-load-data', 'n_clicks')], state=[State('properties-selector', 'value'), State('level-selector', 'value'), State('input-code-filter', 'value'), State('date-picker-range', 'start_date'), State('date-picker-range', 'end_date')]) def update_data_table(n_clicks, properties, level, codes: str, start_date, end_date): if n_clicks and properties: props = [] for prop in properties: props.append(json.loads(prop)) readers = properties_to_readers(properties=props, level=level, codes=codes, start_date=start_date, end_date=end_date) if readers: data_df = readers[0].data_df for reader in readers[1:]: if df_is_not_null(reader.data_df): data_df = data_df.join(reader.data_df, how='outer') global current_df current_df = data_df if not df_is_not_null(current_df): return 'no data,please reselect!', [], '', [ {'label': 'compare_self', 'value': 'compare_self'}], 'compare_self' normal_data = NormalData(current_df) data_table = Drawer(data=normal_data).draw_data_table(id='data-table-content') # generate col setting table properties = normal_data.data_df.columns.to_list() df = pd.DataFrame(OrderedDict([ ('property', properties), ('y_axis', ['y1'] * len(properties)), ('chart', ['line'] * len(properties)) ])) # generate intents intents = normal_data.get_intents() intent_options = [ {'label': intent.value, 'value': intent.value} for intent in intents ] intent_value = intents[0].value return data_table, df.to_dict('records'), normal_data.get_table_type(), intent_options, intent_value else: return 'no data,please reselect!', [], '', 
[ {'label': 'compare_self', 'value': 'compare_self'}], 'compare_self' raise dash.exceptions.PreventUpdate() @app.callback( [Output('chart-selector', 'options'), Output('chart-selector', 'value')], [Input('intent-selector', 'value')]) def update_chart_selector(intent): if intent: charts = NormalData.get_charts_by_intent(intent=intent) options = [ {'label': chart.value, 'value': chart.value} for chart in charts ] value = charts[0].value return options, value raise dash.exceptions.PreventUpdate() operators_df = [['ge ', '>='], ['le ', '<='], ['lt ', '<'], ['gt ', '>'], ['ne ', '!='], ['eq ', '='], ['contains '], ['datestartswith ']] operators_sql = [['>= ', '>='], ['<= ', '<='], ['< ', '<'], ['> ', '>'], ['!= ', '!='], ['== ', '='], ['contains '], ['datestartswith ']] def split_filter_part(filter_part, operators=operators_df): for operator_type in operators: for operator in operator_type: if operator in filter_part: name_part, value_part = filter_part.split(operator, 1) name = name_part[name_part.find('{') + 1: name_part.rfind('}')] value_part = value_part.strip() v0 = value_part[0] if (v0 == value_part[-1] and v0 in ("'", '"', '`')): value = value_part[1: -1].replace('\\' + v0, v0) else: try: value = float(value_part) except ValueError: value = value_part # word operators need spaces after them in the filter string, # but we don't want these later return name, operator_type[0].strip(), value return [None] * 3 @app.callback( [Output('data-table-content', "data"), Output('chart-container', "children")], [Input('data-table-content', "page_current"), Input('data-table-content', "page_size"), Input('data-table-content', "sort_by"), Input('data-table-content', "filter_query"), Input('intent-selector', "value"), Input('chart-selector', "value"), Input('col-setting-table', 'data'), Input('col-setting-table', 'columns')]) def update_table_and_graph(page_current, page_size, sort_by, filter, intent, chart, rows, columns): if chart: property_map = {} for row in rows: property_map[row['property']] = { 'y_axis': row['y_axis'], 'chart': row['chart'] } dff = current_df if filter: filtering_expressions = filter.split(' && ') for filter_part in filtering_expressions: col_name, operator, filter_value = split_filter_part(filter_part) if operator in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'): # these operators match pandas series operator method names dff = dff.loc[getattr(dff[col_name], operator)(filter_value)] elif operator == 'contains': dff = dff.loc[dff[col_name].str.contains(filter_value)] elif operator == 'datestartswith': # this is a simplification of the front-end filtering logic, # only works with complete fields in standard format dff = dff.loc[dff[col_name].str.startswith(filter_value)] # if sort_by: # dff = dff.sort_values( # [col['entity_id'] for col in sort_by], # ascending=[ # col['direction'] == 'asc' # for col in sort_by # ], # inplace=False # ) if intent in (IntentType.compare_self.value, IntentType.compare_to_other.value): graph_data, graph_layout = Drawer(NormalData(dff)).draw_compare(chart=chart, property_map=property_map, render=None, keep_ui_state=False) else: graph_data, graph_layout = Drawer(NormalData(dff)).draw(chart=chart, property_map=property_map, render=None, keep_ui_state=False) table_data = dff.iloc[page_current * page_size: (page_current + 1) * page_size ].to_dict('records') return table_data, \ dcc.Graph( id='chart-content', figure={ 'data': graph_data, 'layout': graph_layout } ) raise dash.exceptions.PreventUpdate()
StarcoderdataPython
9608428
from .hex_ia import HexIA, dotdict, args from .hex_board import HexBoard, WHITE, BLACK from .hex_game_manager import HexGameManager from uct import UCT from parameters import Params import time import traceback class ConvNetUnableToProduceGame(Exception): pass class HexCoach: average_number_moves = [0] average_winner = [0] rii = 0 riis = [] def __init__(self): self.last_checkpoint = "checkpoint.pth.tar" self.file_to_save = "games0" self.ai = HexIA() self.uct = UCT(self.ai) self.training_calls = 0 def add_batch_file(self): player = Params.FIRST_PLAYER i = 0 error_count = 0 while i < Params.NUMBER_GAMES_BATCH: try: b = HexBoard() moves = [] w = 0 j = 0 expansions = [] rollouts = [] ended = [] start = time.time() while w == 0: m, infos = self.uct.next_turn(b, player) expansions.append(infos["expansions"]) rollouts.append(infos["rollouts"]) ended.append(infos["ended"]) player = Params.get_next_player(player) moves.append(m) b.play_move(m) b.find_if_winner(m) w = b.winner() j += 1 Params.ongoing() end = time.time() Params.end_ongoing() Params.log("hex_coach.py", "Match : " + str(i + 1) + "/" + str(Params.NUMBER_GAMES_BATCH) + " - " + str(end - start) + " sec") Params.log("hex_coach.py", "Winner : " + str(w)) Params.log("hex_coach.py", "Moves (" + str(len(moves)) + ") : " + str(moves)) Params.log("hex_coach.py", "Expansions : " + str(expansions)) Params.log("hex_coach.py", "Rollouts : " + str(rollouts)) Params.log("hex_coach.py", "Ended : " + str(ended)) Params.log("hex_coach.py", "Matrix : \n" + str(b.get_copy_matrix())) args = {"player1": "cnn", "player2": "cnn", "winner": str(w)} HexGameManager.write_add_format_advanced(moves, args, Params.STANDARD_GAME_FILE) i += 1 HexCoach.rii = Params.RII_PARAMETER*HexCoach.rii + (1-Params.RII_PARAMETER)*w except Exception: traceback.print_exc() time.sleep(0.1) Params.log("hex_coach.py", "Failure when creating game") error_count += 1 if error_count >= Params.SAVING_FROM_CONVERGENCE_TO_ERROR: raise ConvNetUnableToProduceGame def trainAI(self, checkpoint=Params.TAKE_FROM_CHECKPOINT): j = 0 if checkpoint: try: infos = self.get_last_valid_checkpoint_name() if infos is not None: self.ai.load_checkpoint(filename=infos["full"]) self.training_calls = infos["iters"] Params.prt("hex_coach.py", "Checkpoint Loaded : " + infos["full"]) except: Params.log("hex_coach.py", "Unable to open the checkpoint") while True: try: if Params.GAME_SET_METHOD is "reset": if j % Params.RESET_GAMES_AFTER_BATCH is 0: import os if os.path.isfile(Params.STANDARD_GAME_FILE): os.remove(Params.STANDARD_GAME_FILE) Params.prt("hex_coach.py", "Games removed") except Exception: traceback.print_exc() Params.log("hex_coach.py", "Impossible to remove previous games") try: self.add_batch_file() except Exception: traceback.print_exc() Params.log("hex_coach.py", "Impossible to add Files") try: self.launch_train() except Exception: traceback.print_exc() Params.log("hex_coach.py", "Impossible to train the neural network") try: self.check_infos_size_and_save() except Exception: traceback.print_exc() Params.log("hex_coach.py", "Impossible to check the infos") j += 1 try: Params.log("hex_coach.py", "Round " + str(j + 1) + " (round " + str(j % Params.RESET_GAMES_AFTER_BATCH + 1) + "/" + str(Params.RESET_GAMES_AFTER_BATCH) + ", average winner : " + str(HexCoach.average_winner[-1]) + ", number of moves : " + str(HexCoach.average_number_moves[-1]) + ", number of learning iter : " + str(self.training_calls) + ", rii : " + str(HexCoach.rii) + ")") HexCoach.riis.append(HexCoach.rii) except: traceback.print_exc() 
Params.log("hex_coach.py", "Impossible to view round work") def launch_train(self): gm = HexGameManager moves = [] nb_moves = [] winner = [] while len(moves) < args['batch_size']: b, v, p, i = gm.get_random_move() moves.append((b, v, p)) nb_moves.append(i["nb_moves"]) winner.append(i["winner"]) HexCoach.average_number_moves.append(0.0) for i in nb_moves: HexCoach.average_number_moves[-1] += i/len(nb_moves) HexCoach.average_winner.append(0.0) for i in winner: HexCoach.average_winner[-1] += i/len(winner) self.ai.train(moves) self.training_calls += 1 if Params.STORE_AFTER > 0: if self.training_calls % Params.STORE_AFTER > 0: self.ai.save_checkpoint(filename=Params.WORKING_CHECKPOINT_FILENAME) else: name = self.give_checkpoint_name() self.ai.save_checkpoint(filename=name) else: self.ai.save_checkpoint(filename=Params.WORKING_CHECKPOINT_FILENAME) def give_checkpoint_name(self): name = Params.PREFIX_NAME utc_version = Params.UTC_VERSION neural_version = Params.NEURAL_VERSION board_version = Params.BOARD_VERSION sep = Params.SEPARATOR suffix = Params.SUFFIX iteration = str(self.training_calls) return name + sep + board_version + sep + utc_version + sep + neural_version + sep + iteration + suffix def get_checkpoint_informations(self, name): wp = name.split(".")[0] s = wp.split(Params.SEPARATOR) info = {"valid": False, "name": None, "board": None, "utc": None, "neural": None, "iters": None, "full": name} if len(s) is 5: try: info["name"] = s[0] info["board"] = s[1] info["utc"] = s[2] info["neural"] = s[3] info["iters"] = int(s[4]) info["valid"] = True except: pass return info def get_last_valid_checkpoint_name(self, folder=Params.NN_CHECKPOINT_FOLDER): from os import walk f = [] for (dirpath, dirnames, filenames) in walk(folder): f.extend(filenames) break p = [] for v in f: ci = self.get_checkpoint_informations(v) if ci["valid"] is True: if (ci["name"] == Params.PREFIX_NAME and ci["board"] == Params.BOARD_VERSION and ci["utc"] == Params.UTC_VERSION and ci["neural"] == Params.NEURAL_VERSION): p.append(ci) if len(p) > 0: return sorted(p, key=lambda x: x["iters"], reverse=True)[0] else: return None def check_infos_size_and_save(self): if len(HexCoach.riis) > 1000: HexCoach.riis.pop(0) if len(HexCoach.riis) > 1000: HexCoach.riis.pop(0) if len(HexCoach.average_winner) > 1000: HexCoach.average_winner.pop(0) if len(HexCoach.average_winner) > 1000: HexCoach.average_winner.pop(0) if len(HexCoach.average_number_moves) > 1000: HexCoach.average_number_moves.pop(0) if len(HexCoach.average_number_moves) > 1000: HexCoach.average_number_moves.pop(0) if self.training_calls % Params.SAVE_INFOS is 0: with open(Params.INFOS_FILE, 'a') as f: f.write(str(self.training_calls) + ":" + str(self.rii) + "\n")
StarcoderdataPython
1660653
import utils as u

from collections import deque
from itertools import count
from time import time

puzzle_input = "077201"

# part 1 -'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,_


def debug_position(scores: list, positions: list):
    for idx, score in enumerate(scores):
        if idx == positions[0]:
            print(f"{u.CYAN}{score}{u.NORMAL}", end="")
        elif idx == positions[1]:
            print(f"{u.PINK}{score}{u.NORMAL}", end="")
        else:
            print(score, end="")
    print("")


def look_for_ten_recipes_after_nth(n: int):
    score = deque([3, 7])
    elves_position = [0, 1]
    # debug_position(score, elves_position)
    while True:
        new_scores = sum(score[pos] for pos in elves_position)
        score.extend(map(int, list(str(new_scores))))
        elves_position = [(pos + score[pos] + 1) % len(score) for pos in elves_position]
        # debug_position(score, elves_position)
        if len(score) >= n + 10:
            break
    return "".join(map(str, (score[x] for x in range(n, n + 10))))


u.assert_equals(look_for_ten_recipes_after_nth(9), "5158916779")
u.assert_equals(look_for_ten_recipes_after_nth(5), "0124515891")
u.assert_equals(look_for_ten_recipes_after_nth(2018), "5941429882")
u.assert_equals(look_for_ten_recipes_after_nth(18), "9251071085")

u.answer_part_1(look_for_ten_recipes_after_nth(int(puzzle_input)))

# part 2 -'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,__,.-'*'-.,_


def look_for_pattern_in_recipes(pattern: str):
    score = "37"
    elves_position = [0, 1]
    init = time()
    for i in count():
        if i % 10000 == 0:
            print(f"round {i} - len = {len(score)} - time = {time() - init}", end="\r")
        new_scores = str(sum(int(score[pos]) for pos in elves_position))
        score += new_scores
        elves_position = [
            (pos + int(score[pos]) + 1) % len(score) for pos in elves_position
        ]
        if pattern in score[-7:]:
            print(f"round {i} - len = {len(score)} - time = {time() - init}")
            return score.index(pattern)


u.assert_equals(look_for_pattern_in_recipes("51589"), 9)
u.assert_equals(look_for_pattern_in_recipes("01245"), 5)
u.assert_equals(look_for_pattern_in_recipes("92510"), 18)
u.assert_equals(look_for_pattern_in_recipes("59414"), 2018)

u.answer_part_2(look_for_pattern_in_recipes(puzzle_input))
StarcoderdataPython
3215648
from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
from sqlalchemy import desc


@login_manager.user_loader
def load_user(user_id):
    return User.query.get(int(user_id))


class User(UserMixin, db.Model):
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255))
    email = db.Column(db.String(255), unique=True, index=True)
    bio = db.Column(db.String(255))
    profile_pic_path = db.Column(db.String())
    pass_secure = db.Column(db.String(255))
    posts = db.relationship('Post', backref='user', lazy="dynamic")
    comments = db.relationship('Comment', backref='user', lazy="dynamic")

    @property
    def password(self):
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        self.pass_secure = generate_password_hash(password)

    def verify_password(self, password):
        return check_password_hash(self.pass_secure, password)

    def __repr__(self):
        return f'User {self.username}'


class Post(db.Model):
    __tablename__ = 'posts'

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String)
    post = db.Column(db.String)
    posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')

    def save_post(self):
        db.session.add(self)
        db.session.commit()

    def delete_post(self):
        db.session.delete(self)
        db.session.commit()

    @classmethod
    def get_post(cls, id):
        post = Post.query.filter_by(id=id).order_by(desc('posted')).all()
        return post

    @classmethod
    def all_posts(cls):
        post = Post.query.order_by(desc('posted')).all()
        return post

    def __repr__(self):
        return f'Post {self.title}'


class Comment(db.Model):
    __tablename__ = 'comments'

    id = db.Column(db.Integer, primary_key=True)
    comment = db.Column(db.String(255))
    posted = db.Column(db.DateTime, default=datetime.utcnow)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    post_id = db.Column(db.Integer, db.ForeignKey("posts.id"))

    def save_comment(self):
        db.session.add(self)
        db.session.commit()

    @classmethod
    def get_comments(cls, id):
        comments = Comment.query.filter_by(post_id=id).all()
        return comments

    def delete_comment(self):
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        return f'Comment: {self.comment}'


class Subscriber(db.Model):
    __tablename__ = 'subscribers'

    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True, index=True)

    def save_subscriber(self):
        db.session.add(self)
        db.session.commit()

    def __repr__(self):
        return f'Subscriber {self.email}'
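# Usage sketch (not part of the original module): how the password property and
# helper methods above are typically exercised inside an application / database
# context. The example values are made up for illustration.
#
# user = User(username="maria", email="maria@example.com")
# user.password = "s3cret"               # only the hash is stored in pass_secure
# assert user.verify_password("s3cret")
#
# post = Post(title="Hello", post="First post", user=user)
# post.save_post()                        # db.session.add + commit
# Comment(comment="Nice!", user=user, post=post).save_comment()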
StarcoderdataPython
1924977
<filename>apps/web/api/views.py from rest_framework import status from rest_framework.generics import CreateAPIView from rest_framework.response import Response from apps.web.api.serializers import UpdateModelSerializer from apps.web.models import AppUser, CallbackQuery, Chat, Message, Update from apps.web.models.message import Photo from apps.web.utils import allowed_hooks from ..tasks import handle_message_task class ProcessWebHookAPIView(CreateAPIView): """View to retrieve and handle all user's request, i.e webhook Steps: 1) Got request with ``hook_id`` param to determine the bot 2) Extract message or callback_query.message 3) Handle this message by celery task creation """ serializer_class = UpdateModelSerializer queryset = Update.objects.all() @allowed_hooks def post(self, request, *args, **kwargs): bot_id = kwargs.get('hook_id', None) self.request.data['hook_id'] = bot_id serializer = self.get_serializer(data=request.data) is_valid = serializer.is_valid() if is_valid: update = self.perform_create(serializer) else: # TODO: implement errors handling print("Error has been occurred. Format is not valid.") return Response(status=status.HTTP_204_NO_CONTENT) headers = self.get_success_headers(serializer.data) # handle_message_task.delay(update.id) handle_message_task(update.id) return Response( serializer.data, headers=headers, status=status.HTTP_201_CREATED, ) def handle_message(self, data): bot = data['bot'] user, _ = AppUser.objects.get_or_create(**data['message']['from_user']) chat, _ = Chat.objects.get_or_create(**data['message']['chat']) message, _ = Message.objects.get_or_create( from_user=user, chat=chat, date=data['message']['date'], text=data['message'].get('text'), message_id=data['message']['message_id'], ) self.attach_photo_to_message(data=data['message'], message=message) update, _ = Update.objects.get_or_create( bot=bot, message=message, update_id=data['update_id'], ) return update @staticmethod def extract_callback_message(callback): user, _ = AppUser.objects.get_or_create( **callback['message']['from_user'] ) chat, _ = Chat.objects.get_or_create( **callback['message']['chat'] ) message, _ = Message.objects.get_or_create( message_id=callback['message']['message_id'], from_user=user, chat=chat, date=callback['message']['date'], text=callback['message'].get('text'), ) return message @staticmethod def attach_photo_to_message(data, message): photos = data.get('photo', []) for photo in photos: photo.pop('message', None) Photo.objects.get_or_create(**photo, message=message) def handle_callback(self, data): bot = data['bot'] user, _ = AppUser.objects.get_or_create( **data['callback_query']['from_user'] ) chat, _ = Chat.objects.get_or_create( **data['callback_query']['message']['chat'] ) message = data['callback_query'].get('message') if message: message = self.extract_callback_message(data['callback_query']) self.attach_photo_to_message( data=data['callback_query']['message'], message=message ) callback_query, _ = CallbackQuery.objects.get_or_create( from_user=user, message=message, data=data['callback_query']['data'], id=data['callback_query']['id'], ) update, _ = Update.objects.get_or_create( bot=bot, callback_query=callback_query, update_id=data['update_id'], ) return update def perform_create(self, serializer): data = serializer.validated_data if 'message' in data: update = self.handle_message(data) else: update = self.handle_callback(data) return update
StarcoderdataPython
1756510
"""nskipgrams: A lightweight Python package to work with ngrams and skipgrams Author: <NAME> <<EMAIL>> License: MIT License Source: https://github.com/jacksonllee/nskipgrams """ from collections import defaultdict, OrderedDict from itertools import combinations import pkg_resources __version__ = pkg_resources.get_distribution("nskipgrams").version def _trie(): return defaultdict(_trie) def _flattened_ngrams_with_counts(trie, prefix): def _flatten_trie(trie_): try: # If `trie.values()` are not sum-able (hence TypeError), they # are tries themselves. sum(trie_.values()) except TypeError: for token, inner_trie in trie_.items(): for inner_tokens in _flatten_trie(inner_trie): # Python < 3.8 doesn't allow `token, *inner_tokens` syntax combined = (token,) + inner_tokens yield combined else: yield from trie_.items() prefix = prefix or () inner_trie = _get_inner_trie_from_prefix(trie, prefix) if inner_trie is None: return elif type(inner_trie) == int: # `inner_trie` is the count of the ngram (= the given prefix) yield prefix, inner_trie else: for tokens in _flatten_trie(inner_trie): ngram = tokens[:-1] count = tokens[-1] yield prefix + ngram, count def _get_inner_trie_from_prefix(trie, ngram_prefix): for token in ngram_prefix: if type(trie) == int or token not in trie: # If we reach the end of the trie without exhausting the prefix, # then the prefix is simply not a prefix for the given trie. return None else: # Since `trie` is a defaultdict but not a vanilla dict, # we can't use a try-except block around `trie = trie[token]` # which would never raise KeyError and would undesirably # create an empty defaultdict by calling `trie[token]` # even for an unfound `token`. trie = trie[token] # `trie` at this point could be an int for the prefix's count, # or an actual trie that continues from the prefix. return trie def skipgrams_from_seq(seq, n, skip): _validate_n(n, upper_bound=len(seq)) _validate_skip(skip, upper_bound=len(seq)) all_indices = OrderedDict() # used an ordered set for k in range(len(seq) - n + 1): for indices in combinations(range(min(skip + n, len(seq))), n): all_indices[tuple(i + k for i in indices)] = 0 # value 0 is meaningless for indices in all_indices.keys(): try: yield tuple(seq[i] for i in indices) except IndexError: pass def ngrams_from_seq(seq, n): """Yield ngrams extracted from the given sequence. Parameters ---------- seq : iterable n : int The size of the ngrams to extract. Yields ------ tuple ngram of size `n` """ yield from zip(*(seq[i:] for i in range(n))) def _validate_n(n, upper_bound=None): if type(n) != int or n < 1: raise ValueError(f"n must be an integer >= 1: {n}") elif upper_bound is not None and n > upper_bound: raise ValueError(f"n is outside of [1, {upper_bound}]: {n}") return n def _validate_skip(skip, upper_bound=None): if type(skip) != int or skip < 0: raise ValueError(f"skip must be an integer >= 0: {skip}") elif upper_bound is not None and skip > upper_bound: raise ValueError(f"skip is outside of [0, {upper_bound}]: {skip}") return skip class Skipgrams: """A collection of skipgrams.""" def __init__(self, n, skip=0): self.n = _validate_n(n) self.skip = _validate_skip(skip) self._tries = {(i + 1, k): _trie() for i in range(n) for k in range(skip + 1)} def add(self, skipgram, skip=0, count=1): """Add a skipgram. Parameters ---------- skipgram : tuple Skipgram to add. skip : int Number of skips this skipgram has. 
count : int, optional Count for the skipgram, for the convenience of not having to call this method multiple times if this skipgram occurs multiple times in your data. """ self._add(skipgram, skip, count, validated=False) def _add(self, skipgram, skip, count, validated): if not validated: if not 1 <= len(skipgram) <= self.n: raise ValueError(f"length of {skipgram} is outside of [1, {self.n}]") _validate_skip(skip, self.skip) trie = self._tries[(len(skipgram), skip)] for token in skipgram[:-1]: trie = trie[token] last_token = skipgram[-1] if last_token in trie: trie[last_token] += count else: trie[last_token] = count def add_from_seq(self, seq, count=1): """Add skipgrams from a sequence. Parameters ---------- seq : iterable A sequence (e.g., a list or tuple of strings as a sentence with words, or a string as a word with characters) from which skipgrams are extracted. count : int, optional Count for the skipgram, for the convenience of not having to call this method multiple times if this skipgram occurs multiple times in your data. """ for n in range(1, min(self.n, len(seq)) + 1): for skip in range(0, max(min(self.skip, len(seq) - n) + 1, 1)): for ngram in skipgrams_from_seq(seq, n, skip): self._add(ngram, skip=skip, count=count, validated=True) def count(self, skipgram, skip=0): """Return the skipgram's count. If the skipgram is longer than the order of this skipgram collection, or if the skipgram isn't found in this collection, then 0 is returned. Parameters ---------- skipgram : tuple Skipgram to get the count for. skip : int Number of skips this skipgram has. Returns ------- int """ _validate_skip(skip, self.skip) if len(skipgram) > self.n: return 0 trie = self._tries[(len(skipgram), skip)] count_ = _get_inner_trie_from_prefix(trie, skipgram) if isinstance(count_, dict) or not count_: return 0 else: return count_ def __contains__(self, skipgram): """Determine if the skipgram is found in this collection. Note that this function returns ``True`` if and only if the skipgram exactly matches an existing one in the collection. Parameters ---------- skipgram : tuple Skipgram to check membership for. Returns ------- bool """ for skip in range(self.skip + 1): c = self.count(skipgram, skip=skip) if c: return c else: return 0 def skipgrams_with_counts(self, n, skip=0, prefix=None): """Yield pairs of skipgrams and counts. Parameters ---------- n : int skip : int Number of skips to yield skipgrams for. prefix : iterable, optional If provided, all yielded skipgrams start with this prefix. Yields ------ tuple, int A skipgram (tuple) and its count (int) """ yield from self._skipgrams_with_counts(n, skip, prefix, validated=False) def _skipgrams_with_counts(self, n, skip, prefix, validated): if not validated: _validate_n(n, upper_bound=self.n) _validate_skip(skip, upper_bound=self.skip) trie = self._tries[(n, skip)] yield from _flattened_ngrams_with_counts(trie, prefix) def combine(self, *others): """Combine collections of skipgrams in-place. If any new skipgram collections' ``n`` and/or ``skip`` is larger than those of the current collection, then the current collection's ``n`` and/or ``skip`` will be coerced to enlarge so that the new skipgrams can fit. Parameters ---------- *others : iterable of ``Skipgrams`` instances Raises ------ TypeError If any item in `others` is not a ``Skipgrams`` instance. 
""" for other in others: if not isinstance(other, Skipgrams): raise TypeError(f"arg must be a Skipgrams instance: {type(other)}") if other.n > self.n: self.n = other.n if other.skip > self.skip: self.skip = other.skip for n in range(1, other.n + 1): for skip in range(0, other.skip + 1): for ngram, count in other._skipgrams_with_counts( n=n, skip=skip, prefix=None, validated=True ): self._add(ngram, skip, count, validated=True) class Ngrams(Skipgrams): """A collection of ngrams. Ngrams are a special case of skipgrams, where skip = 0. This class has methods inherited from ``Skipgrams``. """ def __init__(self, n): super().__init__(n, skip=0) def ngrams_with_counts(self, n, prefix=None): yield from super(Ngrams, self).skipgrams_with_counts(n, skip=0, prefix=prefix)
StarcoderdataPython
9607958
<reponame>WaiNaat/BOJ-Python
### Wrong Answer (this submission was judged incorrect) ###

from collections import deque

# functions
'''
Determine whether the string S + tail is a qilindrome.
'''
def isQilin():
    s = "".join(["".join(S), "".join(tail)])
    for i in range(len(s) // 2 + 1):
        if change[s[i]] != s[-1-i] and s[-1-i] != '-':
            return False
    return True

# input
S = list(input())

# process
'''
1.  If an uppercase/lowercase letter has no mirror image but converting it to the
    same letter in the other case produces one, convert it first.
    Targets: (B, D, L, N, P, Q, R, a, e, h, s, t, y, z)
2.  The new nickname consists of S + tail.
    The string tail holds the letters that must be appended so that S + tail
    becomes a qilindrome.
    For i in range(len(S)):
        if S + tail is not yet a qilindrome, prepend the mirror of S[i] to tail.
'''
# 1.
change_upper = "BDLNRPQR"
change_lower = "aehstyz"
for i in range(len(S)):
    if S[i] in change_upper:
        S[i] = S[i].lower()
    elif S[i] in change_lower:
        S[i] = S[i].upper()

# build the mirror-symmetry table
original = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
change = "A---3--HI---M-O---2TUVWXY5-d-b----i--lmnoqp7--uvwx--01SE-Z-r8-"
change = {original[i]: change[i] for i in range(len(original))}

# 2.
sLen = len(S)
tail = deque([])
for i in range(sLen):
    found = isQilin()
    if found or change[S[i]] == '-':
        break
    tail.appendleft(change[S[i]])

found = isQilin()

# output
print("".join(["".join(S), "".join(tail)])) if found else print(-1)
StarcoderdataPython
4878772
<reponame>coderMaruf/leetcode-1<gh_stars>10-100
'''
Description:

Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.

Symbol       Value
I             1
V             5
X             10
L             50
C             100
D             500
M             1000

For example, two is written as II in Roman numeral, just two one's added together.
Twelve is written as, XII, which is simply X + II.
The number twenty seven is written as XXVII, which is XX + V + II.

Roman numerals are usually written largest to smallest from left to right.
However, the numeral for four is not IIII. Instead, the number four is written as IV.
Because the one is before the five we subtract it making four.
The same principle applies to the number nine, which is written as IX.
There are six instances where subtraction is used:

I can be placed before V (5) and X (10) to make 4 and 9.
X can be placed before L (50) and C (100) to make 40 and 90.
C can be placed before D (500) and M (1000) to make 400 and 900.

Given an integer, convert it to a roman numeral.
Input is guaranteed to be within the range from 1 to 3999.

Example 1:
Input: 3
Output: "III"

Example 2:
Input: 4
Output: "IV"

Example 3:
Input: 9
Output: "IX"

Example 4:
Input: 58
Output: "LVIII"
Explanation: L = 50, V = 5, III = 3.

Example 5:
Input: 1994
Output: "MCMXCIV"
Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.
'''


class Solution:
    def intToRoman(self, num: int) -> str:
        counter = 0
        ones, tens, hundreds, thousands = None, None, None, None

        while num != 0:
            digit = num % 10

            if counter == 0:
                ones = digit
            elif counter == 1:
                tens = digit
            elif counter == 2:
                hundreds = digit
            elif counter == 3:
                thousands = digit

            # next run
            counter += 1
            num = num // 10

        # message for tracing and debugging
        # print("{} {} {} {}".format(thousands, hundreds, tens, ones))

        # str_roman stores the roman number representation
        str_roman = str()

        if thousands is not None:
            roman_thousands = "M"
            str_roman += (roman_thousands * thousands)

        if hundreds is not None:
            roman_100 = "C"
            roman_500 = "D"
            roman_400 = "CD"
            roman_900 = "CM"

            if 3 >= hundreds >= 1:
                str_roman += roman_100 * hundreds
            elif 4 == hundreds:
                str_roman += roman_400
            elif 5 == hundreds:
                str_roman += roman_500
            elif 8 >= hundreds >= 6:
                offset = hundreds - 5
                str_roman += (roman_500 + roman_100 * offset)
            elif 9 == hundreds:
                str_roman += roman_900

        if tens is not None:
            roman_10 = "X"
            roman_50 = "L"
            roman_40 = "XL"
            roman_90 = "XC"

            if 3 >= tens >= 1:
                str_roman += roman_10 * tens
            elif 4 == tens:
                str_roman += roman_40
            elif 5 == tens:
                str_roman += roman_50
            elif 8 >= tens >= 6:
                offset = tens - 5
                str_roman += (roman_50 + roman_10 * offset)
            elif 9 == tens:
                str_roman += roman_90

        if ones is not None:
            roman_1 = "I"
            roman_5 = "V"
            roman_4 = "IV"
            roman_9 = "IX"

            if 3 >= ones >= 1:
                str_roman += roman_1 * ones
            elif 4 == ones:
                str_roman += roman_4
            elif 5 == ones:
                str_roman += roman_5
            elif 8 >= ones >= 6:
                offset = ones - 5
                str_roman += (roman_5 + roman_1 * offset)
            elif 9 == ones:
                str_roman += roman_9

        return str_roman


def test_bench():
    test_numbers = [4, 9, 10, 8, 68, 468, 2468]

    "expected output"
    '''
    IV
    IX
    X
    VIII
    LXVIII
    CDLXVIII
    MMCDLXVIII
    '''

    for x in test_numbers:
        roman_notation = Solution().intToRoman(x)
        print(roman_notation)

    return


if __name__ == "__main__":
    test_bench()
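# Illustrative alternative (not part of the original file): the same conversion is
# often written as a greedy walk over a value/symbol table, which avoids the
# per-digit branching above. Shown only as a sketch for comparison.
_ROMAN_TABLE = [
    (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
    (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
    (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
]

def int_to_roman_greedy(num: int) -> str:
    # Repeatedly take the largest table value that still fits into the remainder.
    parts = []
    for value, symbol in _ROMAN_TABLE:
        count, num = divmod(num, value)
        parts.append(symbol * count)
    return "".join(parts)

# Example: int_to_roman_greedy(1994) == "MCMXCIV"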
StarcoderdataPython
8120160
import os

import dace.library


def _find_mkl_include():
    if 'MKLROOT' in os.environ:
        return [os.path.join(os.environ['MKLROOT'], 'include')]
    else:
        return []


@dace.library.environment
class IntelMKL:

    cmake_minimum_version = None
    cmake_packages = ["BLAS"]
    cmake_variables = {"BLA_VENDOR": "Intel10_64lp"}
    cmake_includes = _find_mkl_include()
    cmake_libraries = ["mkl_rt"]
    cmake_compile_flags = []
    cmake_link_flags = []
    cmake_files = []

    headers = ["mkl.h", "../include/dace_blas.h"]
    init_code = ""
    finalize_code = ""
StarcoderdataPython
105123
from django.core.management.base import BaseCommand, CommandError

from dashboard.models import Bin, Dataset


class Command(BaseCommand):
    """for testing only!!"""
    help = 'delete all bins'

    def add_arguments(self, parser):
        parser.add_argument('-ds', '--dataset', type=str, help='name of dataset')

    def handle(self, *args, **options):
        ds_name = options.get('dataset')
        if ds_name is not None:
            ds = Dataset.objects.get(name=ds_name)
            ds.bins.all().delete()
        else:
            Bin.objects.all().delete()
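# Usage sketch (not part of the original file): management commands are invoked by the
# name of the module they live in, which is not shown here, so "delete_bins" below is
# only a guess at that filename.
#
#   python manage.py delete_bins                    # delete every Bin
#   python manage.py delete_bins -ds my_dataset     # delete only bins of one dataset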
StarcoderdataPython
4951826
# Generated by Django 3.1.7 on 2021-07-09 06:50

import datetime

from django.db import migrations
import django_jalali.db.models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0018_auto_20210707_1155'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ad',
            name='end_date',
            field=django_jalali.db.models.jDateTimeField(
                default=datetime.datetime(2021, 7, 9, 11, 20, 24, 883317),
                verbose_name='زمان پایان',
            ),
        ),
    ]
StarcoderdataPython
4907959
<filename>syft/frameworks/torch/tensors/interpreters/replicated_shared.py import random from operator import add, sub import torch import syft from syft.generic.abstract.tensor import AbstractTensor class ReplicatedSharingTensor(AbstractTensor): def __init__( self, shares_map=None, owner=None, id=None, tags=None, description=None, ): super().__init__(id=id, owner=owner, tags=tags, description=description) self.child = shares_map self.ring_size = 2 ** 5 def share_secret(self, secret, workers): number_of_shares = len(workers) workers = self.__arrange_workers(list(workers)) shares = self.generate_shares(secret, number_of_shares) shares_map = self.__distribute_shares(workers, shares) self.child = shares_map return self @staticmethod def __arrange_workers(workers): """ having local worker in index 0 saves one communication round""" if syft.hook.local_worker in workers: workers.remove(syft.hook.local_worker) workers = [syft.hook.local_worker] + workers return workers def generate_shares(self, plain_text, number_of_shares=3): shares = [] for _ in range(number_of_shares - 1): shares.append(torch.tensor(random.randrange(self.ring_size))) shares.append(torch.tensor((plain_text - sum(shares)) % self.ring_size)) return shares @staticmethod def __distribute_shares(workers, shares): shares_map = {} for i in range(len(shares)): pointer1 = shares[i].send(workers[i]) pointer2 = shares[(i + 1) % len(shares)].send(workers[i]) shares_map[workers[i]] = (pointer1, pointer2) return shares_map def reconstruct(self): shares_map = self.get_shares_map() shares = self.__retrieve_shares(shares_map) plain_text_mod = self.__sum_shares(shares) plain_text = self.__map_modular_to_real(plain_text_mod) return plain_text def __retrieve_shares(self, shares_map): pointers = self.__retrieve_pointers(shares_map) shares = [] for pointer in pointers: shares.append(pointer.get()) return shares @staticmethod def __retrieve_pointers(shares_map): players = list(shares_map.keys()) pointers = list(shares_map[players[0]]) pointers.append(shares_map[players[1]][1]) return pointers def __sum_shares(self, shares): return sum(shares) % self.ring_size def __map_modular_to_real(self, mod_number): """In a modular ring, a number x is mapped to a negative real number ]0,-∞[ iff x > ring_size/2 """ element_wise_comparison = mod_number > self.ring_size // 2 real_number = (element_wise_comparison * -self.ring_size) + mod_number return real_number def add(self, value): return self.__switch_public_private(value, self.public_add, self.private_add) __add__ = add def public_add(self, plain_text): return self.public_linear_operation(plain_text, add) def private_add(self, secret): return self.private_linear_operation(secret, add) def sub(self, value): return self.__switch_public_private(value, self.public_sub, self.private_sub) __sub__ = sub def public_sub(self, plain_text): return self.public_linear_operation(plain_text, sub) def private_sub(self, secret): return self.private_linear_operation(secret, sub) @staticmethod def __switch_public_private(value, public_function, private_function): if isinstance(value, (int, float, torch.Tensor, syft.FixedPrecisionTensor)): return public_function(value) elif isinstance(value, syft.ReplicatedSharingTensor): return private_function(value) else: raise NotImplementedError( "ReplicatedSharingTensor can only be added to" " int, float, torch tensor, or ReplicatedSharingTensor" ) def private_linear_operation(self, secret, operator): if not self.verify_matching_players(secret): raise ValueError("Shares must be distributed 
among same parties") z = {} x, y = self.get_shares_map(), secret.get_shares_map() for player in x.keys(): z[player] = (operator(x[player][0], y[player][0]), operator(x[player][1], y[player][1])) return ReplicatedSharingTensor(z) def public_linear_operation(self, plain_text, operator): players = self.get_players() shares_map = self.get_shares_map() plain_text = torch.tensor(plain_text).send(players[0]) shares_map[players[0]] = ( operator(shares_map[players[0]][0], plain_text), shares_map[players[0]][1], ) return syft.ReplicatedSharingTensor(shares_map) def verify_matching_players(self, *secrets): players_set_0 = self.get_players() for secret in secrets: players_set_i = secret.get_players() if players_set_i != players_set_0: return False return True def get_players(self): return list(self.get_shares_map().keys()) def get_shares_map(self): """ shares_map: dic(worker i : (share_pointer i, share_pointer i+1) """ return self.child def __repr__(self): return self.__str__() def __str__(self): type_name = type(self).__name__ out = f"[" f"{type_name}]" if self.child is not None: for v in self.child.values(): out += "\n\t-> " + str(v) return out
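# Usage sketch (not part of the original file): splitting two secrets across three
# parties and recombining them. The hook/worker setup below assumes the PySyft
# 0.2.x-style API (TorchHook / VirtualWorker) and is illustrative only.
import torch
import syft

hook = syft.TorchHook(torch)
alice = syft.VirtualWorker(hook, id="alice")
bob = syft.VirtualWorker(hook, id="bob")
charlie = syft.VirtualWorker(hook, id="charlie")

# Each secret is split into three replicated shares, one pair per worker.
x = syft.ReplicatedSharingTensor().share_secret(torch.tensor(7), [alice, bob, charlie])
y = syft.ReplicatedSharingTensor().share_secret(torch.tensor(5), [alice, bob, charlie])

print((x + y).reconstruct())  # private + private -> tensor(12)
print((x + 3).reconstruct())  # private + public  -> tensor(10)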
StarcoderdataPython
3343615
<gh_stars>100-1000 from .cspace import CSpace from .. import robotsim from ..model import collide from .cspaceutils import EmbeddedCSpace import math import random class RobotCSpace(CSpace): """A basic robot cspace that allows collision free motion. Args: robot (RobotModel): the robot that's moving. collider (:class:`WorldCollider`, optional): a collide.WorldCollider instance instantiated with the world in which the robot lives. Any ignored collisions in the collider will be respected in the feasibility tests of this CSpace. If this is not provided, then only self-collisions will be checked. .. warning:: If your robot has non-standard joints, like a free- floating base or continuously rotating (spin) joints, you may need to overload the :meth:`sample` method. The default implementation assumes that everything with unbounded limits is a rotational joint. """ def __init__(self,robot,collider=None): CSpace.__init__(self) self.robot = robot self.setBounds(list(zip(*robot.getJointLimits()))) self.collider = collider self.addFeasibilityTest((lambda x: self.inJointLimits(x)),"joint limits") def setconfig(x): self.robot.setConfig(x) return True if collider: bb0 = ([float('inf')]*3,[float('-inf')]*3) bb = [bb0[0],bb0[1]] def calcbb(x): bb[0] = bb0[0] bb[1] = bb0[1] for i in range(self.robot.numLinks()): g = self.robot.link(i).geometry() if not g.empty(): bbi = g.getBB() bb[0] = [min(a,b) for (a,b) in zip(bb[0],bbi[0])] bb[1] = [max(a,b) for (a,b) in zip(bb[1],bbi[1])] return True def objCollide(o): obb = self.collider.world.rigidObject(o).geometry().getBB() if not collide.bb_intersect(obb,bb): return False return any(True for _ in self.collider.robotObjectCollisions(self.robot.index,o)) def terrCollide(o): obb = self.collider.world.terrain(o).geometry().getBB() if not collide.bb_intersect(obb,bb): return False return any(True for _ in self.collider.robotTerrainCollisions(self.robot.index,o)) self.addFeasibilityTest(setconfig,"setconfig") self.addFeasibilityTest(calcbb,"calcbb",dependencies="setconfig") self.addFeasibilityTest((lambda x: not self.selfCollision()),"self collision",dependencies="setconfig") #self.addFeasibilityTest((lambda x: not self.envCollision()),"env collision") for o in range(self.collider.world.numRigidObjects()): self.addFeasibilityTest((lambda x,o=o: not objCollide(o)),"obj collision "+str(o)+" "+self.collider.world.rigidObject(o).getName(),dependencies="calcbb") for o in range(self.collider.world.numTerrains()): self.addFeasibilityTest((lambda x,o=o: not terrCollide(o)),"terrain collision "+str(o)+" "+self.collider.world.terrain(o).getName(),dependencies="calcbb") else: self.addFeasibilityTest(setconfig,"setconfig") self.addFeasibilityTest((lambda x: not self.selfCollision()),"self collision",dependencies="setconfig") self.joint_limit_failures = [0]*len(self.bound) self.properties['geodesic'] = 1 def addConstraint(self,checker,name=None): self.addFeasibilityTest(checker,name) def sample(self): """Overload this to implement custom sampling strategies or to handle non-standard joints. 
This one will handle spin joints and rotational axes of floating bases.""" res = CSpace.sample(self) for i,x in enumerate(res): if math.isnan(x): res[i] = random.uniform(0,math.pi*2.0) return res def inJointLimits(self,x): """Checks joint limits of the configuration x""" for i,(xi,bi) in enumerate(zip(x,self.bound)): if xi < bi[0] or xi > bi[1]: self.joint_limit_failures[i] += 1 return False return True def selfCollision(self,x=None): """Checks whether the robot at its current configuration is in self collision""" #This should be faster than going through the collider... if x is not None: self.robot.setConfig(x) return self.robot.selfCollides() #if not self.collider: return False #return any(self.collider.robotSelfCollisions(self.robot.index)) def envCollision(self,x=None): """Checks whether the robot at its current configuration is in collision with the environment.""" if not self.collider: return False if x is not None: self.robot.setConfig(x) for o in range(self.collider.world.numRigidObjects()): if any(self.collider.robotObjectCollisions(self.robot.index,o)): return True for o in range(self.collider.world.numTerrains()): if any(self.collider.robotTerrainCollisions(self.robot.index,o)): return True return False def interpolate(self,a,b,u): return self.robot.interpolate(a,b,u) def distance(self,a,b): return self.robot.distance(a,b) def sendPathToController(self,path,controller): """Given a planned CSpace path 'path' and a SimRobotController 'controller', sends the path so that it is executed correctly by the controller (this assumes a fully actuated robot).""" controller.setMilestone(path[0]) for q in path[1:]: controller.appendMilestoneLinear(q) class ClosedLoopRobotCSpace(RobotCSpace): """A closed loop cspace. Allows one or more IK constraints to be maintained during the robot's motion. Attributes: solver (IKSolver): the solver containing all IK constraints maxIters (int): the maximum number of iterations for numerical IK solver tol (float): how closely the IK constraint must be met, in meters and/ or radians To satisfy the IK constraint, the motion planner ensures that configuration samples are projected to the manifold of closed-loop IK solutions. To create edges between samples a and b, the straight line path a and b is projected to the manifold via an IK solve. """ def __init__(self,robot,iks,collider=None): RobotCSpace.__init__(self,robot,collider) self.solver = robotsim.IKSolver(robot) if hasattr(iks,'__iter__'): for ik in iks: self.solver.add(ik) else: self.solver.add(iks) #root finding iterations self.maxIters = 100 self.tol = 1e-3 self.addFeasibilityTest((lambda x: self.closedLoop(x)),'closed loop constraint') def setIKActiveDofs(self,activeSet): """Marks that only a subset of the DOFs of the robot are to be used for solving the IK constraint. Args: activeSet (list of int): the robot DOF indices that should be active. """ self.solver.setActiveDofs(activeSet) def sample(self): """Samples directly on the contact manifold. The basic method samples arbitrarily in the configuration space and then solves IK constraints. This may be an ineffective method especially for floating-base robots, since the floating joints may be sampled arbitrarily. """ x = RobotCSpace.sample(self) return self.solveConstraints(x) def sampleneighborhood(self,c,r): """Samples a neighborhood in ambient space and then projects onto the contact manifold. 
""" x = RobotCSpace.sampleneighborhood(self,c,r) return self.solveConstraints(x) def solveConstraints(self,x): """Given an initial configuration of the robot x, attempts to solve the IK constraints given in this space. Return value is the best configuration found via local optimization. """ self.robot.setConfig(x) self.solver.setMaxIters(self.maxIters) self.solver.setTolerance(self.tol) res = self.solver.solve() return self.robot.getConfig() def closedLoop(self,config=None,tol=None): """Returns true if the closed loop constraint has been met at config, or if config==None, the robot's current configuration.""" if config is not None: self.robot.setConfig(config) e = self.solver.getResidual() if tol==None: tol = self.tol return max(abs(ei) for ei in e) <= tol def interpolate(self,a,b,u): """Interpolates on the manifold. Used by edge collision checking""" x = RobotCSpace.interpolate(self,a,b,u) return self.solveConstraints(x) def interpolationPath(self,a,b,epsilon=1e-2): """Creates a discretized path on the contact manifold between the points a and b, with resolution epsilon. """ d = self.distance(a,b) nsegs = int(math.ceil(d/epsilon)) if nsegs <= 1: return [a,b] res = [a] for i in range(nsegs-1): u = float(i+1)/float(nsegs) res.append(self.interpolate(a,b,u)) res.append(b) return res def discretizePath(self,path,epsilon=1e-2): """Given a :class:`CSpace` path ``path``, generates a path that satisfies closed-loop constraints up to the given distance between milestones. """ if path is None: return None if len(path)==0: return [] respath = [path[0]] for a,b in zip(path[:-1],path[1:]): respath += self.interpolationPath(a,b,epsilon)[1:] return respath def sendPathToController(self,path,controller,epsilon=1e-2): """Given a :class:`CSpace` path ``path``, sends the path to be executed to the :class:`SimRobotController` ``controller``. This discretizes the path and sends it as a piecewise linear curve, limited in speed by the robot's maximum velocity. .. note:: This isn't the best thing to do for robots with slow acceleration limits and/or high inertias because it ignores acceleration. A better solution can be found in the MInTOS package or the C++ code in Klampt/Cpp/Planning/RobotTimeScaling.h. """ dpath = self.discretizePath(path,epsilon) vmax = controller.model().getVelocityLimits() assert len(dpath[0]) == len(vmax) controller.setMilestone(dpath[0]) for a,b in zip(dpath[:-1],dpath[1:]): dt = 0.0 for i in range(len(a)): if vmax[i] == 0: if a[i] != b[i]: print("ClosedLoopRobotCSpace.sendPathToController(): Warning, path moves on DOF %d with maximum velocity 0"%(i,)) else: dt = max(dt,abs(a[i]-b[i])/vmax[i]) #this does a piecewise lienar interpolation controller.appendLinear(dt,b) class ImplicitManifoldRobotCSpace(RobotCSpace): """A closed loop cspace with an arbitrary numerical manifold f(q)=0 to constrain the robot's motion. The argument implicitConstraint should be a function f(q) returning a list of values that should be equal to 0 up to the given tolerance. Essentially this is a ClosedLoopRobotCSpace except with a user-provided function. See :class:`ClosedLoopRobotCSpace`. 
""" def __init__(self,robot,implicitConstraint,collider=None): RobotCSpace.__init__self(robot,collider) self.implicitConstraint = implicitConstraint #root finding iterations self.maxIters = 100 self.tol = 1e-3 self.addFeasibilityTest((lambda x: self.onManifold(x)),'implicit manifold constraint') def sample(self): """Samples directly on the contact manifold""" x = RobotCSpace.sample() return self.solveManifold(x) def onManifold(self,x,tol=None): """Returns true if the manifold constraint has been met at x.""" e = self.implicitConstraint.eval(x) if tol==None: tol = self.tol return max(abs(ei) for ei in e) <= tol def solveManifold(self,x,tol=None,maxIters=None): """Solves the manifold constraint starting from x, to the given tolerance and with the given maximum iteration count. Default uses the values set as attributes of this class. """ if tol==None: tol = self.tol if maxIters==None: maxIters = self.maxIters import rootfind rootfind.setXTolerance(1e-8) rootfind.setFTolerance(tol) rootfind.setVectorField(self.implicitConstraint) (res,x,val) = rootfind.findRootsBounded(x,self.bound) return x def interpolate(self,a,b,u): """Interpolates on the manifold. Used by edge collision checking""" x = RobotCSpace.interpolate(self,a,b,u) return self.solveManifold(x) class EmbeddedRobotCSpace(EmbeddedCSpace): """A basic robot cspace that allows collision free motion of a *subset* of joints. The subset is given by the indices in the list "subset" provided to the constructor. The configuration space is R^k where k is the number of DOFs in the subset. Args: ambientspace (RobotCSpace): a RobotCSpace, ClosedLoopRobotCSpace, etc. subset (list of ints): the indices of moving DOFs xinit (configuration, optional): the reference configuration, or None to use the robot's current configuration as the reference. 
""" def __init__(self,ambientspace,subset,xinit=None): self.robot = ambientspace.robot if xinit is None: xinit = self.robot.getConfig() EmbeddedCSpace.__init__(self,ambientspace,subset,xinit) #do monkey-patching needed to make the sampler work properly for closed-loop spaces if isinstance(ambientspace,ImplicitManifoldRobotCSpace): import rootfind def subsetImplicitConstraint(x): return self.ambientSpace.implicitConstraint(self.lift(x)) def solveManifold(x): rootfind.setXTolerance(1e-8) rootfind.setFTolerance(self.ambientspace.tol) rootfind.setVectorField(subsetImplicitConstraint) (res,x,val) = rootfind.findRootsBounded(x,self.bound) return x def sample(): return solveManifold(self.lift(CSpace.sample(self))) def sampleneighborhood(c,r): return solveManifold(self.lift(CSpace.sampleneighborhood(self,c,r))) self.sample = sample self.sampleneighborhood = sampleneighborhood if isinstance(ambientspace,ClosedLoopRobotCSpace): #sanity check activedofs = ambientspace.solver.getActiveDofs() if len(activedofs) > len(subset): raise ValueError("ClosedLoopRobotCSpace IK solver must be configured with moving dofs that are within the subset of embedded dofs") elif activedofs != subset: ssubset = set(subset) for i in activedofs: if i not in ssubset: raise ValueError("ClosedLoopRobotCSpace IK solver must be configured with moving dofs that are within the subset of embedded dofs") def sample(): xseed = self.lift(CSpace.sample(self)) return self.project(self.ambientspace.solveConstraints(xseed)) def sampleneighborhood(c,r): xseed = self.lift(CSpace.sampleneighborhood(self,c,r)) return self.project(self.ambientspace.solveConstraints(xseed)) self.sample = sample self.sampleneighborhood = sampleneighborhood def disableInactiveCollisions(self): """This modifies the collider in ambientspace to only check collisions between moving pairs. Should be called before `setup()` in most cases. """ robot = self.robot collider = self.ambientspace.collider subset = self.mapping active = [False]*robot.numLinks() for i in subset: active[i] = True for i in range(robot.numLinks()): if active[robot.link(i).getParent()]: active[i] = True inactive = [] for i in range(robot.numLinks()): if not active[i]: inactive.append(i) #disable self-collisions for inactive objects for i in inactive: rindices = collider.robots[robot.index] rindex = rindices[i] if rindex < 0: continue newmask = set() for j in range(robot.numLinks()): if rindices[j] in collider.mask[rindex] and active[j]: newmask.add(rindices[j]) collider.mask[rindex] = newmask def discretizePath(self,path,epsilon=1e-2): """Only useful for ClosedLoopRobotCSpace""" if hasattr(self.ambientspace,'discretizePath'): return self.ambientspace.discretizePath(self.liftPath(path),epsilon) else: return self.liftPath(path) def sendPathToController(self,path,controller): """Sends a planned path so that it is executed correctly by the controller (assumes a fully actuated robot). Args: path (list of Configs): a path in the embedded space or the ambient space, as returned by a planner. controller (SimRobotController): the robot's controller """ if len(path[0]) == len(self.mapping): path = self.liftPath(path) if hasattr(self.ambientspace,'discretizePath'): path = self.ambientspace.discretizePath(path) self.ambientspace.sendPathToController(path,controller) class RobotSubsetCSpace(EmbeddedCSpace): """A basic robot cspace that allows collision free motion of a *subset* of joints. The subset is given by the indices in the list "subset" provided to the constructor. 
The configuration space is R^k where k is the number of DOFs in the subset. This class will automatically disable all collisions for inactive robot links in the collider. .. note:: To convert from start/goal robot configurations to the CSpace, call the `project(qrobot)` method for the start and goal. (see :meth:`EmbeddedCSpace.project`) .. note:: To convert from a planned path back to the robot's full configuration space, you will need to call the `lift(q)` method for all configurations q in the planned path. (see :meth:`EmbeddedCSpace.lift`) .. warning:: If your robot has non-standard joints, like a free-floating base or continuously rotating (spin) joints, you will need to overload the :meth:`sample` method. .. deprecated:: 0.8.6 Deprecated this in favor of EmbeddedRobotCSpace, which is adaptable to ClosedLoopRobotCSpace and ImplicitManifoldRobotCSpace. To convert code to EmbeddedRobotCSpace, convert ``space = RobotSubsetCSpace(robot,subset,collider)`` to ``space = EmbeddedCSpace(RobotCSpace(robot,collider),subset);`` ``space.disableInactiveCollisions()`` """ def __init__(self,robot,subset,collider=None): EmbeddedCSpace.__init__(self,RobotCSpace(robot,collider),subset,xinit=robot.getConfig()) self.collider = collider if self.collider: #determine moving objects, which includes all links in the subset and descendants moving = [False]*robot.numLinks() for i in range(robot.numLinks()): if i in subset: moving[i] = True else: p = robot.link(i).getParent() if p >= 0 and moving[p]: moving[i]=True #disable self-collisions for non moving objects for i,mv in enumerate(moving): if not mv: rindices = self.collider.robots[robot.index] rindex = rindices[i] if rindex < 0: continue newmask = set() for j in range(robot.numLinks()): if rindices[j] in self.collider.mask[rindex] and moving[j]: newmask.add(rindices[j]) self.collider.mask[rindex] = newmask def sendPathToController(self,path,controller): """Given a planned :class:`CSpace` path ``path`` and a :class:`SimRobotController` ``controller``, sends the path so that it is executed correctly by the controller. .. note: This assumes a fully actuated robot. It won't work for robots with free-floating bases. """ lpath = self.liftPath(path) controller.setMilestone(lpath[0]) for q in lpath[1:]: controller.appendMilestoneLinear(q)
StarcoderdataPython
3416977
<filename>Leetcode/2001-3000/2062. Count Vowel Substrings of a String/2062.py class Solution: def countVowelSubstrings(self, word: str) -> int: def countVowelSubstringsAtMost(goal: int) -> int: ans = 0 k = goal count = Counter() l = 0 for r, c in enumerate(word): if c not in 'aeiou': # fresh start l = r + 1 k = goal count = Counter() continue count[c] += 1 if count[c] == 1: k -= 1 while k == -1: count[word[l]] -= 1 if count[word[l]] == 0: k += 1 l += 1 ans += r - l + 1 # s[l..r], s[l + 1..r], ..., s[r] return ans return countVowelSubstringsAtMost(5) - countVowelSubstringsAtMost(4)
StarcoderdataPython